Code example #1
    def mergeJoinDispatch(self, event, obj):
        """ Performs a merge join on the pending fan-in dispatches.
        
        @param event: an event that is being merge joined (destination state must be a fan in) 
        @return: a list (possibly empty) of FSMContext instances
        """
        # this assertion comes from _queueDispatchFanIn - we never want fan-out info in a fan-in context
        assert not self.get(constants.GEN_PARAM)
        assert not self.get(constants.FORK_PARAM)

        # the work package index is stored in the url of the Task/FSMContext
        index = self.get(constants.INDEX_PARAM)
        taskNameBase = self.getTaskName(event, fanIn=True)

        # see comment (***) in self._queueDispatchFanIn
        #
        # in the case of failing to acquire a read lock (due to failed release of write lock)
        # we have decided to keep retrying
        raiseOnFail = False
        if self._getTaskRetryLimit() is not None:
            raiseOnFail = (self._getTaskRetryLimit() >
                           self.__obj[constants.RETRY_COUNT_PARAM])

        rwlock = ReadWriteLock(taskNameBase, self)
        rwlock.acquireReadLock(index, raiseOnFail=raiseOnFail)

        # and return the FSMContexts list
        class FSMContextList(list):
            """ A list that supports .logger.info(), .logger.warning() etc.for fan-in actions """
            def __init__(self, context, contexts):
                """ setup a self.logger for fan-in actions """
                super(FSMContextList, self).__init__(contexts)
                self.logger = Logger(context)
                self.instanceName = context.instanceName

        # see comment (A) in self._queueDispatchFanIn(...)
        time.sleep(constants.DATASTORE_ASYNCRONOUS_INDEX_WRITE_WAIT_TIME)

        # the following step ensures that fan-in only ever operates once over a list of data
        # the entity is created in State.dispatch(...) _after_ all the actions have executed
        # successfully
        workIndex = '%s-%d' % (taskNameBase, knuthHash(index))
        if obj[constants.RETRY_COUNT_PARAM] > 0:
            semaphore = RunOnceSemaphore(workIndex, self)
            if semaphore.readRunOnceSemaphore(
                    payload=self.__obj[constants.TASK_NAME_PARAM]):
                self.logger.info(
                    "Fan-in idempotency guard for workIndex '%s', not processing any work items.",
                    workIndex)
                return FSMContextList(self, [])  # don't operate over the data again

        # fetch all the work packages in the current group for processing
        query = _FantasmFanIn.all() \
                             .filter('workIndex =', workIndex) \
                             .order('__key__')

        # construct a list of FSMContexts
        contexts = [self.clone(data=r.context) for r in query]
        return FSMContextList(self, contexts)
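
The FSMContextList returned above behaves like a plain list of FSMContext instances, with a shared .logger and .instanceName for fan-in wide logging. Below is a minimal sketch of a fan-in do-action that consumes it; the SumAction name and the 'value' context key are illustrative, not part of fantasm:

    class SumAction(object):
        """ A hypothetical do-action for a fan-in state. """

        def execute(self, contexts, obj):
            # 'contexts' is the FSMContextList returned by mergeJoinDispatch:
            # iterable like a list, one FSMContext per joined work package
            total = sum(context.get('value', 0) for context in contexts)
            contexts.logger.info('fanned-in %d work packages, total=%d',
                                 len(contexts), total)
            return 'ok'
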
Code example #2
File: fsmcontext.py Project: GTxx/fsm
    def mergeJoinDispatch(self, event, obj):
        """ Performs a merge join on the pending fan-in dispatches.

        @param event: an event that is being merge joined (destination state must be a fan in)
        @return: a list (possibly empty) of FSMContext instances
        """
        # this assertion comes from _queueDispatchFanIn - we never want fan-out info in a fan-in context
        assert not self.get(constants.GEN_PARAM)
        assert not self.get(constants.FORK_PARAM)

        # the work package index is stored in the url of the Task/FSMContext
        index = self.get(constants.INDEX_PARAM)
        self.logger.debug('Index: %s', index)
        taskNameBase = self.getTaskName(event, fanIn=True)

        # see comment (***) in self._queueDispatchFanIn
        #
        # in the case of failing to acquire a read lock (due to failed release of write lock)
        # we have decided to keep retrying
        raiseOnFail = False
        if self._getTaskRetryLimit() is not None:
            raiseOnFail = (self._getTaskRetryLimit() > self.__obj[constants.RETRY_COUNT_PARAM])

        rwlock = ReadWriteLock(taskNameBase, self)
        rwlock.acquireReadLock(index, raiseOnFail=raiseOnFail)

        # and return the FSMContexts list
        class FSMContextList(list):
            """ A list that supports .logger.info(), .logger.warning() etc.for fan-in actions """
            def __init__(self, context, contexts, guarded=False):
                """ setup a self.logger for fan-in actions """
                super(FSMContextList, self).__init__(contexts)
                self.logger = Logger(context)
                self.instanceName = context.instanceName
                self.guarded = guarded

        # see comment (A) in self._queueDispatchFanIn(...)
        time.sleep(constants.DATASTORE_ASYNCRONOUS_INDEX_WRITE_WAIT_TIME)

        # the following step ensures that fan-in only ever operates once over a list of data
        # the entity is created in State.dispatch(...) _after_ all the actions have executed
        # successfully
        khash = knuthHash(index)
        self.logger.debug('knuthHash of index: %s', khash)
        workIndex = '%s-%d' % (taskNameBase, khash)
        if obj[constants.RETRY_COUNT_PARAM] > 0:
            semaphore = RunOnceSemaphore(workIndex, self)
            if semaphore.readRunOnceSemaphore(payload=self.__obj[constants.TASK_NAME_PARAM]):
                self.logger.info("Fan-in idempotency guard for workIndex '%s', not processing any work items.",
                                 workIndex)
                return FSMContextList(self, [], guarded=True) # don't operate over the data again

        # fetch all the work packages in the current group for processing
        query = _FantasmFanIn.all(namespace='') \
                             .filter('workIndex =', workIndex) \
                             .order('__key__')

        # construct a list of FSMContexts
        contexts = [self.clone(replaceData=r.context) for r in query]
        return FSMContextList(self, contexts)
Code example #3
File: lock_test.py Project: insad/fantasm
 def test_readRunOnceSemaphore_memcache_expired(self):
     sem = RunOnceSemaphore('foo', None)
     sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     memcache.delete('foo')
     payload = sem.readRunOnceSemaphore('payload',
                                        transactional=self.TRANSACTIONAL)
     self.assertEqual('payload', payload)
Code example #4
File: lock_test.py Project: oikmar/fantasm
 def test_writeRunOnceSemaphore_second_time_wrong_payload_datastore(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(None, memcache.get('foo'))
     self.assertEqual(0, _FantasmTaskSemaphore.all(namespace='').count())
     success, payload = sem.writeRunOnceSemaphore(
         'payload', transactional=self.TRANSACTIONAL)
     self.assertTrue(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all(namespace='').count())
     self.assertEqual('payload',
                      _FantasmTaskSemaphore.all(namespace='').get().payload)
     e = _FantasmTaskSemaphore.all(namespace='').get()
     e.payload = 'bar'
     e.put()
     memcache.delete('foo')
     success, payload = sem.writeRunOnceSemaphore(
         'payload', transactional=self.TRANSACTIONAL)
     self.assertEqual(1, len(self.loggingDouble.messages['critical']))
     self.assertTrue(self.loggingDouble.messages['critical'][0]\
                     .startswith("Run-once semaphore datastore payload write error."))
     self.assertFalse(success)
     self.assertEqual('bar', payload)
     self.assertEqual('bar', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all(namespace='').count())
     self.assertEqual('bar',
                      _FantasmTaskSemaphore.all(namespace='').get().payload)
Code example #5
    def execute(self, context, obj):
        """ Writes the CSV file """

        # fetch the CsvProgressCounter first; it is the parent of all the other
        # Models and is needed again after the run-once guard below
        counter = CsvProgressCounter.get_by_key_name(context.instanceName)

        # if we already wrote a file, don't do anything
        semaphore = RunOnceSemaphore(context.instanceName, context)
        if not semaphore.readRunOnceSemaphore(payload='payload'):

            # fetch the single aggregated results Model
            aggResults = CsvIntermediateResults.get_by_key_name(
                context.instanceName, counter)

            # open the file
            fileName = files.blobstore.create(
                mime_type='application/octet-stream')
            with files.open(fileName, 'a') as f:

                # the csv module has a convenient row writing interface
                writer = csv.writer(f)

                # this queries for all the intermediate results
                query = CsvIntermediateResults.all().ancestor(counter)
                for results in query:

                    # the aggregated results may also be in the results, so skip them
                    if aggResults and results.key() == aggResults.key():
                        continue

                    # for all the intermediate data, write the rows
                    data = results.data
                    for item in data:
                        # aggResults may be None if no aggregated results exist
                        rows = self.getRows(
                            context, obj, item,
                            aggResults.data if aggResults else None)
                        if rows:
                            for row in rows:
                                writer.writerow(row)

                if aggResults:
                    # now also write down any specific aggregated data rows
                    rows = self.getAggregatedRows(context, obj,
                                                  aggResults.data)
                    if rows:
                        for row in rows:
                            writer.writerow(row)

            # finalize the file
            files.finalize(fileName)

            # FIXME: what to do with this?
            blobKey = files.blobstore.get_blob_key(fileName)

            # at this point we have successfully written the file, let's make sure we don't do it again
            # if a retry occurs downstream
            semaphore.writeRunOnceSemaphore(payload='payload')

        # store the key of the counter (i.e. the parent of intermediate results) for cleanup
        context[COUNTER_KEY_PARAM] = counter.key()
        return OK_EVENT
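
The action above follows a read-guard / do-work / write-guard shape that recurs throughout these examples. Here is the same template factored into a reusable sketch; the runOnce helper and the doWork callable are hypothetical:

    def runOnce(name, context, doWork):
        """ Executes doWork at most once per name, across task retries. """
        semaphore = RunOnceSemaphore(name, context)
        if not semaphore.readRunOnceSemaphore(payload='payload'):
            doWork()
            # mark done only after the work succeeds; a failure before this
            # point leaves the guard unset, so a task retry runs doWork again
            semaphore.writeRunOnceSemaphore(payload='payload')
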
Code example #6
File: lock_test.py Project: BobDohnal/fantasm
 def test_readRunOnceSemaphore_payload_error_memcache_expired(self):
     sem = RunOnceSemaphore('foo', None)
     sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     payload = sem.readRunOnceSemaphore('bar', transactional=self.TRANSACTIONAL)
     self.assertEqual('payload', payload)
     self.assertEqual(1, len(self.loggingDouble.messages['critical']))
     self.assertTrue(self.loggingDouble.messages['critical'][0]\
                     .startswith("Run-once semaphore memcache payload read error."))
Code example #7
File: lock_test.py Project: insad/fantasm
 def test_readRunOnceSemaphore_payload_error_memcache_expired(self):
     sem = RunOnceSemaphore('foo', None)
     sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     payload = sem.readRunOnceSemaphore('bar',
                                        transactional=self.TRANSACTIONAL)
     self.assertEqual('payload', payload)
     self.assertEqual(["Run-once semaphore memcache payload read error."],
                      self.loggingDouble.messages['critical'])
Code example #8
File: lock_test.py Project: oikmar/fantasm
 def test_readRunOnceSemaphore_payload_error(self):
     sem = RunOnceSemaphore('foo', None)
     sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     payload = sem.readRunOnceSemaphore('bar',
                                        transactional=self.TRANSACTIONAL)
     self.assertEqual('payload', payload)
     self.assertEqual(1, len(self.loggingDouble.messages['critical']))
     self.assertTrue(self.loggingDouble.messages['critical'][0]\
                     .startswith("Run-once semaphore memcache payload read error."))
Code example #9
File: lock_test.py Project: BobDohnal/fantasm
 def test_writeRunOnceSemaphore(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(None, memcache.get('foo'))
     self.assertEqual(0, _FantasmTaskSemaphore.all(namespace='').count())
     success, payload = sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     self.assertTrue(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all(namespace='').count())
     self.assertEqual('payload', _FantasmTaskSemaphore.all(namespace='').get().payload)
Code example #10
 def execute(self, context, obj):
     """ Writes the CSV file """
     
     # fetch the CsvProgressCounter first; it is the parent of all the other
     # Models and is needed again after the run-once guard below
     counter = CsvProgressCounter.get_by_key_name(context.instanceName)

     # if we already wrote a file, don't do anything
     semaphore = RunOnceSemaphore(context.instanceName, context)
     if not semaphore.readRunOnceSemaphore(payload='payload'):

         # fetch the single aggregated results Model
         aggResults = CsvIntermediateResults.get_by_key_name(context.instanceName, counter)
         
         # open the file
         fileName = files.blobstore.create(mime_type='application/octet-stream')
         with files.open(fileName, 'a') as f:
             
             # the csv module has a convenient row writing interface
             writer = csv.writer(f)
             
             # this queries for all the intermediate results
             query = CsvIntermediateResults.all().ancestor(counter)
             for results in query:
                 
                 # the aggregated results may also be in the results, so skip them
                 if aggResults and results.key() == aggResults.key():
                     continue
                 
                 # for all the intermediate data, write the rows
                 data = results.data
                 for item in data:
                     # aggResults may be None if no aggregated results exist
                     rows = self.getRows(context, obj, item,
                                         aggResults.data if aggResults else None)
                     if rows:
                         for row in rows:
                             writer.writerow(row)
             
             if aggResults:
                 # now also write down any specific aggregated data rows
                 rows = self.getAggregatedRows(context, obj, aggResults.data)
                 if rows:
                     for row in rows:
                         writer.writerow(row)
         
         # finalize the file
         files.finalize(fileName)
         
         # FIXME: what to do with this?
         blobKey = files.blobstore.get_blob_key(fileName)
         
         # at this point we have successfully written the file, let's make sure we don't do it again
         # if a retry occurs downstream
         semaphore.writeRunOnceSemaphore(payload='payload')
         
     # store the key of the counter (i.e. the parent of intermediate results) for cleanup
     context[COUNTER_KEY_PARAM] = counter.key()
     return OK_EVENT
Code example #11
File: lock_test.py Project: insad/fantasm
 def test_writeRunOnceSemaphore(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(None, memcache.get('foo'))
     self.assertEqual(0, _FantasmTaskSemaphore.all().count())
     success, payload = sem.writeRunOnceSemaphore(
         'payload', transactional=self.TRANSACTIONAL)
     self.assertTrue(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all().count())
     self.assertEqual('payload', _FantasmTaskSemaphore.all().get().payload)
Code example #12
File: lock_test.py Project: iki/fantasm
 def test_writeRunOnceSemaphore_second_time_False_memcache_expired(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(None, memcache.get('foo'))
     self.assertEqual(0, _FantasmTaskSemaphore.all().count())
     success, payload = sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     self.assertTrue(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all().count())
     self.assertEqual('payload', _FantasmTaskSemaphore.all().get().payload)
     memcache.delete('foo')
     success, payload = sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     self.assertFalse(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all().count())
     self.assertEqual('payload', _FantasmTaskSemaphore.all().get().payload)
Code example #13
File: lock_test.py Project: iki/fantasm
 def test_writeRunOnceSemaphore_second_time_wrong_payload_memcache(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(None, memcache.get('foo'))
     self.assertEqual(0, _FantasmTaskSemaphore.all().count())
     success, payload = sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     self.assertTrue(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all().count())
     self.assertEqual('payload', _FantasmTaskSemaphore.all().get().payload)
     memcache.set('foo', 'bar')
     success, payload = sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     self.assertEqual(["Run-once semaphore memcache payload write error."], 
                      self.loggingDouble.messages['critical'])
     self.assertFalse(success)
     self.assertEqual('bar', payload)
     self.assertEqual('bar', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all().count())
     self.assertEqual('payload', _FantasmTaskSemaphore.all().get().payload)
Code example #14
File: lock_test.py Project: insad/fantasm
 def test_writeRunOnceSemaphore_second_time_wrong_payload_memcache(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(None, memcache.get('foo'))
     self.assertEqual(0, _FantasmTaskSemaphore.all().count())
     success, payload = sem.writeRunOnceSemaphore(
         'payload', transactional=self.TRANSACTIONAL)
     self.assertTrue(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all().count())
     self.assertEqual('payload', _FantasmTaskSemaphore.all().get().payload)
     memcache.set('foo', 'bar')
     success, payload = sem.writeRunOnceSemaphore(
         'payload', transactional=self.TRANSACTIONAL)
     self.assertEqual(["Run-once semaphore memcache payload write error."],
                      self.loggingDouble.messages['critical'])
     self.assertFalse(success)
     self.assertEqual('bar', payload)
     self.assertEqual('bar', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all().count())
     self.assertEqual('payload', _FantasmTaskSemaphore.all().get().payload)
Code example #15
File: lock_test.py Project: oikmar/fantasm
 def test_writeRunOnceSemaphore_second_time_False_memcache_expired(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(None, memcache.get('foo'))
     self.assertEqual(0, _FantasmTaskSemaphore.all(namespace='').count())
     success, payload = sem.writeRunOnceSemaphore(
         'payload', transactional=self.TRANSACTIONAL)
     self.assertTrue(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all(namespace='').count())
     self.assertEqual('payload',
                      _FantasmTaskSemaphore.all(namespace='').get().payload)
     memcache.delete('foo')
     success, payload = sem.writeRunOnceSemaphore(
         'payload', transactional=self.TRANSACTIONAL)
     self.assertFalse(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all(namespace='').count())
     self.assertEqual('payload',
                      _FantasmTaskSemaphore.all(namespace='').get().payload)
Code example #16
File: lock_test.py Project: BobDohnal/fantasm
 def test_writeRunOnceSemaphore_second_time_wrong_payload_datastore(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(None, memcache.get('foo'))
     self.assertEqual(0, _FantasmTaskSemaphore.all(namespace='').count())
     success, payload = sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     self.assertTrue(success)
     self.assertEqual('payload', payload)
     self.assertEqual('payload', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all(namespace='').count())
     self.assertEqual('payload', _FantasmTaskSemaphore.all(namespace='').get().payload)
     e = _FantasmTaskSemaphore.all(namespace='').get()
     e.payload = 'bar'
     e.put()
     memcache.delete('foo')
     success, payload = sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     self.assertEqual(1, len(self.loggingDouble.messages['critical']))
     self.assertTrue(self.loggingDouble.messages['critical'][0]\
                     .startswith("Run-once semaphore datastore payload write error."))
     self.assertFalse(success)
     self.assertEqual('bar', payload)
     self.assertEqual('bar', memcache.get('foo'))
     self.assertEqual(1, _FantasmTaskSemaphore.all(namespace='').count())
     self.assertEqual('bar', _FantasmTaskSemaphore.all(namespace='').get().payload)
Code example #17
File: state.py Project: BobDohnal/fantasm
                raise # let it bubble up quietly
            except Exception, e:
                level = context.logger.error
                if e.__class__ in TRANSIENT_ERRORS:
                    level = context.logger.warn
                level('Error processing action for state. (Machine %s, State %s, Action %s)',
                      context.machineName, context.currentState.name, context.currentState.doAction.__class__)
                raise

        if transition.target.isFanIn:

            # this prevents fan-in from re-counting the data if there is an Exception
            # or DeadlineExceeded _after_ doAction.execute(...) succeeds
            index = context.get(constants.INDEX_PARAM) or contextOrContexts[0].get(constants.INDEX_PARAM)
            workIndex = '%s-%d' % (taskNameBase, knuthHash(index))
            semaphore = RunOnceSemaphore(workIndex, context)
            semaphore.writeRunOnceSemaphore(payload=obj[constants.TASK_NAME_PARAM])

            try:
                # at this point we have processed the work items, delete them
                task = Task(name=obj[constants.TASK_NAME_PARAM] + '-cleanup',
                            url=constants.DEFAULT_CLEANUP_URL,
                            params={constants.WORK_INDEX_PARAM: workIndex})
                context.Queue(name=constants.DEFAULT_CLEANUP_QUEUE_NAME).add(task)

            except (TaskAlreadyExistsError, TombstonedTaskError):
                context.logger.info("Fan-in cleanup Task already exists.")

            if context.get('UNITTEST_RAISE_AFTER_FAN_IN'): # only way to generate this failure
                if not contextOrContexts.guarded:
                    raise Exception()
Code example #18
File: state.py Project: utkarshx/UserInfuser
    def dispatch(self, context, event, obj):
        """ Fires the transition and executes the next States's entry, do and exit actions.
            
        @param context: an FSMContext instance
        @param event: a string event to dispatch to the State
        @param obj: an object that the Transition can operate on  
        @return: the event returned from the next state's main action.
        """
        transition = self.getTransition(event)

        if context.currentState.exitAction:
            try:
                context.currentAction = context.currentState.exitAction
                context.currentState.exitAction.execute(context, obj)
            except Exception:
                context.logger.error(
                    'Error processing exit action for state. (Machine %s, State %s, exitAction %s)',
                    context.machineName, context.currentState.name,
                    context.currentState.exitAction.__class__)
                raise

        # join the contexts of a fan-in
        contextOrContexts = context
        if transition.target.isFanIn:
            taskNameBase = context.getTaskName(event, fanIn=True)
            contextOrContexts = context.mergeJoinDispatch(event, obj)
            if not contextOrContexts:
                context.logger.info(
                    'Fan-in resulted in 0 contexts. Terminating machine. (Machine %s, State %s)',
                    context.machineName, context.currentState.name)
                obj[constants.TERMINATED_PARAM] = True

        transition.execute(context, obj)

        if context.currentState.entryAction:
            try:
                context.currentAction = context.currentState.entryAction
                context.currentState.entryAction.execute(
                    contextOrContexts, obj)
            except Exception:
                context.logger.error(
                    'Error processing entry action for state. (Machine %s, State %s, entryAction %s)',
                    context.machineName, context.currentState.name,
                    context.currentState.entryAction.__class__)
                raise

        if context.currentState.isContinuation:
            try:
                token = context.get(constants.CONTINUATION_PARAM, None)
                nextToken = context.currentState.doAction.continuation(
                    contextOrContexts, obj, token=token)
                if nextToken:
                    context.continuation(nextToken)
                context.pop(constants.CONTINUATION_PARAM,
                            None)  # pop this off because it is really long

            except Exception:
                context.logger.error(
                    'Error processing continuation for state. (Machine %s, State %s, continuation %s)',
                    context.machineName, context.currentState.name,
                    context.currentState.doAction.__class__)
                raise

        # either a fan-in resulted in no contexts, or a continuation was completed
        if obj.get(constants.TERMINATED_PARAM):
            return None

        nextEvent = None
        if context.currentState.doAction:
            try:
                context.currentAction = context.currentState.doAction
                nextEvent = context.currentState.doAction.execute(
                    contextOrContexts, obj)
            except Exception:
                context.logger.error(
                    'Error processing action for state. (Machine %s, State %s, Action %s)',
                    context.machineName, context.currentState.name,
                    context.currentState.doAction.__class__)
                raise

        if transition.target.isFanIn:

            # this prevents fan-in from re-counting the data if there is an Exception
            # or DeadlineExceeded _after_ doAction.execute(...) succeeds
            index = context.get(constants.INDEX_PARAM)
            workIndex = '%s-%d' % (taskNameBase, knuthHash(index))
            semaphore = RunOnceSemaphore(workIndex, context)
            semaphore.writeRunOnceSemaphore(
                payload=obj[constants.TASK_NAME_PARAM])

            try:
                # at this point we have processed the work items, delete them
                task = Task(name=obj[constants.TASK_NAME_PARAM] + '-cleanup',
                            url=constants.DEFAULT_CLEANUP_URL,
                            params={constants.WORK_INDEX_PARAM: workIndex})
                context.Queue(
                    name=constants.DEFAULT_CLEANUP_QUEUE_NAME).add(task)

            except (TaskAlreadyExistsError, TombstonedTaskError):
                context.logger.info("Fan-in cleanup Task already exists.")

            if context.get('UNITTEST_RAISE_AFTER_FAN_IN'):  # only way to generate this failure
                raise Exception()

        if nextEvent:
            if not isinstance(nextEvent, str) or not constants.NAME_RE.match(nextEvent):
                raise InvalidEventNameRuntimeError(nextEvent,
                                                   context.machineName,
                                                   context.currentState.name,
                                                   context.instanceName)

        return nextEvent
Code example #19
File: handlers.py Project: iki/fantasm
    def get_or_post(self, method='POST'):
        """ Handles the GET/POST request. 
        
        FIXME: this is getting a touch long
        """
        
        # ensure that we have our services for the next 30s (length of a single request)
        unavailable = set()
        for service in REQUIRED_SERVICES:
            if not CapabilitySet(service).is_enabled():
                unavailable.add(service)
        if unavailable:
            raise RequiredServicesUnavailableRuntimeError(unavailable)
        
        # the case of headers is inconsistent on dev_appserver and appengine
        # i.e. 'X-AppEngine-TaskRetryCount' vs. 'X-AppEngine-Taskretrycount'
        lowerCaseHeaders = dict([(key.lower(), value) for key, value in self.request.headers.items()])

        taskName = lowerCaseHeaders.get('x-appengine-taskname')
        retryCount = int(lowerCaseHeaders.get('x-appengine-taskretrycount', 0))
        
        # Taskqueue can occasionally invoke multiple tasks of the same name. Here, we'll use
        # a datastore transaction as a semaphore to determine if we should actually execute this or not.
        if taskName:
            semaphoreKey = '%s--%s' % (taskName, retryCount)
            semaphore = RunOnceSemaphore(semaphoreKey, None)
            if not semaphore.writeRunOnceSemaphore(payload='fantasm')[0]:
                # we can simply return here, this is a duplicate fired task
                logging.info('A duplicate task "%s" has been queued by taskqueue infrastructure. Ignoring.', taskName)
                self.response.status_code = 200
                return
            
        # pull out X-Fantasm-* headers
        headers = None
        for key, value in self.request.headers.items():
            if key.startswith(HTTP_REQUEST_HEADER_PREFIX):
                headers = headers or {}
                if ',' in value:
                    headers[key] = [v.strip() for v in value.split(',')]
                else:
                    headers[key] = value.strip()
            
        requestData = {'POST': self.request.POST, 'GET': self.request.GET}[method]
        method = requestData.get('method') or method
        
        machineName = getMachineNameFromRequest(self.request)
        
        # get the incoming instance name, if any
        instanceName = requestData.get(INSTANCE_NAME_PARAM)
        
        # get the incoming state, if any
        fsmState = requestData.get(STATE_PARAM)
        
        # get the incoming event, if any
        fsmEvent = requestData.get(EVENT_PARAM)
        
        assert not fsmState or instanceName # if we have a state, we should have an instanceName
        assert not fsmState or fsmEvent # if we have a state, we should have an event
        
        obj = TemporaryStateObject()
        
        # make a copy, add the data
        fsm = getCurrentFSM().createFSMInstance(machineName, 
                                                currentStateName=fsmState, 
                                                instanceName=instanceName,
                                                method=method,
                                                obj=obj,
                                                headers=headers)
        
        # in "immediate mode" we try to execute as much as possible in the current request
        # for the time being, this does not include things like fork/spawn/continuations/fan-in
        immediateMode = IMMEDIATE_MODE_PARAM in requestData.keys()
        if immediateMode:
            obj[IMMEDIATE_MODE_PARAM] = immediateMode
            obj[MESSAGES_PARAM] = []
            fsm.Queue = NoOpQueue # don't queue anything else
        
        # pylint: disable-msg=W0201
        # - initialized outside of ctor is ok in this case
        self.fsm = fsm # used for logging in handle_exception
        
        # pull all the data off the url and stuff into the context
        for key, value in requestData.items():
            if key in NON_CONTEXT_PARAMS:
                continue # these are special, don't put them in the data
            
            # deal with ...a=1&a=2&a=3...
            value = requestData.get(key)
            valueList = requestData.getall(key)
            if len(valueList) > 1:
                value = valueList
                
            if key.endswith('[]'):
                key = key[:-2]
                value = [value]
                
            if key in fsm.contextTypes.keys():
                fsm.putTypedValue(key, value)
            else:
                fsm[key] = value
        
        if not (fsmState or fsmEvent):
            
            # just queue up a task to run the initial state transition using retries
            fsm[STARTED_AT_PARAM] = time.time()
            
            # initialize the fsm, which returns the 'pseudo-init' event
            fsmEvent = fsm.initialize()
            
        else:
            
            # add the retry counter into the machine context from the header
            obj[RETRY_COUNT_PARAM] = retryCount
            
            # add the actual task name to the context
            obj[TASK_NAME_PARAM] = taskName
            
            # dispatch and return the next event
            fsmEvent = fsm.dispatch(fsmEvent, obj)
            
        # loop and execute until there are no more events - any exceptions
        # will make it out to the user in the response - useful for debugging
        if immediateMode:
            while fsmEvent:
                fsmEvent = fsm.dispatch(fsmEvent, obj)
            self.response.headers['Content-Type'] = 'application/json'
            data = {
                'obj' : obj,
                'context': fsm,
            }
            self.response.out.write(simplejson.dumps(data, cls=Encoder))
Code example #20
    def get_or_post(self, method='POST'):
        """ Handles the GET/POST request. 
        
        FIXME: this is getting a touch long
        """

        # ensure that we have our services for the next 30s (length of a single request)
        unavailable = set()
        for service in REQUIRED_SERVICES:
            if not CapabilitySet(service).is_enabled():
                unavailable.add(service)
        if unavailable:
            raise RequiredServicesUnavailableRuntimeError(unavailable)

        # the case of headers is inconsistent on dev_appserver and appengine
        # i.e. 'X-AppEngine-TaskRetryCount' vs. 'X-AppEngine-Taskretrycount'
        lowerCaseHeaders = dict([
            (key.lower(), value)
            for key, value in self.request.headers.items()
        ])

        taskName = lowerCaseHeaders.get('x-appengine-taskname')
        retryCount = int(lowerCaseHeaders.get('x-appengine-taskretrycount', 0))

        # Taskqueue can occasionally invoke multiple tasks of the same name. Here, we'll use
        # a datastore transaction as a semaphore to determine if we should actually execute this or not.
        if taskName:
            semaphoreKey = '%s--%s' % (taskName, retryCount)
            semaphore = RunOnceSemaphore(semaphoreKey, None)
            if not semaphore.writeRunOnceSemaphore(payload='fantasm')[0]:
                # we can simply return here, this is a duplicate fired task
                logging.info(
                    'A duplicate task "%s" has been queued by taskqueue infrastructure. Ignoring.',
                    taskName)
                self.response.status_code = 200
                return

        # pull out X-Fantasm-* headers
        headers = None
        for key, value in self.request.headers.items():
            if key.startswith(HTTP_REQUEST_HEADER_PREFIX):
                headers = headers or {}
                if ',' in value:
                    headers[key] = [v.strip() for v in value.split(',')]
                else:
                    headers[key] = value.strip()

        requestData = {
            'POST': self.request.POST,
            'GET': self.request.GET
        }[method]
        method = requestData.get('method') or method

        machineName = getMachineNameFromRequest(self.request)

        # get the incoming instance name, if any
        instanceName = requestData.get(INSTANCE_NAME_PARAM)

        # get the incoming state, if any
        fsmState = requestData.get(STATE_PARAM)

        # get the incoming event, if any
        fsmEvent = requestData.get(EVENT_PARAM)

        assert not fsmState or instanceName  # if we have a state, we should have an instanceName
        assert not fsmState or fsmEvent  # if we have a state, we should have an event

        obj = TemporaryStateObject()

        # make a copy, add the data
        fsm = getCurrentFSM().createFSMInstance(machineName,
                                                currentStateName=fsmState,
                                                instanceName=instanceName,
                                                method=method,
                                                obj=obj,
                                                headers=headers)

        # in "immediate mode" we try to execute as much as possible in the current request
        # for the time being, this does not include things like fork/spawn/continuations/fan-in
        immediateMode = IMMEDIATE_MODE_PARAM in requestData.keys()
        if immediateMode:
            obj[IMMEDIATE_MODE_PARAM] = immediateMode
            obj[MESSAGES_PARAM] = []
            fsm.Queue = NoOpQueue  # don't queue anything else

        # pylint: disable-msg=W0201
        # - initialized outside of ctor is ok in this case
        self.fsm = fsm  # used for logging in handle_exception

        # pull all the data off the url and stuff into the context
        for key, value in requestData.items():
            if key in NON_CONTEXT_PARAMS:
                continue  # these are special, don't put them in the data

            # deal with ...a=1&a=2&a=3...
            value = requestData.get(key)
            valueList = requestData.getall(key)
            if len(valueList) > 1:
                value = valueList

            if key.endswith('[]'):
                key = key[:-2]
                value = [value]

            if key in fsm.contextTypes.keys():
                fsm.putTypedValue(key, value)
            else:
                fsm[key] = value

        if not (fsmState or fsmEvent):

            # just queue up a task to run the initial state transition using retries
            fsm[STARTED_AT_PARAM] = time.time()

            # initialize the fsm, which returns the 'pseudo-init' event
            fsmEvent = fsm.initialize()

        else:

            # add the retry counter into the machine context from the header
            obj[RETRY_COUNT_PARAM] = retryCount

            # add the actual task name to the context
            obj[TASK_NAME_PARAM] = taskName

            # dispatch and return the next event
            fsmEvent = fsm.dispatch(fsmEvent, obj)

        # loop and execute until there are no more events - any exceptions
        # will make it out to the user in the response - useful for debugging
        if immediateMode:
            while fsmEvent:
                fsmEvent = fsm.dispatch(fsmEvent, obj)
            self.response.headers['Content-Type'] = 'application/json'
            data = {
                'obj': obj,
                'context': fsm,
            }
            self.response.out.write(json.dumps(data, cls=Encoder))
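
The duplicate-task guard near the top of get_or_post is the crux of this handler: taskqueue occasionally fires the same named task more than once, so the first writeRunOnceSemaphore wins and every later invocation returns early. Here is the same guard factored into a small helper, as a sketch (the isDuplicateTask name is hypothetical; the key scheme and payload come from the code above):

    def isDuplicateTask(taskName, retryCount):
        """ True if this (task name, retry count) pair was already handled. """
        semaphoreKey = '%s--%s' % (taskName, retryCount)
        semaphore = RunOnceSemaphore(semaphoreKey, None)
        success, _ = semaphore.writeRunOnceSemaphore(payload='fantasm')
        return not success

Including retryCount in the key means a genuine retry, which arrives with an incremented count, is not mistaken for a duplicate.
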
Code example #21
File: fsm.py Project: iki/fantasm
 def _queueDispatchFanIn(self, nextEvent, fanInPeriod=0, retryOptions=None, queueName=None):
     """ Queues a call to .dispatch(nextEvent) in the task queue, or saves the context to the 
     datastore for processing by the queued .dispatch(nextEvent)
     
     @param nextEvent: a string event 
     @param fanInPeriod: the period of time between fan in Tasks 
     @param queueName: the queue name to Queue into 
     @return: a taskqueue.Task instance which may or may not have been queued already
     """
     assert nextEvent is not None
     assert not self.get(constants.INDEX_PARAM) # fan-in after fan-in is not allowed
     assert queueName
     
     # we pop this off here because we do not want the fan-out/continuation param as part of the
     # task name, otherwise we lose the fan-in - each fan-in gets one work unit.
     self.pop(constants.GEN_PARAM, None)
     fork = self.pop(constants.FORK_PARAM, None)
     
     # transfer the fan-in-group into the context (under a fixed value key) so that states beyond 
     # the fan-in get unique Task names
     # FIXME: this will likely change once we formalize what to do post fan-in
     transition = self.currentState.getTransition(nextEvent)
     if self.get(transition.target.fanInGroup) is not None:
         self[constants.FAN_IN_GROUP_PARAM] = self[transition.target.fanInGroup]
     
     taskNameBase = self.getTaskName(nextEvent, fanIn=True)
     rwlock = ReadWriteLock(taskNameBase, self)
     index = rwlock.currentIndex()
         
     # (***)
     #
     # grab the lock - memcache.incr()
     # 
     # on Task retry, multiple incr() calls are possible. possible ways to handle:
     #
     # 1. release the lock in a 'finally' clause, but then risk missing a work
     #    package because acquiring the read lock will succeed even though the
     #    work package was not written yet.
     #
     # 2. allow the lock to get too high. the fan-in logic attempts to wait for 
     #    work packages across multiple-retry attempts, so this seems like the 
     #    best option. we basically trade a bit of latency in fan-in for reliability.
     #    
     rwlock.acquireWriteLock(index, nextEvent=nextEvent)
     
     # insert the work package, which is simply a serialized FSMContext
     workIndex = '%s-%d' % (taskNameBase, knuthHash(index))
     
     # on retry, we want to ensure we get the same work index for this task
     actualTaskName = self.__obj[constants.TASK_NAME_PARAM]
     indexKeyName = 'workIndex-' + '-'.join([str(i) for i in [actualTaskName, fork] if i]) or None
     semaphore = RunOnceSemaphore(indexKeyName, self)
     
     # check if the workIndex changed during retry
     semaphoreWritten = False
     if self.__obj[constants.RETRY_COUNT_PARAM] > 0:
         # see comment (A) in self._queueDispatchFanIn(...)
         time.sleep(constants.DATASTORE_ASYNCRONOUS_INDEX_WRITE_WAIT_TIME)
         payload = semaphore.readRunOnceSemaphore(payload=workIndex, transactional=False)
         if payload:
             semaphoreWritten = True
             if payload != workIndex:
                 self.logger.info("Work index changed from '%s' to '%s' on retry.", payload, workIndex)
                 workIndex = payload
     
     # update this here so it gets written down into the work package too
     self[constants.INDEX_PARAM] = index
             
     # write down two models, one actual work package, one idempotency package
     keyName = '-'.join([str(i) for i in [actualTaskName, fork] if i]) or None
     work = _FantasmFanIn(context=self, workIndex=workIndex, key_name=keyName)
     
     # close enough to idempotent, but could still write only one of the entities
     # FIXME: could be made faster using a bulk put, but this interface is cleaner
     if not semaphoreWritten:
         semaphore.writeRunOnceSemaphore(payload=workIndex, transactional=False)
     
     # put the work item
     db.put(work)
     
     # (A) now the datastore is asynchronously writing the indices, so the work package may
     #     not show up in a query for a period of time. there is a corresponding time.sleep()
     #     in the fan-in of self.mergeJoinDispatch(...) 
         
     # release the lock - memcache.decr()
     rwlock.releaseWriteLock(index)
         
     try:
         
         # insert a task to run in the future and process a bunch of work packages
         now = time.time()
         url = self.buildUrl(self.currentState, nextEvent)
         params = self.buildParams(self.currentState, nextEvent)
         task = Task(name='%s-%d' % (taskNameBase, index),
                     method=self.method,
                     url=url,
                     params=params,
                     eta=datetime.datetime.utcfromtimestamp(now) + datetime.timedelta(seconds=fanInPeriod),
                     headers=self.headers,
                     retry_options=retryOptions)
         self.Queue(name=queueName).add(task)
         return task
     
     except (TaskAlreadyExistsError, TombstonedTaskError):
         pass # Fan-in magic
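
workIndex is derived from the lock index via knuthHash so that consecutive indices spread across the key space. The hash itself is not shown in these excerpts; a common form of Knuth's multiplicative hash, offered as an assumption about what it computes:

    def knuthHash(number):
        """ Multiplicative hash; 2654435761 is a prime close to 2**32 / phi. """
        # assumption: fantasm's actual constant or width may differ
        return (number * 2654435761) % 2**32
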
Code example #22
File: lock_test.py Project: BobDohnal/fantasm
 def test_readRunOnceSemaphore_not_written(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(None, sem.readRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL))
Code example #23
File: state.py Project: nlake44/GAEBenchmark
 def dispatch(self, context, event, obj):
     """ Fires the transition and executes the next States's entry, do and exit actions.
         
     @param context: an FSMContext instance
     @param event: a string event to dispatch to the State
     @param obj: an object that the Transition can operate on  
     @return: the event returned from the next state's main action.
     """
     transition = self.getTransition(event)
     
     if context.currentState.exitAction:
         try:
             context.currentAction = context.currentState.exitAction
             context.currentState.exitAction.execute(context, obj)
         except Exception:
              context.logger.error('Error processing exit action for state. (Machine %s, State %s, exitAction %s)',
                           context.machineName, 
                           context.currentState.name, 
                           context.currentState.exitAction.__class__)
             raise
     
     # join the contexts of a fan-in
     contextOrContexts = context
     if transition.target.isFanIn:
         taskNameBase = context.getTaskName(event, fanIn=True)
         contextOrContexts = context.mergeJoinDispatch(event, obj)
         if not contextOrContexts and not contextOrContexts.guarded:
              # by implementation, EVERY fan-in should have at least one work package available to it;
              # an empty result is likely caused by an index writing delay, and it is suitable to simply retry this task
             raise FanInNoContextsAvailableRuntimeError(event, 
                                                        context.machineName,
                                                        context.currentState.name,
                                                        context.instanceName)
             
     transition.execute(context, obj)
     
     if context.currentState.entryAction:
         try:
             context.currentAction = context.currentState.entryAction
             context.currentState.entryAction.execute(contextOrContexts, obj)
         except Exception:
             context.logger.error('Error processing entry action for state. (Machine %s, State %s, entryAction %s)',
                           context.machineName, 
                           context.currentState.name, 
                           context.currentState.entryAction.__class__)
             raise
         
     if context.currentState.isContinuation:
         try:
             token = context.get(constants.CONTINUATION_PARAM, None)
             nextToken = context.currentState.doAction.continuation(contextOrContexts, obj, token=token)
             if nextToken:
                 context.continuation(nextToken)
             context.pop(constants.CONTINUATION_PARAM, None) # pop this off because it is really long
             
         except Exception:
             context.logger.error('Error processing continuation for state. (Machine %s, State %s, continuation %s)',
                           context.machineName, 
                           context.currentState.name, 
                           context.currentState.doAction.__class__)
             raise
         
     # either a fan-in resulted in no contexts, or a continuation was completed
     if obj.get(constants.TERMINATED_PARAM):
         return None
         
     nextEvent = None
     if context.currentState.doAction:
         try:
             context.currentAction = context.currentState.doAction
             nextEvent = context.currentState.doAction.execute(contextOrContexts, obj)
         except Exception:
             context.logger.error('Error processing action for state. (Machine %s, State %s, Action %s)',
                           context.machineName, 
                           context.currentState.name, 
                           context.currentState.doAction.__class__)
             raise
         
     if transition.target.isFanIn:
         
         # this prevents fan-in from re-counting the data if there is an Exception
         # or DeadlineExceeded _after_ doAction.execute(...) succeeds
         index = context.get(constants.INDEX_PARAM)
         workIndex = '%s-%d' % (taskNameBase, knuthHash(index))
         semaphore = RunOnceSemaphore(workIndex, context)
         semaphore.writeRunOnceSemaphore(payload=obj[constants.TASK_NAME_PARAM])
         
         try:
             # at this point we have processed the work items, delete them
             task = Task(name=obj[constants.TASK_NAME_PARAM] + '-cleanup', 
                         url=constants.DEFAULT_CLEANUP_URL, 
                         params={constants.WORK_INDEX_PARAM: workIndex})
             context.Queue(name=constants.DEFAULT_CLEANUP_QUEUE_NAME).add(task)
             
         except (TaskAlreadyExistsError, TombstonedTaskError):
             context.logger.info("Fan-in cleanup Task already exists.")
             
         if context.get('UNITTEST_RAISE_AFTER_FAN_IN'): # only way to generate this failure
             if not contextOrContexts.guarded:
                 raise Exception()
             
     if nextEvent:
         if not isinstance(nextEvent, str) or not constants.NAME_RE.match(nextEvent):
             raise InvalidEventNameRuntimeError(nextEvent, context.machineName, context.currentState.name,
                                                context.instanceName)
         
     return nextEvent
Code example #24
File: lock_test.py Project: BobDohnal/fantasm
 def test_readRunOnceSemaphore(self):
     sem = RunOnceSemaphore('foo', None)
     sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     payload = sem.readRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     self.assertEqual('payload', payload)
Code example #25
File: lock_test.py Project: insad/fantasm
 def test_readRunOnceSemaphore_not_written(self):
     sem = RunOnceSemaphore('foo', None)
     self.assertEqual(
         None,
         sem.readRunOnceSemaphore('payload',
                                  transactional=self.TRANSACTIONAL))
Code example #26
File: lock_test.py Project: BobDohnal/fantasm
 def test_readRunOnceSemaphore_memcache_expired(self):
     sem = RunOnceSemaphore('foo', None)
     sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     memcache.delete('foo')
     payload = sem.readRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     self.assertEqual('payload', payload)
Code example #27
File: lock_test.py Project: insad/fantasm
 def test_readRunOnceSemaphore(self):
     sem = RunOnceSemaphore('foo', None)
     sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     payload = sem.readRunOnceSemaphore('payload',
                                        transactional=self.TRANSACTIONAL)
     self.assertEqual('payload', payload)
Code example #28
    def _queueDispatchFanIn(self,
                            nextEvent,
                            fanInPeriod=0,
                            retryOptions=None,
                            queueName=None):
        """ Queues a call to .dispatch(nextEvent) in the task queue, or saves the context to the 
        datastore for processing by the queued .dispatch(nextEvent)
        
        @param nextEvent: a string event 
        @param fanInPeriod: the period of time between fan in Tasks 
        @param queueName: the queue name to Queue into 
        @return: a taskqueue.Task instance which may or may not have been queued already
        """
        assert nextEvent is not None
        assert not self.get(
            constants.INDEX_PARAM)  # fan-in after fan-in is not allowed
        assert queueName

        # we pop this off here because we do not want the fan-out/continuation param as part of the
        # task name, otherwise we lose the fan-in - each fan-in gets one work unit.
        self.pop(constants.GEN_PARAM, None)
        fork = self.pop(constants.FORK_PARAM, None)

        taskNameBase = self.getTaskName(nextEvent, fanIn=True)
        rwlock = ReadWriteLock(taskNameBase, self)
        index = rwlock.currentIndex()

        # (***)
        #
        # grab the lock - memcache.incr()
        #
        # on Task retry, multiple incr() calls are possible. possible ways to handle:
        #
        # 1. release the lock in a 'finally' clause, but then risk missing a work
        #    package because acquiring the read lock will succeed even though the
        #    work package was not written yet.
        #
        # 2. allow the lock to get too high. the fan-in logic attempts to wait for
        #    work packages across multiple-retry attempts, so this seems like the
        #    best option. we basically trade a bit of latency in fan-in for reliability.
        #
        rwlock.acquireWriteLock(index, nextEvent=nextEvent)

        # insert the work package, which is simply a serialized FSMContext
        workIndex = '%s-%d' % (taskNameBase, knuthHash(index))

        # on retry, we want to ensure we get the same work index for this task
        actualTaskName = self.__obj[constants.TASK_NAME_PARAM]
        indexKeyName = 'workIndex-' + '-'.join(
            [str(i) for i in [actualTaskName, fork] if i]) or None
        semaphore = RunOnceSemaphore(indexKeyName, self)

        # check if the workIndex changed during retry
        semaphoreWritten = False
        if self.__obj[constants.RETRY_COUNT_PARAM] > 0:
            # see comment (A) in self._queueDispatchFanIn(...)
            time.sleep(constants.DATASTORE_ASYNCRONOUS_INDEX_WRITE_WAIT_TIME)
            payload = semaphore.readRunOnceSemaphore(payload=workIndex,
                                                     transactional=False)
            if payload:
                semaphoreWritten = True
                if payload != workIndex:
                    self.logger.info(
                        "Work index changed from '%s' to '%s' on retry.",
                        payload, workIndex)
                    workIndex = payload

        # write down two models, one actual work package, one idempotency package
        keyName = '-'.join([str(i)
                            for i in [actualTaskName, fork] if i]) or None
        work = _FantasmFanIn(context=self,
                             workIndex=workIndex,
                             key_name=keyName)

        # close enough to idempotent, but could still write only one of the entities
        # FIXME: could be made faster using a bulk put, but this interface is cleaner
        if not semaphoreWritten:
            semaphore.writeRunOnceSemaphore(payload=workIndex,
                                            transactional=False)

        # put the work item
        db.put(work)

        # (A) now the datastore is asynchronously writing the indices, so the work package may
        #     not show up in a query for a period of time. there is a corresponding time.sleep()
        #     in the fan-in of self.mergeJoinDispatch(...)

        # release the lock - memcache.decr()
        rwlock.releaseWriteLock(index)

        try:

            # insert a task to run in the future and process a bunch of work packages
            now = time.time()
            self[constants.INDEX_PARAM] = index
            url = self.buildUrl(self.currentState, nextEvent)
            params = self.buildParams(self.currentState, nextEvent)
            task = Task(name='%s-%d' % (taskNameBase, index),
                        method=self.method,
                        url=url,
                        params=params,
                        eta=datetime.datetime.utcfromtimestamp(now) +
                        datetime.timedelta(seconds=fanInPeriod),
                        headers=self.headers,
                        retry_options=retryOptions)
            self.Queue(name=queueName).add(task)
            return task

        except (TaskAlreadyExistsError, TombstonedTaskError):
            pass  # Fan-in magic
Code example #29
File: state.py Project: oikmar/fantasm
                    level = context.logger.warn
                level(
                    'Error processing action for state. (Machine %s, State %s, Action %s)',
                    context.machineName, context.currentState.name,
                    context.currentState.doAction.__class__)
                raise

        if transition.target.isFanIn:

            # this prevents fan-in from re-counting the data if there is an Exception
            # or DeadlineExceeded _after_ doAction.execute(...) succeeds
            index = context.get(
                constants.INDEX_PARAM) or contextOrContexts[0].get(
                    constants.INDEX_PARAM)
            workIndex = '%s-%d' % (taskNameBase, knuthHash(index))
            semaphore = RunOnceSemaphore(workIndex, context)
            semaphore.writeRunOnceSemaphore(
                payload=obj[constants.TASK_NAME_PARAM])

            try:
                # at this point we have processed the work items, delete them
                task = Task(name=obj[constants.TASK_NAME_PARAM] + '-cleanup',
                            url=constants.DEFAULT_CLEANUP_URL,
                            params={constants.WORK_INDEX_PARAM: workIndex})
                context.Queue(
                    name=constants.DEFAULT_CLEANUP_QUEUE_NAME).add(task)

            except (TaskAlreadyExistsError, TombstonedTaskError):
                context.logger.info("Fan-in cleanup Task already exists.")

            if context.get('UNITTEST_RAISE_AFTER_FAN_IN'):  # only way to generate this failure
                raise Exception()
Code example #30
File: lock_test.py Project: iki/fantasm
 def test_readRunOnceSemaphore_payload_error(self):
     sem = RunOnceSemaphore('foo', None)
     sem.writeRunOnceSemaphore('payload', transactional=self.TRANSACTIONAL)
     payload = sem.readRunOnceSemaphore('bar', transactional=self.TRANSACTIONAL)
     self.assertEqual('payload', payload)
     self.assertEqual(["Run-once semaphore memcache payload read error."], self.loggingDouble.messages['critical'])