コード例 #1
0
ファイル: factory.py プロジェクト: soulhez/Goperation
def flow_factory(job):
    """Build a linear taskflow for a scheduler job.

    @param job:                 class: sqlalchemy:session
    """
    retryer = None
    if job.retry:
        retryer = Times(attempts=job.retry, revert_all=job.revertall)
    main_flow = lf.Flow('scheduler_taskflow', retry=retryer)
    for index, step in enumerate(job.steps):
        # getattr without a default raises AttributeError on a missing
        # attribute, which made the `or` fallback below unreachable;
        # pass None so the analyzer module is tried when the executor
        # module has no matching attribute.
        _executor = getattr(executor, step.executor, None)
        _analyzer = getattr(analyzer, step.executor, None)
        task_executor = _executor or _analyzer
        task = task_executor.builder('%d-%d' % (job.job_id, index), step)
        main_flow.add(task)
    return main_flow
コード例 #2
0
def mysql_flow_factory(app,
                       store,
                       create_cls=MysqlCreate,
                       backup_cls=MysqlDump,
                       update_cls=MysqlUpdate):
    """Build an unordered flow of per-database create/backup/update
    sub-flows for *app*.

    Returns None when the app has no databases or no database needs
    any work.
    """
    if not app.databases:
        return None
    middleware = app.middleware
    ep = middleware.endpoint
    ent = middleware.entity
    container = uf.Flow('db_cbu_%s_%d' % (ep, ent))
    for idx, db in enumerate(app.databases):
        db_retry = None
        if db.retry:
            db_retry = Times(attempts=db.retry,
                             name='db_retry_%s_%d_%d' % (ep, ent, idx))
        sub = lf.Flow(name='db_%s_%d_%d' % (ep, ent, idx),
                      retry=db_retry)
        if db.create:
            sub.add(create_cls(middleware, db))
        if db.backup:
            sub.add(backup_cls(middleware, db))
        if db.update:
            # Rolling back an update requires a backup taken beforehand.
            if db.update.rollback and not db.backup:
                raise ValueError('Database rollback need backup')
            sub.add(update_cls(middleware, db))
        if len(sub):
            container.add(sub)

    return container if len(container) else None
コード例 #3
0
ファイル: merge.py プロジェクト: lolizeppelin/gogamechen1
def merge_entitys(appendpoint, uuid, entity, databases):
    """Merge the source entities' game databases into the target *entity*.

    Two phases, both resumable via the pickled steps.dat progress file:
    1. A parallel "prepare" phase that swallows and dumps data from every
       source entity still pending.
    2. A serial "merge" phase that safely cleans the target database,
       re-initializes it from init.sql, inserts every entity's dump and
       runs the post-merge work.

    Raises exceptions.MergeException on any inconsistent or failed step.
    """
    datadb = databases[common.DATADB]
    mergepath = 'merge-%s' % uuid
    mergeroot = os.path.join(appendpoint.endpoint_backup, mergepath)
    stepsfile = os.path.join(mergeroot, 'steps.dat')
    initfile = os.path.join(mergeroot, 'init.sql')
    if not os.path.exists(stepsfile):
        raise exceptions.MergeException('Steps file not exist')
    with open(stepsfile, 'rb') as f:
        data = cPickle.load(f)
        steps = data['steps']
    prepares = []
    for _entity, step in six.iteritems(steps):
        # When a failed post-merge sql is known to be harmless, a step may
        # be hand-edited to FINISHED to skip re-running the merge steps.
        if step == FINISHED:
            # One FINISHED step implies the whole merge finished; any
            # other state alongside it is an inconsistency.
            for _step in six.itervalues(steps):
                if _step != FINISHED:
                    raise exceptions.MergeException('Steps is finish?')
            appendpoint.client.finish_merge(uuid)
            appendpoint.flush_config(entity,
                                     databases,
                                     opentime=data['opentime'],
                                     chiefs=data['chiefs'])
            return
        if step != INSERT:
            prepares.append(_entity)
    if prepares:
        # BUGFIX: min() was previously called before this guard; with an
        # empty prepares list (all steps already at INSERT) it raised
        # ValueError. Compute the smallest entity only when needed.
        mini_entity = min(prepares)
        name = 'prepare-merge-at-%d' % int(time.time())
        book = LogBook(name=name)
        store = dict(timeout=5,
                     dtimeout=600,
                     mergeroot=mergeroot,
                     entity=entity)
        taskflow_session = build_session(
            'sqlite:///%s' % os.path.join(mergeroot, '%s.db' % name))
        connection = Connection(taskflow_session)

        prepare_uflow = uf.Flow(name)
        for _entity in prepares:
            entity_flow = lf.Flow('prepare-%d' % _entity)
            entity_flow.add(Swallow(uuid, steps, _entity, appendpoint))
            entity_flow.add(
                DumpData(uuid, steps, _entity, appendpoint,
                         _entity != mini_entity))
            entity_flow.add(Swallowed(uuid, steps, _entity, appendpoint))
            prepare_uflow.add(entity_flow)
        engine = load(connection,
                      prepare_uflow,
                      store=store,
                      book=book,
                      engine_cls=ParallelActionEngine,
                      max_workers=4)
        try:
            engine.run()
        except Exception as e:
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.exception('Prepare merge task execute fail')
            raise exceptions.MergeException(
                'Prepare merge task execute fail, %s %s' %
                (e.__class__.__name__, str(e)))
        finally:
            connection.session = None
            taskflow_session.close()
            # Persist step progress even on failure so the merge can resume.
            with open(stepsfile, 'wb') as f:
                cPickle.dump(data, f)

    # Sanity check: every step must now be at INSERT with its dump present.
    for _entity, step in six.iteritems(steps):
        if step != INSERT:
            raise exceptions.MergeException('Some step not on %s' % INSERT)
        if not os.path.exists(os.path.join(mergeroot, sqlfile(_entity))):
            raise exceptions.MergeException('Entity %d sql file not exist' %
                                            _entity)

    if not os.path.exists(initfile):
        LOG.error('Init database file not exist')
        raise exceptions.MergeException('Init database file not exist')
    LOG.info('Prepare merge success, try merge database')

    now = int(time.time())
    name = 'merge-at-%d' % now
    book = LogBook(name=name)
    store = dict(timeout=1800, root=mergeroot, database=datadb, timeline=now)
    taskflow_session = build_session('sqlite:///%s' %
                                     os.path.join(mergeroot, '%s.db' % name))
    connection = Connection(taskflow_session)

    merge_flow = lf.Flow('merge-to')
    merge_flow.add(SafeCleanDb())
    merge_flow.add(InitDb())
    insert_lflow = lf.Flow('insert-db')
    # Shared mutable stop flag for the insert tasks.
    stoper = [0]
    for _entity in steps:
        insert_lflow.add(InserDb(_entity, stoper))
    merge_flow.add(insert_lflow)
    merge_flow.add(PostDo(uuid, appendpoint))

    engine = load(connection,
                  merge_flow,
                  store=store,
                  book=book,
                  engine_cls=ParallelActionEngine,
                  max_workers=4)
    try:
        engine.run()
    except Exception as e:
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.exception('Merge database task execute fail')
        raise exceptions.MergeException(
            'Merge database task execute fail, %s %s' %
            (e.__class__.__name__, str(e)))
    else:
        # Mark all steps done, persist, then notify and flush config.
        for _entity in steps:
            steps[_entity] = FINISHED
        with open(stepsfile, 'wb') as f:
            cPickle.dump(data, f)
        appendpoint.client.finish_merge(uuid)
        appendpoint.flush_config(entity,
                                 databases,
                                 opentime=data['opentime'],
                                 chiefs=data['chiefs'])
        LOG.info('Merge task %s all finish' % uuid)
    finally:
        connection.session = None
        taskflow_session.close()
コード例 #4
0
ファイル: test_api.py プロジェクト: zliang90/simpleflow
def test_flow_factory():
    """Return the three-task example linear flow (second task fails)."""
    flow = lf.Flow('example')
    flow.add(TestTask(name='first'),
             UnfortunateTask(name='boom'),
             TestTask(name='second'))
    return flow
コード例 #5
0
ファイル: retry_flow.py プロジェクト: zliang90/simpleflow

class CallJoe(task.Task):
    """Demo task that always succeeds; revert apologizes on rollback."""

    def execute(self, joe_number, *args, **kwargs):
        # Forward path: announce the call.
        print("Calling joe %s." % joe_number)

    def revert(self, joe_number, *args, **kwargs):
        # Rollback path: runs when a later task in the flow fails.
        print("Calling joe %s and apologizing." % joe_number)


class CallJim(task.Task):
    """Demo task that only succeeds when jim's number is exactly 5551."""

    def execute(self, jim_number):
        print("Calling jim %s." % jim_number)
        if jim_number == 5551:
            print("Hello Jim!")
        else:
            # Failure here triggers the flow's retry/revert machinery.
            raise Exception("Wrong number! of jim")

    def revert(self, jim_number, **kwargs):
        # Rollback path for a failed call attempt.
        print("Wrong number jim, apologizing.")


# Retry controller: on failure, pull the next value out of
# 'phone_directory' and re-inject it as 'jim_number' for the next attempt.
retryer = retry.ParameterizedForEach(rebind=['phone_directory'], provides='jim_number')
lflow = lf.Flow('retrying-linear', retry=retryer)
lflow.add(CallJoe())
lflow.add(CallJim())
# Create your flow and associated tasks (the work to be done).
# Now run that flow using the provided initial data (store below).
result = api.run(session, lflow, store={'phone_directory': [333, 444, 555, 666], 'joe_number': 111})
print result  # NOTE: Python 2 print statement
コード例 #6
0
ファイル: graph_flow.py プロジェクト: zliang90/simpleflow
}
from simpleservice.ormdb.argformater import connformater
# Build the SQLAlchemy connection URL from the 'dst' settings dict above.
sql_connection = connformater % dst
session = build_session(sql_connection)


class Adder(task.Task):
    """Demo task that sums its two inputs (Python 2 print syntax)."""

    def execute(self, x, y):
        print 'do!!!', x, y
        return x + y


# Graph flow: execution order is derived from the provides/rebind data
# dependencies between Adder tasks, not from the order they are added.
flow = gf.Flow('root').add(
    lf.Flow('nested_linear').add(
        # x2 = y3+y4 = 12
        Adder("add2", provides='x2', rebind=['y3', 'y4']),
        # x1 = y1+y2 = 4
        Adder("add1", provides='x1', rebind=['y1', 'y2'])),
    # x5 = x1+x3 = 20
    Adder("add5", provides='x5', rebind=['x1', 'x3']),
    # x3 = x1+x2 = 16
    Adder("add3", provides='x3', rebind=['x1', 'x2']),
    # x4 = x2+y5 = 21
    Adder("add4", provides='x4', rebind=['x2', 'y5']),
    # x6 = x5+x4 = 41
    Adder("add6", provides='x6', rebind=['x5', 'x4']),
    # x7 = x6+x6 = 82
    Adder("add7", provides='x7', rebind=['x6', 'x6']))

# Provide the initial variable inputs using a storage dictionary.
store = {
コード例 #7
0
ファイル: retry_all_flow.py プロジェクト: zliang90/simpleflow
            global revert_all
            if revert_all:
                return retry.REVERT_ALL
            return retry.REVERT
        return retry.RETRY

    def execute(self, history, *args, **kwargs):
        # history holds the outcomes of previous attempts (Python 2 print).
        print 'RetryFlow exec', args, kwargs, 'history:', len(history)
        return 2

    def revert(self, history, *args, **kwargs):
        # NOTE(review): this *returns* a tuple instead of printing it —
        # compare execute() above; likely a typo for a print statement.
        # The returned tuple becomes the revert result recorded by the
        # engine, so confirm intent before changing it.
        return 'RetryFlow revert', args, kwargs, 'history:', len(history)


retryer = RetryFlow()
lflow = lf.Flow('xxx')
lflow.add(A('a', rebind=['va']))
lflow.add(A('b', rebind=['vb']))

# The retry controller belongs on this nested flow: if it were placed on
# the top-level flow, RETRY and REVERT_ALL would behave identically.
gflow = gf.Flow('ogm', retry=retryer)
gflow.add(A('c', rebind=['vc']))  # graph_flow: c and d have no ordering between them
gflow.add(A('d', rebind=['vd']))
lflow.add(gflow)

# Whether a failure reverts the whole flow (REVERT_ALL) or just this subflow.
revert_all = True
api.run(session,
        lflow,
コード例 #8
0
    def execute(self, joe_number, *args, **kwargs):
        # Forward path: announce the call.
        print("Calling joe %s." % joe_number)

    def revert(self, joe_number, *args, **kwargs):
        # Rollback path: runs when a later task in the flow fails.
        print("Calling %s and apologizing." % joe_number)


class CallSuzzie(task.Task):
    """Demo task that always fails, to trigger the flow's revert path."""

    def execute(self, suzzie_number, *args, **kwargs):
        raise Exception("Suzzie not home right now.")


# Create your flow and associated tasks (the work to be done).
# CallSuzzie always raises, so CallJoe and CallJim will be reverted.
flow = lf.Flow('simple-linear').add(
    CallJim(),
    CallJoe(),
    CallSuzzie()
)

# Initial data injected into the flow's storage before running.
stone = dict(joe_number=444,
             jim_number=555,
             suzzie_number=666)


try:
    # Now run that flow using the provided initial data (store below).
    api.run(session, flow, store=stone)
except Exception as e:
    # NOTE(harlowja): This exception will be the exception that came out of the
    # 'CallSuzzie' task instead of a different exception, this is useful since
    # typically surrounding code wants to handle the original exception and not
コード例 #9
0
ファイル: pipe.py プロジェクト: soulhez/Goperation
def flow_factory(session, book,
                 applications,
                 upgradefile=None,
                 backupfile=None,
                 store=None,
                 db_flow_factory=database.mysql_flow_factory,
                 **kwargs):
    """Build the main linear taskflow that upgrades a batch of applications.

    @param session:                 class: sqlalchemy:session
    @param book:                    class: taskflow book
    @param applications:            class: list of application objects (each with a middleware)
    @param upgradefile:             class: AppUpgradeFile    app upgrade file
    @param backupfile:              class: basestring of path/AppRemoteBackupFile  app backup file
    @param store:                   class: dict
    @param db_flow_factory:         class: function  defaults to database.mysql_flow_factory
    @param kwargs:                  create_cls/backup_cls/update_cls task classes,
                                    see database.MysqlCreate/MysqlDump/MysqlUpdate
    """
    if not applications:
        raise RuntimeError('No application found')
    if upgradefile and not isinstance(upgradefile, TaskPublicFile):
        raise TypeError('upgradefile not TaskPublicFile')
    if backupfile and not isinstance(backupfile, TaskPublicFile):
        raise TypeError('backupfile not TaskPublicFile')
    store = store or {}
    if store.get('backupfile') or store.get('upgradefile'):
        raise RuntimeError('Backupfile or Upgradefile in store')

    # Pick one application at random to drive the shared prepare steps;
    # all middlewares use the same upload/upgrade files.
    app = random.choice(applications)
    endpoint_name = app.middleware.endpoint
    main_flow = lf.Flow('%s_taskflow' % endpoint_name)

    # Preparation: fetch files needed later by the per-entity flows.
    prepare_uflow = uf.Flow('%s_prepare' % endpoint_name)
    # Download the application upgrade file
    if upgradefile:
        rebind = ['download_timeout']
        format_store_rebind(store, rebind)
        #  get app update file, all middlewares use same app upload file
        prepare_uflow.add(application.AppUpgradeFileGet(app.middleware, upgradefile, rebind=rebind))
    # Back up the application files
    if backupfile:
        rebind = ['download_timeout']
        format_store_rebind(store, rebind)
        prepare_uflow.add(application.AppBackUp(app.middleware, backupfile, rebind=rebind))
    # Download database update files when any database wants an update
    # (simplified from `not all([False if d.update else True ...])`).
    if app.databases and any(d.update for d in app.databases):
        rebind = ['download_timeout']
        format_store_rebind(store, rebind)
        # get database upload file, all middlewares use same database upload file
        prepare_uflow.add(database.DbUpdateSqlGet(app.middleware, app.databases, rebind=rebind))
    if len(prepare_uflow):
        main_flow.add(prepare_uflow)
    else:
        del prepare_uflow

    entitys_taskflow = uf.Flow('%s_entitys_task' % endpoint_name)
    # Per-entity upgrade flows run unordered (concurrently where possible)
    for app in applications:
        # all entity task
        entitys_taskflow.add(entity_factory(session, book, app, store,
                                            upgradefile, backupfile,
                                            db_flow_factory, **kwargs))
        # Yield to the eventlet hub while building a large batch.
        eventlet.sleep(0)
    main_flow.add(entitys_taskflow)

    return main_flow
コード例 #10
0
ファイル: pipe.py プロジェクト: soulhez/Goperation
def entity_factory(session, book, app, store,
                   upgradefile, backupfile,
                   db_flow_factory, **kwargs):
    """Assemble the serial workflow for one entity and wrap it as an
    independent EntityTask.

    @param session:                 class: sqlalchemy:session
    @param book:                    class: taskflow book
    @param store:                   class: dict
    @param db_flow_factory:         class: function
    @param upgradefile:             class: TaskPublicFile
    @param backupfile:              class: TaskPublicFile
    @param kwargs:                  class: create_cls,backup_cls,update_cls
    """
    ep = app.middleware.endpoint
    entity = app.middleware.entity
    flow = lf.Flow('entity_%s_%d' % (ep, entity))
    # Publish the upgrade/backup files to the downstream tasks.
    flow.add(ProvidesTask(name='provides_%s_%d' % (ep, entity),
                          upgradefile=upgradefile,
                          backupfile=backupfile))
    # Creation runs first, serially; it needs the upgrade file.
    if app.createtask:
        if not upgradefile:
            raise ValueError('No file found for createtask')
        flow.add(app.createtask)
    # Stop the entity; the AppKill retry controller kills it when a
    # graceful stop fails, guaranteeing the entity is down.
    if app.stoptask:
        stop_flow = uf.Flow('recheck_stop_%s_%d' % (ep, entity),
                            retry=application.AppKill('kill_%s_%d' % (ep, entity)))
        stop_flow.add(app.stoptask)
        flow.add(stop_flow)

    # Unordered sub-flow: app upgrade and database work may run in parallel.
    concurrent = uf.Flow('up_%s_%d' % (ep, entity))
    if app.upgradetask:
        if not upgradefile:
            raise ValueError('No file found for upgradetask')
        concurrent.add(app.upgradetask)
    # Database backup/upgrade flow, parallel with the app upgrade.
    db_flow = db_flow_factory(app, store, **kwargs)
    if db_flow:
        concurrent.add(db_flow)
    if len(concurrent):
        flow.add(concurrent)

    # Remaining serial tasks, after the upgrade completes.
    if app.updatetask:
        flow.add(app.updatetask)
    # start appserver (attribute is spelled 'startstak' by the model)
    if app.startstak:
        flow.add(app.startstak)
    # delete appserver
    if app.deletetask:
        flow.add(app.deletetask)

    # Each entity flow runs as its own independent event.
    return EntityTask(session, book, flow, store)
コード例 #11
0
ファイル: linear_flow.py プロジェクト: zliang90/simpleflow

class MysqlDumper(task.Task):
    """Demo task: prints and returns server_id + 1 (Python 2 print syntax)."""

    def execute(self, server_id):
        print 'execute server_id', server_id
        return server_id + 1

    def revert(self, *args, **kwargs):
        print 'revert', args, kwargs


# Chain of five tasks: each consumes s<n> and provides s<n+1>.
atask = MysqlDumper('s1', provides='s2', rebind=['s1'])
btask = MysqlDumper('s2', provides='s3', rebind=['s2'])
ctask = MysqlDumper('s3', provides='s4', rebind=['s3'])
dtask = MysqlDumper('s4', provides='s5', rebind=['s4'])
etask = MysqlDumper('s5', provides='s6', rebind=['s5'])

lflow = lf.Flow('lftest')
lflow.add(atask)
lflow.add(btask)
lflow.add(ctask)
lflow.add(dtask)
lflow.add(etask)

# Seed value for the first task in the chain.
data = {'s1': 1}

result = api.run(session, lflow, store=data, engine_cls=ParallelActionEngine)
# result = api.run(session, lflow, store=data)
print 'all success',  # Python 2 print statements
print result