Example #1
def create_entity(appendpoint, entity, objtype, databases, appfile, timeout):
    middleware = GogameMiddle(endpoint=appendpoint, entity=entity, objtype=objtype)

    conf = CONF['%s.%s' % (common.NAME, objtype)]
    _database = []
    # format database to class
    for subtype in databases:
        database_id = databases[subtype]
        schema = '%s_%s_%s_%d' % (common.NAME, objtype, subtype, entity)
        # default auth suffix
        postfix = '-%d' % entity
        auth = dict(user=conf.get('%s_%s' % (subtype, 'user')) + postfix,
                    passwd=conf.get('%s_%s' % (subtype, 'passwd')),
                    ro_user=conf.get('%s_%s' % (subtype, 'ro_user')) + postfix,
                    ro_passwd=conf.get('%s_%s' % (subtype, 'ro_passwd')),
                    # source='%s/%s' % (appendpoint.manager.ipnetwork.network, appendpoint.manager.ipnetwork.netmask),
                    source=conf.source or '%',
                    rosource=conf.rosource or '%')
        LOG.debug('Create schema %s in %d with auth %s' % (schema, database_id, str(auth)))
        _database.append(GogameDatabase(create=True, backup=None, update=None,
                                        database_id=database_id, schema=schema,
                                        character_set='utf8',
                                        subtype=subtype,
                                        host=None, port=None, **auth))

    app = application.Application(middleware,
                                  createtask=GogameAppCreate(middleware, timeout),
                                  databases=_database)

    book = LogBook(name='create_%s_%d' % (appendpoint.namespace, entity))
    store = dict(download_timeout=timeout)
    taskflow_session = sqlite.get_taskflow_session()
    create_flow = pipe.flow_factory(taskflow_session, book,
                                    applications=[app, ],
                                    upgradefile=GogameAppFile(source=appfile, objtype=objtype),
                                    store=store,
                                    create_cls=GogameDatabaseCreateTask)
    connection = Connection(taskflow_session)
    engine = load(connection, create_flow, store=store,
                  book=book, engine_cls=ParallelActionEngine)

    try:
        engine.run()
    except Exception as e:
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.exception('Create task execute fail')
        else:
            LOG.error('Create task execute fail, %s %s' % (e.__class__.__name__, str(e)))
    finally:
        connection.destroy_logbook(book.uuid)
        for dberror in middleware.dberrors:
            LOG.error(str(dberror))
    return middleware
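
All of these examples share the same load-and-run skeleton. The sketch below distills it, assuming the same helpers the examples already import (build_session, Connection, LogBook, load, ParallelActionEngine, task, lf); the task, flow name, and sqlite path are placeholders, not part of the original code.

class NoopTask(task.Task):          # trivial task so the flow is not empty
    def execute(self):
        pass

session = build_session('sqlite:////tmp/flows.db')   # placeholder path
connection = Connection(session)
book = LogBook(name='demo')
store = dict(timeout=60)
flow = lf.Flow('demo').add(NoopTask(name='noop'))
engine = load(connection, flow, store=store,
              book=book, engine_cls=ParallelActionEngine)
try:
    engine.run()
finally:
    # drop the persisted logbook once the run is over, as create_entity does
    connection.destroy_logbook(book.uuid)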
Example #2
def __init__(self, session, book, flow, store):
    super(EntityTask, self).__init__(name='engine_%s' % flow.name)
    self.connection = Connection(session)
    self.engine = api.load(self.connection,
                           flow,
                           book=book,
                           store=store,
                           engine_cls=ParallelActionEngine)
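
A hypothetical usage of this wrapper; the snippet does not show EntityTask's base class or how it drives the stored engine, so the run() call below is an assumption rather than the class's documented interface.

# Hypothetical usage; session, book, flow and store are built as in the
# other examples on this page.
entity_task = EntityTask(session, book, flow, store)
entity_task.engine.run()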
Example #3
def merge_entitys(appendpoint, uuid, entity, databases):
    datadb = databases[common.DATADB]
    mergepath = 'merge-%s' % uuid
    mergeroot = os.path.join(appendpoint.endpoint_backup, mergepath)
    stepsfile = os.path.join(mergeroot, 'steps.dat')
    initfile = os.path.join(mergeroot, 'init.sql')
    if not os.path.exists(stepsfile):
        raise exceptions.MergeException('Steps file not exist')
    with open(stepsfile, 'rb') as f:
        data = cPickle.load(f)
        steps = data['steps']
    prepares = []
    for _entity, step in six.iteritems(steps):
        # If a failed post SQL has no effect on the merge as a whole,
        # the step can be set to FINISHED directly to avoid repeating merge steps
        if step == FINISHED:
            for _step in six.itervalues(steps):
                if _step != FINISHED:
                    raise exceptions.MergeException('Steps not all finished')
            appendpoint.client.finish_merge(uuid)
            appendpoint.flush_config(entity,
                                     databases,
                                     opentime=data['opentime'],
                                     chiefs=data['chiefs'])
            return
        if step != INSERT:
            prepares.append(_entity)
    if prepares:
        mini_entity = min(prepares)
        name = 'prepare-merge-at-%d' % int(time.time())
        book = LogBook(name=name)
        store = dict(timeout=5,
                     dtimeout=600,
                     mergeroot=mergeroot,
                     entity=entity)
        taskflow_session = build_session(
            'sqlite:///%s' % os.path.join(mergeroot, '%s.db' % name))
        connection = Connection(taskflow_session)

        prepare_uflow = uf.Flow(name)
        for _entity in prepares:
            entity_flow = lf.Flow('prepare-%d' % _entity)
            entity_flow.add(Swallow(uuid, steps, _entity, appendpoint))
            entity_flow.add(
                DumpData(uuid, steps, _entity, appendpoint,
                         _entity != mini_entity))
            entity_flow.add(Swallowed(uuid, steps, _entity, appendpoint))
            prepare_uflow.add(entity_flow)
        engine = load(connection,
                      prepare_uflow,
                      store=store,
                      book=book,
                      engine_cls=ParallelActionEngine,
                      max_workers=4)
        try:
            engine.run()
        except Exception as e:
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.exception('Prepare merge task execute fail')
            raise exceptions.MergeException(
                'Prepare merge task execute fail, %s %s' %
                (e.__class__.__name__, str(e)))
        finally:
            connection.session = None
            taskflow_session.close()
            with open(stepsfile, 'wb') as f:
                cPickle.dump(data, f)

    for _entity, step in six.iteritems(steps):
        if step != INSERT:
            raise exceptions.MergeException('Some steps are not at %s' % INSERT)
        if not os.path.exists(os.path.join(mergeroot, sqlfile(_entity))):
            raise exceptions.MergeException('Entity %d sql file not exist' %
                                            _entity)

    if not os.path.exists(initfile):
        LOG.error('Init database file not exist')
        raise exceptions.MergeException('Init database file not exist')
    LOG.info('Prepare merge success, try merge database')

    now = int(time.time())
    name = 'merge-at-%d' % now
    book = LogBook(name=name)
    store = dict(timeout=1800, root=mergeroot, database=datadb, timeline=now)
    taskflow_session = build_session('sqlite:///%s' %
                                     os.path.join(mergeroot, '%s.db' % name))
    connection = Connection(taskflow_session)

    merge_flow = lf.Flow('merge-to')
    merge_flow.add(SafeCleanDb())
    merge_flow.add(InitDb())
    insert_lflow = lf.Flow('insert-db')
    stoper = [0]
    for _entity in steps:
        insert_lflow.add(InserDb(_entity, stoper))
    merge_flow.add(insert_lflow)
    merge_flow.add(PostDo(uuid, appendpoint))

    engine = load(connection,
                  merge_flow,
                  store=store,
                  book=book,
                  engine_cls=ParallelActionEngine,
                  max_workers=4)
    try:
        engine.run()
    except Exception as e:
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.exception('Merge database task execute fail')
        raise exceptions.MergeException(
            'Merge database task execute fail, %s %s' %
            (e.__class__.__name__, str(e)))
    else:
        for _entity in steps:
            steps[_entity] = FINISHED
        with open(stepsfile, 'wb') as f:
            cPickle.dump(data, f)
        appendpoint.client.finish_merge(uuid)
        appendpoint.flush_config(entity,
                                 databases,
                                 opentime=data['opentime'],
                                 chiefs=data['chiefs'])
        LOG.info('Merge task %s all finish' % uuid)
    finally:
        connection.session = None
        taskflow_session.close()
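
For reference, the pickled steps.dat consumed above appears to have the following shape, inferred purely from the reads in merge_entitys (data['steps'], data['opentime'], data['chiefs']); entity ids and values below are placeholders.

# Inferred shape of steps.dat; only the keys come from the code above.
data = {
    'steps': {1001: INSERT, 1002: FINISHED},  # per-entity merge state
    'opentime': 1500000000,                   # forwarded to flush_config
    'chiefs': {},                             # forwarded to flush_config
}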
Example #4
def upgrade_entitys(appendpoint, objtype, objfiles, entitys, timeline):
    upgradefile = None
    backupfile = None
    download_time = 600
    upzip_timeout = 600
    if common.APPFILE in objfiles:
        objfile = objfiles[common.APPFILE]
        md5 = objfile.get('md5')
        backup = objfile.get('backup', True)
        revertable = objfile.get('revertable', False)
        rollback = objfile.get('rollback', True)
        timeout = objfile.get('timeout')
        # guard against a missing timeout (get() returns None here)
        if timeout and timeout < download_time:
            download_time = timeout
        if timeout and timeout < upzip_timeout:
            upzip_timeout = timeout
        # application upgrade file
        upgradefile = GogameAppFile(md5,
                                    objtype,
                                    rollback=rollback,
                                    revertable=revertable)
        if backup:
            # the entity to back up is picked at random in flow_factory
            outfile = os.path.join(
                appendpoint.endpoint_backup,
                '%s.%s.%d.gz' % (objtype, common.APPFILE, timeline))
            # application backup file
            backupfile = GogameAppBackupFile(outfile, objtype)

    applications = []
    middlewares = []
    _updates = {}
    for entity in entitys:
        if objtype != appendpoint._objtype(entity):
            raise ValueError('Entity not the same objtype')
        middleware = GogameMiddle(endpoint=appendpoint,
                                  entity=entity,
                                  objtype=objtype)
        middlewares.append(middleware)
        _database = []
        # collect database backup info
        for subtype in (common.DATADB, common.LOGDB):
            if subtype in objfiles:
                objfile = objfiles[subtype]
                md5 = objfile.get('md5')
                revertable = objfile.get('revertable', False)
                rollback = objfile.get('rollback', False)
                timeout = objfile.get('timeout')
                if timeout and timeout < download_time:
                    download_time = timeout
                dbinfo = appendpoint.local_database_info(entity, subtype)
                try:
                    update = _updates[subtype]
                except KeyError:
                    LOG.debug('New %s update file' % subtype)
                    update = DbUpdateFile(md5, revertable, rollback)
                    _updates[subtype] = update
                # database backup file
                backup = None
                if objfile.get('backup', False):
                    outfile = os.path.join(
                        appendpoint.endpoint_backup, '%s.%d.%s.%d.gz' %
                        (objtype, entity, subtype, timeline))
                    backup = DbBackUpFile(outfile)
                _database.append(
                    GogameDatabase(backup=backup,
                                   update=update,
                                   timeout=timeout,
                                   **dbinfo))
        # application file upgrade task
        upgradetask = None
        if common.APPFILE in objfiles:
            upgradetask = AppFileUpgradeByFile(
                middleware,
                native=False,
                rebind=['upgradefile', 'upzip_timeout'])
        app = Application(middleware,
                          upgradetask=upgradetask,
                          databases=_database)
        applications.append(app)
    _updates.clear()

    book = LogBook(name='upgrade_%s' % appendpoint.namespace)
    store = dict(download_timeout=download_time, upzip_timeout=upzip_timeout)
    taskflow_session = sqlite.get_taskflow_session()
    upgrade_flow = pipe.flow_factory(taskflow_session,
                                     book,
                                     applications=applications,
                                     upgradefile=upgradefile,
                                     backupfile=backupfile,
                                     store=store)
    connection = Connection(taskflow_session)
    engine = load(connection,
                  upgrade_flow,
                  store=store,
                  book=book,
                  engine_cls=ParallelActionEngine,
                  max_workers=4)
    e = None
    try:
        engine.run()
    except Exception as e:
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.exception('Upgrade task execute fail')
        else:
            LOG.error('Upgrade task execute fail, %s %s' %
                      (e.__class__.__name__, str(e)))
    finally:
        connection.destroy_logbook(book.uuid)
    return middlewares, e
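
The objfiles argument appears to be a mapping from file type to per-file options; the sketch below is inferred from the .get() calls above, with placeholder md5 values and timeouts.

# Inferred shape of objfiles, based on the .get() calls in upgrade_entitys;
# every value here is a placeholder.
objfiles = {
    common.APPFILE: dict(md5='<md5>', backup=True, revertable=False,
                         rollback=True, timeout=300),
    common.DATADB: dict(md5='<md5>', backup=False, revertable=False,
                        rollback=False, timeout=300),
}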
Example #5
def test_flow_factory():
    # Reconstructed from a truncated snippet: a linear flow whose middle task
    # fails on purpose so the resume() loop below has work to pick up.
    return lf.Flow('test_flow').add(TestTask(name='first'),
                                    UnfortunateTask(name='boom'),
                                    TestTask(name='second'))


print('load_from_factory test')

engine = api.load_from_factory(session=session, flow_factory=test_flow_factory)
print('Running flow %s %s' %
      (engine.storage.flow_name, engine.storage.flow_uuid))
engine.run()

print('finish load_from_factory test~')


def resume(flowdetail, session):
    print('Resuming flow %s %s' % (flowdetail.name, flowdetail.uuid))
    engine = api.load_from_detail(session, flow_detail=flowdetail)
    engine.run()


logbooks = list(Connection(session).get_logbooks())

for lb in logbooks:
    for fd in lb:
        print(fd.state)
        print(fd)
        if fd.state not in FINISHED_STATES:
            resume(fd, session)

print('finish load_from_detail test~')
Example #6
eventlet.monkey_patch()

failure.TRACEBACK = True

dst = {
    'host': '172.20.0.3',
    'port': 3304,
    'schema': 'simpleflow',
    'user': '******',
    'passwd': '111111'
}
from simpleservice.ormdb.argformater import connformater
sql_connection = connformater % dst
session = build_session(sql_connection)

connection = Connection(session)


class MysqlDumper(task.Task):
    def execute(self, server_id):
        # fail on odd ids so half the tasks trigger a revert
        if server_id % 2 != 0:
            raise Exception('server id %d error' % server_id)
        print('success %d' % server_id)

    def revert(self, *args, **kwargs):
        print('revert %r %r' % (args, kwargs))


servers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
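
The snippet stops before building a flow. A hypothetical completion in the spirit of the other examples would run one MysqlDumper per server id in an unordered flow, feeding each task its id through the store; uf, LogBook, load and ParallelActionEngine are assumed to be imported as elsewhere on this page.

# Hypothetical completion of the truncated snippet above.
book = LogBook(name='dump-demo')
store = {}
flow = uf.Flow('dumpers')
for server_id in servers:
    key = 'server_%d' % server_id
    store[key] = server_id
    # rebind maps the task's server_id argument onto its own store key
    flow.add(MysqlDumper(name='dump-%d' % server_id,
                         rebind={'server_id': key}))
engine = load(connection, flow, store=store, book=book,
              engine_cls=ParallelActionEngine, max_workers=4)
engine.run()  # odd ids raise, so their branches revert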

Example #7
def hotfix_entitys(appendpoint, objtype, appfile, entitys, timeline):
    backupfile = None
    download_time = 600
    upzip_timeout = 600

    md5 = appfile.get('md5')
    backup = appfile.get('backup', True)
    revertable = appfile.get('revertable', False)
    rollback = appfile.get('rollback', True)
    timeout = appfile.get('timeout')
    # guard against a missing timeout (get() returns None here)
    if timeout and timeout < download_time:
        download_time = timeout
    if timeout and timeout < upzip_timeout:
        upzip_timeout = timeout
    stream = appfile.get('stream')

    # application upgrade file
    upgradefile = GogameAppFile(md5,
                                objtype,
                                rollback=rollback,
                                revertable=revertable,
                                stream=stream)
    if backup:
        # the entity to back up is picked at random in flow_factory
        outfile = os.path.join(
            appendpoint.endpoint_backup,
            '%s.%s.%d.gz' % (objtype, common.APPFILE, timeline))
        # application backup file
        backupfile = GogameAppBackupFile(outfile, objtype)

    applications = []
    middlewares = []
    for entity in entitys:
        if objtype != appendpoint._objtype(entity):
            raise ValueError('Entity not the same objtype')
        middleware = GogameMiddle(endpoint=appendpoint,
                                  entity=entity,
                                  objtype=objtype)
        middlewares.append(middleware)
        upgradetask = AppFileUpgradeByFile(
            middleware,
            native=False,
            exclude=hofixexcluer,
            rebind=['upgradefile', 'upzip_timeout'])
        app = Application(middleware, upgradetask=upgradetask)
        applications.append(app)

    book = LogBook(name='hotfix_%s' % appendpoint.namespace)
    store = dict(download_timeout=download_time, upzip_timeout=upzip_timeout)
    taskflow_session = sqlite.get_taskflow_session()
    upgrade_flow = pipe.flow_factory(taskflow_session,
                                     book,
                                     applications=applications,
                                     upgradefile=upgradefile,
                                     backupfile=backupfile,
                                     store=store)
    connection = Connection(taskflow_session)
    engine = load(connection,
                  upgrade_flow,
                  store=store,
                  book=book,
                  engine_cls=ParallelActionEngine,
                  max_workers=4)
    e = None
    try:
        engine.run()
    except Exception as e:
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.exception('Hotfix task execute fail')
        else:
            LOG.error('Hotfix task execute fail, %s %s' %
                      (e.__class__.__name__, str(e)))
    finally:
        connection.destroy_logbook(book.uuid)
    if stream:
        upgradefile.clean()
    return middlewares, e
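
The appfile argument mirrors the per-file option dicts used by upgrade_entitys, plus a stream field; the sketch below is inferred from the .get() calls above, with placeholder values.

# Inferred shape of appfile, based on the .get() calls in hotfix_entitys;
# all values are placeholders.
appfile = dict(md5='<md5>', backup=True, revertable=False,
               rollback=True, timeout=300, stream=None)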