Beispiel #1
0
 def execute(self, entity, timeout):
     """Swallow *entity* into this merge.

     Asks the remote endpoint for the areas owned by the swallowed
     entity and extends the locally known app-entity with them.
     Advances ``self.stpes[self.entity]`` from SWALLOWED to INSERT
     once the areas have been fetched successfully.

     :param entity: id of the target entity receiving the areas
     :param timeout: unused here; kept for taskflow store compatibility
     """
     step = self.stpes[self.entity]
     if step == SWALLOWED:
         # Serialize client RPC access across concurrent merge tasks.
         with self.endpoint.mlock:
             result = self.endpoint.client.swallowed_entity(
                 self.entity, self.uuid, entity)
         try:
             if result.get(
                     'resultcode'
             ) != manager_common.RESULT_SUCCESS or not result.get('data'):
                 LOG.error(
                     'Swallowed success, but can not find areas from result'
                 )
                 return None
             data = result.get('data')
             areas = data[0].get('areas')
             if not areas:
                 raise KeyError('Not areas found')
         except KeyError as e:
             # str(e) instead of e.message: BaseException.message is
             # deprecated since Python 2.6 and gone in Python 3.
             LOG.error('Get areas fail %s' % str(e))
         else:
             self.stpes[self.entity] = INSERT
             # Wait up to ~15s for the target entity to be registered;
             # break as soon as it appears instead of re-checking all
             # five iterations.
             for _ in range(5):
                 if entity in self.endpoint.konwn_appentitys:
                     break
                 eventlet.sleep(3)
             try:
                 self.endpoint.konwn_appentitys[entity]['areas'].extend(
                     areas)
             except KeyError:
                 raise exceptions.MergeException(
                     'Target entity %d not in konwn appentitys' % entity)
             LOG.debug('Extend new areas of konwn appentitys success')
Beispiel #2
0
 def execute(self, root, timeout, databases):
     """Dump the database of an entity that needs merging.

     Also dumps a shared ``init.sql`` (schema + routines only, no data)
     into *root* if one does not exist yet.

     :param root: merge working directory
     :param timeout: timeout (seconds) passed through to mysqldump
     :param databases: database list handed to ``_prepare_database``
     """
     step = self.stpes[self.entity]
     if step == DUMPING:
         _file = os.path.join(root, sqlfile(self.entity))
         if os.path.exists(_file):
             # Dump already produced by a previous (resumed) run.
             return
         database = DumpData._prepare_database(databases)
         try:
             mysqldump(_file,
                       database.get('host'),
                       database.get('port'),
                       database.get('user'),
                       database.get('passwd'),
                       database.get('schema'),
                       character_set=None,
                       extargs=self._ext_args(database.get('schema')),
                       logfile=None,
                       callable=safe_fork,
                       timeout=timeout)
         except (ExitBySIG, UnExceptExit):
             LOG.error('Dump database of entity %d fail' % self.entity)
             if os.path.exists(_file):
                 try:
                     os.remove(_file)
                 except OSError:
                     # %s, not %d: _file is a path string; the original
                     # %d raised TypeError when this handler fired.
                     LOG.error('Try remove file %s fail!' % _file)
                     raise exceptions.MergeException(
                         'Remove error file %s fail' % _file)
         else:
             self.stpes[self.entity] = SWALLOWED
         # create init file
         initfile = os.path.join(root, 'init.sql')
         if not os.path.exists(initfile):
             try:
                 with self.endpoint.mlock:
                     # Double-checked under the lock so only one worker
                     # dumps the shared init.sql.
                     if not os.path.exists(initfile):
                         LOG.info(
                             'Dump init sql from entity %d, schema %s' %
                             (self.entity, database.get('schema')))
                         mysqldump(initfile,
                                   database.get('host'),
                                   database.get('port'),
                                   database.get('user'),
                                   database.get('passwd'),
                                   database.get('schema'),
                                   character_set=None,
                                   extargs=['-R', '-d'],
                                   logfile=None,
                                   callable=safe_fork,
                                   timeout=timeout)
             except (ExitBySIG, UnExceptExit):
                 if os.path.exists(initfile):
                     try:
                         os.remove(initfile)
                     except OSError:
                         LOG.error('Try remove init sql file fail!')
Beispiel #3
0
def create_merge(appendpoint, uuid, entitys, middleware, opentime, chiefs):
    """Create the merge working directory and steps file, then merge.

    Writes a pickled dict (opentime, chiefs, per-entity steps all set
    to SWALLOW) into ``steps.dat`` and hands off to ``merge_entitys``.
    Refuses to run if a steps file already exists for this uuid.
    """
    mergepath = 'merge-%s' % uuid
    mergeroot = os.path.join(appendpoint.endpoint_backup, mergepath)
    if not os.path.exists(mergeroot):
        os.makedirs(mergeroot)
    stepsfile = os.path.join(mergeroot, 'steps.dat')
    if os.path.exists(stepsfile):
        raise exceptions.MergeException('Steps file exist, can not merge')
    # Every source entity starts at the SWALLOW step.
    data = {
        'opentime': opentime,
        'chiefs': chiefs,
        'steps': dict((_entity, SWALLOW) for _entity in entitys),
    }
    with open(stepsfile, 'wb') as f:
        cPickle.dump(data, f)
    merge_entitys(appendpoint, uuid, middleware.entity, middleware.databases)
Beispiel #4
0
 def continues(self, req, uuid, body=None):
     """Re-run a merge task that failed midway.

     Looks up the merge task and its target game-server entity, then
     sends a ``continue_merge`` RPC to the agent hosting that entity.

     :param req: incoming request (used for entity info lookup)
     :param uuid: uuid of the merge task to resume
     :param body: unused; kept for controller signature compatibility
     :raises InvalidArgument: if the task has already finished
     :raises RpcResultError: if the agent RPC fails or returns an error
     """
     session = endpoint_session()
     query = model_query(session, MergeTask, filter=MergeTask.uuid == uuid)
     query = query.options(joinedload(MergeTask.entitys, innerjoin=False))
     etask = query.one()
     if etask.status == common.MERGEFINISH:
         # Fixed message typo: 'all ready' -> 'already'.
         raise InvalidArgument('Merge task has already finished')
     _query = model_query(session,
                          AppEntity,
                          filter=AppEntity.entity == etask.entity)
     _query = _query.options(
         joinedload(AppEntity.databases, innerjoin=False))
     appentity = _query.one_or_none()
     # The target must exist, have databases, and be a game server.
     if not appentity or not appentity.databases or appentity.objtype != common.GAMESERVER:
         LOG.error('Etask entity can not be found or type/database error')
         raise exceptions.MergeException(
             'Etask entity can not be found or type/database error')
     databases = self._database_to_dict(appentity)
     rpc = get_client()
     metadata, ports = self._entityinfo(req=req, entity=appentity.entity)
     target = targetutils.target_agent_by_string(metadata.get('agent_type'),
                                                 metadata.get('host'))
     target.namespace = common.NAME
     rpc_ret = rpc.call(target,
                        ctxt={'agents': [
                            appentity.agent_id,
                        ]},
                        msg={
                            'method':
                            'continue_merge',
                            'args':
                            dict(entity=etask.entity,
                                 uuid=uuid,
                                 databases=databases)
                        })
     if not rpc_ret:
         raise RpcResultError('continue entity result is None')
     if rpc_ret.get('resultcode') != manager_common.RESULT_SUCCESS:
         raise RpcResultError('continue entity fail %s' %
                              rpc_ret.get('result'))
     return resultutils.results(
         result='continue merge task command has been send',
         data=[dict(uuid=etask.uuid, entity=etask.entity)])
Beispiel #5
0
 def execute(self, timeline, root, database, timeout):
     """Load this entity's dumped sql file into the merge database.

     Aborts immediately if the shared stop flag is set. The mysqlload
     error log is removed only after a successful load, so it survives
     for inspection when the load raises.
     """
     if self.stoper[0]:
         raise exceptions.MergeException('Stop mark is true')
     sqlpath = os.path.join(root, sqlfile(self.entity))
     errlog = os.path.join(
         root, 'insert-%d.err.%d.log' % (self.entity, timeline))
     LOG.info('Insert database of entity %d, sql file %s' %
              (self.entity, sqlpath))
     mysqlload(sqlpath,
               database.get('host'),
               database.get('port'),
               database.get('user'),
               database.get('passwd'),
               database.get('schema'),
               character_set=None,
               extargs=None,
               logfile=errlog,
               callable=safe_fork,
               timeout=timeout)
     LOG.info('Insert database of entity %d success' % self.entity)
     os.remove(errlog)
Beispiel #6
0
def merge_entitys(appendpoint, uuid, entity, databases):
    """Run the merge pipeline for *uuid*.

    Phase 1 (prepare): for every source entity not yet at INSERT, run a
    swallow -> dump -> swallowed taskflow; step state is persisted back
    to ``steps.dat`` so an interrupted merge can resume.
    Phase 2 (merge): clean the target database, load ``init.sql``, load
    every entity dump, then run the post steps and mark all FINISHED.

    :param appendpoint: endpoint owning backup root, client and config
    :param uuid: merge task uuid (names the working directory)
    :param entity: target entity receiving the merged data
    :param databases: databases dict; common.DATADB is the merge target
    :raises exceptions.MergeException: on any unrecoverable merge error
    """
    datadb = databases[common.DATADB]
    mergepath = 'merge-%s' % uuid
    mergeroot = os.path.join(appendpoint.endpoint_backup, mergepath)
    stepsfile = os.path.join(mergeroot, 'steps.dat')
    initfile = os.path.join(mergeroot, 'init.sql')
    if not os.path.exists(stepsfile):
        raise exceptions.MergeException('Steps file not exist')
    with open(stepsfile, 'rb') as f:
        data = cPickle.load(f)
        steps = data['steps']
    prepares = []
    for _entity, step in six.iteritems(steps):
        # When a post-sql error has no impact on the merge as a whole,
        # the step may be set to FINISHED manually so the merge steps
        # are not repeated.
        if step == FINISHED:
            # A FINISHED step is only valid when ALL steps are finished.
            for _step in six.itervalues(steps):
                if _step != FINISHED:
                    raise exceptions.MergeException('Steps is finish?')
            appendpoint.client.finish_merge(uuid)
            appendpoint.flush_config(entity,
                                     databases,
                                     opentime=data['opentime'],
                                     chiefs=data['chiefs'])
            return
        if step != INSERT:
            prepares.append(_entity)
    if prepares:
        # min([]) raises ValueError, so the smallest entity is computed
        # only when there is actually something to prepare (the original
        # code called min() before this guard).
        mini_entity = min(prepares)
        name = 'prepare-merge-at-%d' % int(time.time())
        book = LogBook(name=name)
        store = dict(timeout=5,
                     dtimeout=600,
                     mergeroot=mergeroot,
                     entity=entity)
        taskflow_session = build_session(
            'sqlite:///%s' % os.path.join(mergeroot, '%s.db' % name))
        connection = Connection(taskflow_session)

        prepare_uflow = uf.Flow(name)
        for _entity in prepares:
            entity_flow = lf.Flow('prepare-%d' % _entity)
            entity_flow.add(Swallow(uuid, steps, _entity, appendpoint))
            # Only the smallest entity dumps without the "skip" flag.
            entity_flow.add(
                DumpData(uuid, steps, _entity, appendpoint,
                         _entity != mini_entity))
            entity_flow.add(Swallowed(uuid, steps, _entity, appendpoint))
            prepare_uflow.add(entity_flow)
        engine = load(connection,
                      prepare_uflow,
                      store=store,
                      book=book,
                      engine_cls=ParallelActionEngine,
                      max_workers=4)
        try:
            engine.run()
        except Exception as e:
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.exception('Prepare merge task execute fail')
            raise exceptions.MergeException(
                'Prepare merge task execute fail, %s %s' %
                (e.__class__.__name__, str(e)))
        finally:
            connection.session = None
            taskflow_session.close()
            # Persist whatever step progress was made, even on failure,
            # so the merge can be resumed.
            with open(stepsfile, 'wb') as f:
                cPickle.dump(data, f)

    # Sanity-check: every entity must now be at INSERT with its dump on disk.
    for _entity, step in six.iteritems(steps):
        if step != INSERT:
            raise exceptions.MergeException('Some step not on %s' % INSERT)
        if not os.path.exists(os.path.join(mergeroot, sqlfile(_entity))):
            raise exceptions.MergeException('Entity %d sql file not exist' %
                                            _entity)

    if not os.path.exists(initfile):
        LOG.error('Init database file not exist')
        raise exceptions.MergeException('Init database file not exist')
    LOG.info('Prepare merge success, try merge database')

    now = int(time.time())
    name = 'merge-at-%d' % now
    book = LogBook(name=name)
    store = dict(timeout=1800, root=mergeroot, database=datadb, timeline=now)
    taskflow_session = build_session('sqlite:///%s' %
                                     os.path.join(mergeroot, '%s.db' % name))
    connection = Connection(taskflow_session)

    merge_flow = lf.Flow('merge-to')
    merge_flow.add(SafeCleanDb())
    merge_flow.add(InitDb())
    insert_lflow = lf.Flow('insert-db')
    # Shared mutable stop flag each InserDb task checks before loading.
    stoper = [0]
    for _entity in steps:
        insert_lflow.add(InserDb(_entity, stoper))
    merge_flow.add(insert_lflow)
    merge_flow.add(PostDo(uuid, appendpoint))

    engine = load(connection,
                  merge_flow,
                  store=store,
                  book=book,
                  engine_cls=ParallelActionEngine,
                  max_workers=4)
    try:
        engine.run()
    except Exception as e:
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.exception('Merge database task execute fail')
        raise exceptions.MergeException(
            'Merge database task execute fail, %s %s' %
            (e.__class__.__name__, str(e)))
    else:
        # Success: mark every step FINISHED and persist before notifying.
        for _entity in steps:
            steps[_entity] = FINISHED
        with open(stepsfile, 'wb') as f:
            cPickle.dump(data, f)
        appendpoint.client.finish_merge(uuid)
        appendpoint.flush_config(entity,
                                 databases,
                                 opentime=data['opentime'],
                                 chiefs=data['chiefs'])
        LOG.info('Merge task %s all finish' % uuid)
    finally:
        connection.session = None
        taskflow_session.close()