Example #1
    def setUpClass(self):
        host = config().get('redis', 'host')
        port = config().get('redis', 'port')
        db = config().get('redis', 'db')
        self.client = RedisClient(host, port, db)

        self.key = '__redis_key_' + app.getUuid()
Example #2
def testKafkaConnection(result):
    host = config().get('kafka', 'host')
    result['config']['kafka'] = {'host': host}

    startTime = time.time()

    try:
        producer = utils.kafkaclient.KafkaProducer(host)
        metadata = producer.list_topics()

        endTime = time.time()
        elapsedTime = int((endTime - startTime) * 1000)

        topicMetadata = metadata.topics
        topicName = config().get('kafka', 'topic')
        relayTopicName = config().get('kafka', 'relay_topic')
        if topicName in topicMetadata and relayTopicName in topicMetadata:
            result['passed']['kafka'] = {'elapsed_ms': elapsedTime}
        else:
            result['failed']['kafka'] = {
                'elapsed_ms': elapsedTime,
                'message': 'topics do NOT exist'
            }
    except Exception as e:
        endTime = time.time()
        elapsedTime = int((endTime - startTime) * 1000)
        result['code'] = 1001
        result['failed']['kafka'] = {
            'elapsed_ms': elapsedTime,
            'message': str(e)
        }
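The health-check helpers in these examples all write into a shared result dictionary. A minimal sketch of a caller that prepares it (the driver name runHealthChecks and the initial code value are assumptions; only the key layout is taken from the snippets above and below):

def runHealthChecks():
    # Assumed skeleton: keys inferred from the checks in these examples.
    result = {
        'code': 0,       # checks set this to 1001 on failure
        'config': {},    # effective configuration seen by each check
        'passed': {},    # per-service timings for successful checks
        'failed': {},    # per-service timings and error messages
    }
    testKafkaConnection(result)
    testRedisConnection(result)
    return result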
Example #3
def testRedisConnection(result):
    host = config().get('redis', 'host')
    port = config().get('redis', 'port')
    db = config().get('redis', 'db')
    result['config']['redis'] = {'host': host, 'port': port, 'db': db}

    startTime = time.time()

    try:
        client = utils.redisclient.RedisClient(host, port, db)
        pingResult = client.ping()
        endTime = time.time()
        elapsedTime = int((endTime - startTime) * 1000)
        if pingResult:
            result['passed']['redis'] = {'elapsed_ms': elapsedTime}
        else:
            result['code'] = 1001
            result['failed']['redis'] = {'elapsed_ms': elapsedTime}
    except Exception as e:
        endTime = time.time()
        elapsedTime = int((endTime - startTime) * 1000)
        result['code'] = 1001
        result['failed']['redis'] = {
            'elapsed_ms': elapsedTime,
            'message': str(e)
        }
Example #4
def testMysqlConnection(result, configKey):
    host = config().get('mysql:' + configKey, 'host')
    port = config().get('mysql:' + configKey, 'port')
    db = config().get('mysql:' + configKey, 'database')

    result['config']['mysql:' + configKey] = {
        'host': host,
        'port': port,
        'database': db
    }

    startTime = time.time()

    try:
        connPool = ConnectinoPool()
        conn = connPool.connection(configKey)
        with conn.cursor() as cursor:
            cursor.execute('show tables;')
            data = cursor.fetchall()

        endTime = time.time()
        elapsedTime = int((endTime - startTime) * 1000)
        result['passed']['mysql:' + configKey] = {'elapsed_ms': elapsedTime}
    except Exception as e:
        endTime = time.time()
        elapsedTime = int((endTime - startTime) * 1000)
        result['code'] = 1001
        result['failed']['mysql:' + configKey] = {
            'elapsed_ms': elapsedTime,
            'message': str(e)
        }
Example #5
def getRedisClient():
    global _redis_client

    if _redis_client is None:
        host = config().get('redis', 'host')
        port = config().get('redis', 'port')
        db = config().get('redis', 'db')
        _redis_client = utils.redisclient.RedisClient(host, port, db)

    return _redis_client
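A possible way to use this lazily initialized singleton (a sketch, not part of the original; it assumes the module defines _redis_client = None at import time and that RedisClient exposes ping() as in Example #3):

_redis_client = None               # module-level cache assumed by getRedisClient()

client = getRedisClient()          # first call builds the client from config
assert client is getRedisClient()  # later calls reuse the same instance
client.ping()                      # RedisClient.ping() as used in Example #3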
Example #6
def testElasticSearchConnection(result):
    host = config().get('elasticsearch', 'host')
    hosts = host.split(',')

    result['config']['elasticsearch'] = {'host': host}

    startTime = time.time()

    try:
        client = Elasticsearch(hosts)
        pingResult = client.ping()
        endTime = time.time()
        elapsedTime = int((endTime - startTime) * 1000)
        if pingResult:
            result['passed']['elasticsearch'] = {'elapsed_ms': elapsedTime}
        else:
            result['failed']['elasticsearch'] = {
                'elapsed_ms': elapsedTime,
                'message': 'failed to ping'
            }
    except Exception as e:
        endTime = time.time()
        elapsedTime = int((endTime - startTime) * 1000)
        result['code'] = 1001
        result['failed']['elasticsearch'] = {
            'elapsed_ms': elapsedTime,
            'message': str(e)
        }
Example #7
    def __init__(self, taskName):
        # instance of RedisStatus
        self.status = RedisStatus(taskName)
        self.relayTopic = config().get('kafka', 'topic')

        self.statusConfig = None
        self.kafkaConsumer = None
        self.handler = None

        self.itemCount = 0

        super(SyncService, self).__init__()
Example #8
    def __init__(self, database):
        self.topic = config().get('kafka', 'topic')
        self.database = database

        self._connPool = ConnectinoPool()
        self._runPath = app.getPrjRoot() + "/run"
        self._binlogPosFile = self._runPath + "/" + database + "_collector_position.safe"

        self._kafkaProducer = self._initKafkaProducer()
        self._kafkaMsgRedeliveryCount = 0

        self._posStream = None
        self._stream = None
Example #9
def db_connect(query, parameter=None):
    conn = None
    try:
        params = config.config()
        conn = psycopg2.connect(**params)
        cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cur.execute(query, (parameter,))
        row_headers = [desc[0] for desc in cur.description]  # automagically get row headers
        data = cur.fetchall()

        return (data, row_headers)

    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn:
            conn.close()
            print("Connection closed")
Example #10
    def _copyKafkaOffset(self):
        """
        Set the new consumer's offset to latest.
        """
        # First, get all partitions of the kafka topic
        topicName = config().get('kafka', 'topic')

        if self.status.nextConfig:
            nextStatusConfig = RedisStatusConfig(self.status.nextConfig, forceSync=True)
            nextConsumer = None

            try:
                nextConsumer = remote.getKafkaConsumer(
                        nextStatusConfig.kafkaGroupId,
                        autoCommit=False,
                        autoOffsetReset='latest'
                        )
                
                _logger.debug('next kafka groupid is: %s', nextStatusConfig.kafkaGroupId)

                clusterMetadata = nextConsumer.list_topics(topicName)
                topicMetadata = clusterMetadata.topics.get(topicName, {})
                partitions = topicMetadata.partitions

                for pid in partitions.keys():
                    p = TopicPartition(topicName, pid)
                    nextConsumer.assign([p])

                    msg = nextConsumer.poll(10)
                    if msg:
                        offset = msg.offset() - 1
                        _logger.debug('pid[%s] topic[%s] offset[%s]', pid, topicName, offset)

                        if offset >= 0:
                            p.offset = offset
                            nextConsumer.commit(offsets=[p])
            except Exception as e:
                _logger.error('exception occurs when setting offset for new consumer: %s', Failure()) 
                raise
            finally:
                if nextConsumer:
                    nextConsumer.close()
Example #11
    def setUpClass(self):
        self.host = config().get('kafka', 'host')
        self.groupId = '__group_id_' + app.getUuid()
Example #12
def getElasticClient():
    host = config().get('elasticsearch', 'host')
    hosts = host.split(',')
    return Elasticsearch(hosts)
Example #13
def getKafkaConsumer(groupId, autoCommit=True, autoOffsetReset='earliest'):
    host = config().get('kafka', 'host')
    return utils.kafkaclient.KafkaConsumer(groupId,
                                           host,
                                           autoCommit=autoCommit,
                                           autoOffsetReset=autoOffsetReset)
Example #14
def getKafkaProducer():
    host = config().get('kafka', 'host')
    return utils.kafkaclient.KafkaProducer(host)
Example #15
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from application.config import config

app = Flask(__name__, static_url_path='/static')
config(app)
db = SQLAlchemy(app)

# endpoints
from application.http.index import index
from application.http.index import any_root_path
from application.http.get_all_users import get_all_users
from application.http.login import login
from application.http.register import register
Example #16
    def listen(self):
        _logger.info("Start to listen to the binlog of %s" % self.database)

        section = 'mysql:' + self.database
        mysqlSetting = {
            "host": config().get(section, "host"),
            "port": int(config().get(section, 'port')),
            "user": config().get(section, "user"),
            "password": config().get(section, "password"),
        }

        watchedDatabases = [self.database]

        # load last binlog reader position
        logFile, logPos, resumeStream = self._loadLastBinlogPos()

        self._stream = BinLogStreamReader(
            connection_settings=mysqlSetting,
            server_id=int(config().get(section, "slaveid")),
            only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
            blocking=True,
            resume_stream=resumeStream,
            log_file=logFile,
            log_pos=logPos,
        )

        while True:
            refresh = False
            try:
                for binlogEvent in self._stream:
                    refresh = True
                    logFile, logPos = self._stream.log_file, self._stream.log_pos

                    # skip databases that are not being watched
                    if binlogEvent.schema not in watchedDatabases:
                        self._writeBinlogPos(logFile, logPos)
                        continue

                    binlog = {}
                    binlog['storage'] = 'mysql'
                    binlog['database'] = '%s' % binlogEvent.schema
                    binlog['table'] = '%s' % binlogEvent.table
                    binlog['timestamp'] = datetime.fromtimestamp(
                        binlogEvent.timestamp).strftime('%Y-%m-%d %H:%M:%S')

                    for row in binlogEvent.rows:
                        if isinstance(binlogEvent, DeleteRowsEvent):
                            binlog['values'] = row['values']
                            binlog['type'] = 'DELETE'
                        elif isinstance(binlogEvent, UpdateRowsEvent):
                            binlog['before'] = row['before_values']
                            binlog['values'] = row['after_values']
                            binlog['type'] = 'UPDATE'
                        elif isinstance(binlogEvent, WriteRowsEvent):
                            binlog['values'] = row['values']
                            binlog['type'] = 'INSERT'

                        binlogRow = json.dumps(binlog,
                                               default=timeutil.dateHandler)
                        self._pushToKafka(binlogRow, binlog['database'],
                                          binlog['table'])

                    # after pushing binlog to kafka, update the binlog position
                    self._writeBinlogPos(logFile, logPos)

                if not refresh:
                    _logger.info(
                        "NO new input binlog, current position: [%s:%d]",
                        logFile if logFile is not None else "",
                        logPos if logPos is not None else 0)
                    time.sleep(0.1)
            except Exception as e:
                print(e)
                sys.exit(1)
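For orientation, one record pushed to Kafka by this listener, shown as the Python dict before json.dumps (the database, table, and column values here are illustrative assumptions; only the keys come from the code above):

binlog = {
    'storage': 'mysql',
    'database': 'shop',                # assumed database name
    'table': 'orders',                 # assumed table name
    'timestamp': '2020-01-01 12:00:00',
    'type': 'UPDATE',
    'before': {'status': 'pending'},   # only present for UPDATE events
    'values': {'status': 'paid'},
}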