Example #1
            def __handle_state_change(data, stat):
                if self.__stop_requested.is_set():
                    return False  # we're exiting anyway, don't do anything

                if data is None:
                    # TODO: it would probably make sense for this to have an exit code
                    logger.warning(
                        'Received no replication set configuration data! Requesting exit...'
                    )
                    self.__stop_requested.set()
                    return False

                logger.debug(
                    'Received an update to replication set configuration.')
                configuration = BinaryCodec(
                    ReplicationSetConfiguration).decode(data)

                with self.__worker_state_lock:
                    # TODO: this is annoying and repetitive and should be cleaned up
                    if self.__worker_state is None:
                        self.__worker_state = start_worker(
                            configuration.database.dsn)
                    elif self.__worker_state.worker.database.dsn != configuration.database.dsn:
                        self.__worker_state.worker.stop_async()
                        stopping.append(
                            WorkerState(self.__worker_state.worker,
                                        time.time()))
                        self.__worker_state = start_worker(
                            configuration.database.dsn)
Example #2
File: kafka.py Project: disqus/pgshovel
def __init__(self, cluster, set, hosts, topic, prime_threshold):
    self.cluster = cluster
    self.set = set
    self.hosts = hosts
    self.topic = topic
    self.codec = BinaryCodec(Message)
    self.prime_threshold = prime_threshold
Example #3
def upgrade_cluster(cluster, force=False):
    zookeeper = cluster.zookeeper

    codec = BinaryCodec(ClusterConfiguration)
    data, stat = zookeeper.get(cluster.path)
    configuration = codec.decode(data)

    # if the configuration is newer or equal, require manual intervention
    assert parse_version(__version__) > parse_version(
        configuration.version) or force, 'cannot downgrade %s to %s' % (
            configuration.version, __version__)

    logger.info('Upgrading cluster from %s to %s...', configuration.version,
                __version__)
    configuration.version = __version__

    ztransaction = zookeeper.transaction()
    ztransaction.set_data(cluster.path,
                          codec.encode(configuration),
                          version=stat.version)

    # collect databases
    databases = set()
    for s, (configuration, stat) in fetch_sets(cluster):
        databases.add(configuration.database.dsn)

        # TODO: not entirely sure that this is necessary, but can't hurt
        ztransaction.check(cluster.get_set_path(s), version=stat.version)

    transactions = []
    # get_managed_databases prevents duplicates, so this is safe to perform
    # without any advisory locking (although it will error if two sets refer
    # to the same database using different DSNs). get_managed_databases should
    # probably provide some deduplication capability to make this more
    # convenient, but this at least keeps it from inadvertently breaking for now.
    for connection in get_managed_databases(cluster,
                                            databases,
                                            configure=False,
                                            same_version=False).values():
        transaction = Transaction(connection, 'update-cluster')
        transactions.append(transaction)
        with connection.cursor() as cursor:
            setup_database(cluster, cursor)

    with managed(transactions):
        commit(ztransaction)
Example #4
def initialize_cluster(cluster):
    """
    Initialize a pgshovel cluster in ZooKeeper.
    """
    logger.info('Creating a new cluster for %s...', cluster)

    configuration = ClusterConfiguration(version=__version__)
    ztransaction = cluster.zookeeper.transaction()
    ztransaction.create(
        cluster.path,
        BinaryCodec(ClusterConfiguration).encode(configuration))
    ztransaction.create(cluster.get_set_path())
    commit(ztransaction)
Example #5
def check_version(cluster):
    zookeeper = cluster.zookeeper

    logger.debug('Checking cluster version...')
    data, stat = zookeeper.get(cluster.path)
    configuration = BinaryCodec(ClusterConfiguration).decode(data)
    if __version__ != configuration.version:
        raise VersionMismatchError(configuration.version)

    logger.debug('Remote version: %s', configuration.version)

    ztransaction = zookeeper.transaction()
    ztransaction.check(cluster.path, version=stat.version)
    return ztransaction
Example #6
def fetch_sets(cluster, names=None):
    if names is None:
        names = cluster.zookeeper.get_children(cluster.get_set_path())

    sets = map(VersionedSet.expand, names)
    paths = map(
        cluster.get_set_path,
        map(operator.attrgetter('name'), sets),
    )
    futures = map(cluster.zookeeper.get_async, paths)

    results = []
    decode = BinaryCodec(ReplicationSetConfiguration).decode
    for s, future in zip(sets, futures):
        data, stat = future.get()
        configuration = decode(data)
        assert s.version is None or s.version == get_version(configuration), \
            'versions do not match (%s and %s)' % (s.version, get_version(configuration))
        results.append((s.name, (configuration, stat)))

    return results
Example #7
def test_handler():
    topic = '%s-mutations' % (uuid.uuid1().hex,)

    codec = BinaryCodec(Message)

    client = KafkaClient('kafka')
    producer = SimpleProducer(client)
    writer = KafkaWriter(producer, topic, codec)

    inputs = list(transaction)
    writer.push(inputs)

    consumer = SimpleConsumer(client, 'test', topic, auto_offset_reset='smallest')

    outputs = map(
        codec.decode,
        map(
            operator.attrgetter('message.value'),
            list(consumer.get_messages(count=3)),
        ),
    )

    assert outputs == inputs
Example #8
def create_set(cluster, name, configuration):
    # TODO: add dry run support

    validate_set_configuration(configuration)

    databases = get_managed_databases(cluster, (configuration.database.dsn, ))

    ztransaction = check_version(cluster)

    transactions = []
    for connection in databases.values():
        transaction = Transaction(connection, 'create-set:%s' % (name, ))
        transactions.append(transaction)

        with connection.cursor() as cursor:
            configure_set(cluster, cursor, name, configuration)

    ztransaction.create(
        cluster.get_set_path(name),
        BinaryCodec(ReplicationSetConfiguration).encode(configuration),
    )

    with managed(transactions):
        commit(ztransaction)
Example #9
def inspect(cluster, name):
    with cluster:
        data, stat = cluster.zookeeper.get(cluster.get_set_path(name))
        configuration = BinaryCodec(ReplicationSetConfiguration).decode(data)
        click.echo(TextCodec(ReplicationSetConfiguration).encode(configuration))
        click.echo('version: %s' % (administration.get_version(configuration)), err=True)
Example #10
def main(cluster, set, kafka_hosts, kafka_topic):
    client = KafkaClient(kafka_hosts)
    producer = SimpleProducer(client)
    topic = kafka_topic.format(cluster=cluster.name, set=set)
    return KafkaWriter(producer, topic, BinaryCodec(Message))
Example #11
File: kafka.py Project: disqus/pgshovel
class KafkaStream(object):
    def __init__(self, cluster, set, hosts, topic, prime_threshold):
        self.cluster = cluster
        self.set = set
        self.hosts = hosts
        self.topic = topic
        self.codec = BinaryCodec(Message)
        self.prime_threshold = prime_threshold

    def consume(self, state):
        """
        Starts consuming from the configured Kafka topic given a possible
        existing ``pgshovel.interfaces.replication_pb2:State``.

        If the provided ``state`` does not contain a
        ``stream_state.consumer_state`` value, the ``KafkaStream`` attempts to
        start reading from the Kafka topic after first "priming" the stream.
        Priming involves consuming messages from the topic looking for a
        ``BeginOperation``. Any message that is not a ``BeginOperation`` is
        dropped until a ``BeginOperation`` is seen or the ``prime_threshold``
        is reached, in which case a
        ``pgshovel.streams.utilities:UnableToPrimeError`` is raised.

        In general, it makes sense to set the ``prime_threshold`` to a value
        that exceeds the maximum transaction size you expect to see in your
        data. A ``prime_threshold`` could effectively be infinite (the stream
        could be constructed with ``float('inf')``), but the lack of a
        ``BeginOperation`` in the stream would then cause it to hang, possibly
        forever, so the ``prime_threshold`` parameter is provided to raise an
        exception if this unexpected behavior occurs.
        """
        consumer = SimpleConsumer(KafkaClient(self.hosts), None, self.topic)

        # You can only update one offset at a time with kafka-python, and
        # reconstituting global order from a partitioned stream is hard and
        # not something we really need to deal with right now.
        assert len(consumer.offsets) == 1

        decoded = imap(
            lambda (offset, msg): (offset, self.codec.decode(msg.value)),
            consumer
        )

        if state.stream_state.HasField('consumer_state'):
            # Seeking to a direct offset was not in the PyPI release of
            # kafka-python when this was implemented:
            # https://github.com/mumrah/kafka-python/pull/412
            current = consumer.offsets[0]
            offset = state.stream_state.consumer_state.offset + 1
            delta = offset - current
            logger.debug('Moving to previous replication log offset: %s (current position: %s)...', offset, current)
            consumer.seek(delta, 1)
            assert consumer.offsets[0] == offset
        else:
            logger.info('No consumer state provided; will attempt to prime the stream to a BeginOperation')
            # The call to ``prime_for_batch_start`` "primes" the stream by
            # dropping messages until it sees a message that is an instance of
            # one of the types in
            # ``pgshovel.replication.validation.TRANSACTION_START_EVENT_TYPES``
            decoded = prime_for_batch_start(
                max_messages=self.prime_threshold,
                stream=decoded
            )

        for offset, message in decoded:
            state = validate_state(state, offset, message)
            # XXX: This is necessary because of a bug in protocol buffer oneof.
            state = type(state).FromString(state.SerializeToString())
            yield state, offset, message


    @classmethod
    def configure(cls, configuration, cluster, set):
        topic = '{cluster}.{set}.mutations'.format(cluster=cluster.name, set=set)
        return cls(
            cluster,
            set,
            configuration['hosts'],
            topic,
            configuration.get('prime_threshold', 1000)
        )
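
A minimal usage sketch of the stream above, assuming a pgshovel ``cluster`` handle, a replication set named 'users', and hypothetical ``handle``/``checkpoint`` helpers (only the configuration keys and the ``State`` message come from the code and docstring above):

from pgshovel.interfaces.replication_pb2 import State

# configure() builds the Kafka topic name from the cluster and set names.
stream = KafkaStream.configure(
    {'hosts': 'kafka:9092', 'prime_threshold': 1000},  # example host value
    cluster,   # hypothetical pgshovel cluster handle (must expose .name)
    'users',   # hypothetical replication set name
)

# A fresh State has no stream_state.consumer_state, so consume() primes the
# stream by dropping messages until it sees a BeginOperation.
state = State()
for state, offset, message in stream.consume(state):
    handle(message)     # hypothetical mutation handler
    checkpoint(state)   # persist the yielded state so a restart can resume
                        # from consumer_state.offset + 1 instead of re-priming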
Example #12
def update_set(cluster,
               name,
               updated_configuration,
               allow_forced_removal=False):
    # TODO: add dry run support

    validate_set_configuration(updated_configuration)

    (name, (current_configuration, stat)) = fetch_sets(cluster, (name, ))[0]

    # TODO: It probably makes sense to normalize the database URIs here.
    current_databases = set((current_configuration.database.dsn, ))
    updated_databases = set((updated_configuration.database.dsn, ))

    additions = get_managed_databases(cluster,
                                      updated_databases - current_databases)
    mutations = get_managed_databases(cluster,
                                      updated_databases & current_databases)
    deletions = get_managed_databases(
        cluster,
        current_databases - updated_databases,
        skip_inaccessible=allow_forced_removal,
    )

    # ensure no items show up multiple times, since that causes incorrect behavior
    # TODO: this is a very naive approach to avoid shooting ourselves in the
    # foot and could be improved for valid cases (updating a dsn for an
    # existing set should be treated as a mutation, not an addition and
    # deletion) but this would require a more intelligent implementation
    occurrences = collections.Counter()
    for nodes in map(operator.methodcaller('keys'),
                     (additions, mutations, deletions)):
        occurrences.update(nodes)

    duplicates = list(
        itertools.takewhile(lambda (node, count): count > 1,
                            occurrences.most_common()))
    assert not duplicates, 'found duplicates: %s' % (duplicates, )

    ztransaction = check_version(cluster)

    transactions = []

    for connection in additions.values():
        transaction = Transaction(connection,
                                  'update-set:create:%s' % (name, ))
        transactions.append(transaction)
        with connection.cursor() as cursor:
            configure_set(cluster, cursor, name, updated_configuration, None)

    for connection in mutations.values():
        transaction = Transaction(connection,
                                  'update-set:update:%s' % (name, ))
        transactions.append(transaction)
        with connection.cursor() as cursor:
            configure_set(cluster, cursor, name, updated_configuration,
                          current_configuration)

    # TODO: add help to inform the user of the possibility of retry
    for connection in deletions.values():
        transaction = Transaction(connection,
                                  'update-set:delete:%s' % (name, ))
        transactions.append(transaction)
        with connection.cursor() as cursor:
            unconfigure_set(cluster, cursor, name, current_configuration)

    ztransaction.set_data(
        cluster.get_set_path(name),
        BinaryCodec(ReplicationSetConfiguration).encode(updated_configuration),
        version=stat.version,
    )

    with managed(transactions):
        commit(ztransaction)