Example #1
File: simple.py  Project: disqus/pgshovel
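Bootstraps a replication set against a temporary primary database, then verifies that SimpleLoader.fetch() yields a state carrying the cluster's node identifier and a snapshot stream with the expected rows for the configured table.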
def test_yields_a_bootstrap_state_containing_node_and_snapshot_value(cluster):
    dsn = create_temporary_database('primary')
    connection = psycopg2.connect(dsn)

    with connection as conn, conn.cursor() as cursor:
        cursor.executemany(
            'INSERT INTO auth_user (username) VALUES (%s)',
            (('example',), ('example2',))
        )

    with connection as conn, conn.cursor() as cursor:
        replication_set = ReplicationSetConfiguration()
        replication_set.database.dsn = dsn
        replication_set.tables.add(name='auth_user', primary_keys=['id'])

        with cluster:
            create_set(cluster, 'example', replication_set)
            with SimpleLoader(cluster, 'example').fetch() as (state, loaders):
                assert state.node == get_node_identifier(cluster, cursor).bytes

                ((table_config, stream),) = loaders
                assert table_config.name == 'auth_user'
                assert table_config.primary_keys == ['id']

                values = [
                    (c.name, c.string)
                    for row in stream
                    for c in row.columns
                ]
                assert values == [
                    (u'id', u''),
                    (u'username', u'example'),
                    (u'id', u''),
                    (u'username', u'example2')
                ]
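The empty strings paired with id are expected here: each value is read through the Column message's string field, and id is an integer column (compare Column(name='id', integer64=1) in the worker test below), so its value presumably lives in the integer64 field and the string field is left unset.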
Example #2
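Walks the administrative lifecycle of a replication set end to end: creating it, updating it (repointing the DSN and adding a column-filtered table), force-upgrading the cluster, removing a table from the configuration, and finally dropping the set.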
def test_workflows(cluster):
    primary_dsn = create_temporary_database('primary')
    replica_dsn = create_temporary_database('replica')

    replication_set = ReplicationSetConfiguration()
    replication_set.database.dsn = primary_dsn
    replication_set.tables.add(
        name='auth_user',
        primary_keys=['id'],
    )

    with cluster:
        create_set(cluster, 'example', replication_set)

        replication_set.database.dsn = replica_dsn
        replication_set.tables.add(name='accounts_userprofile',
                                   primary_keys=['id'],
                                   columns=['id', 'user_id', 'display_name'])
        update_set(cluster, 'example', replication_set)

        upgrade_cluster(cluster, force=True)

        del replication_set.tables[0]
        update_set(cluster, 'example', replication_set)

        drop_set(cluster, 'example')
Example #3
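Creates a replication set with a unique UUID-based name covering auth_user on the source connection and yields the name; the yield suggests this is the body of a pytest fixture.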
def set(cluster, source_connection):
    name = str(uuid.uuid1())
    replication_set = ReplicationSetConfiguration()
    replication_set.database.dsn = source_connection.dsn
    replication_set.tables.add(name='auth_user', primary_keys=['id'])
    create_set(cluster, name, replication_set)
    yield name
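A minimal, self-contained sketch of the yield-fixture pattern this helper appears to rely on; the @pytest.fixture decorator and the consuming test are illustrative assumptions, not pgshovel code:

import uuid

import pytest


@pytest.fixture
def set_name():
    # setup: provision a uniquely named resource, then hand it to the test
    name = str(uuid.uuid1())
    yield name
    # any teardown would run here, after the test finishes


def test_name_parses_as_uuid(set_name):
    # the fixture argument receives the value yielded above
    assert uuid.UUID(set_name)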
Example #4
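Runs a Worker against a temporary database and asserts that inserts surface as INSERT mutations with the expected columns; the second insert checks that a table registered without a column whitelist replicates the entire row.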
def test_worker(cluster):
    dsn = create_temporary_database()

    create_set(cluster, 'example', create_set_configuration(dsn))
    configure_tick_frequency(dsn)

    queue = Queue()
    worker = Worker(cluster, dsn, 'example', 'consumer', QueueHandler(queue))
    worker.start()

    with closing(psycopg2.connect(dsn)) as connection, connection.cursor() as cursor:
        cursor.execute('INSERT INTO auth_user (username) VALUES (%s)', ('example',))
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))

    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)

    assert mutation.table == 'auth_user'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='id', integer64=1),
        Column(name='username', string='example'),
    ]

    # also make sure tables without a column whitelist replicate the entire row state
    with closing(psycopg2.connect(dsn)) as connection, connection.cursor() as cursor:
        cursor.execute(
            'INSERT INTO accounts_userprofile (user_id, display_name) VALUES (%s, %s)',
            (1, 'example'),
        )
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))

    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)

    assert mutation.table == 'accounts_userprofile'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='display_name', string='example'),
        Column(name='id', integer64=1),
        Column(name='user_id', integer64=1),
    ]

    worker.stop_async()
    worker.result(1)
Example #5
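Same flow as the worker test, but through a Relay; in between, it kills every other backend connection with pg_terminate_backend to verify that the relay reconnects and keeps delivering mutations.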
def test_relay(cluster):
    primary_dsn = create_temporary_database()
    secondary_dsn = create_temporary_database()

    create_set(cluster, 'example', create_set_configuration(primary_dsn))
    configure_tick_frequency(primary_dsn)

    queue = Queue()
    relay = Relay(cluster, 'example', 'consumer', QueueHandler(queue), throttle=0.1)
    relay.start()

    with closing(psycopg2.connect(primary_dsn)) as connection, connection.cursor() as cursor:
        cursor.execute('INSERT INTO auth_user (username) VALUES (%s)', ('example',))
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))

    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)

    assert mutation.table == 'auth_user'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='id', integer64=1),
        Column(name='username', string='example'),
    ]

    # ensure the connection recovers after being killed
    with closing(psycopg2.connect(primary_dsn)) as connection, connection.cursor() as cursor:
        connection.autocommit = True
        cursor.execute('SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid != pg_backend_pid()')

    with closing(psycopg2.connect(primary_dsn)) as connection, connection.cursor() as cursor:
        cursor.execute('INSERT INTO auth_user (username) VALUES (%s)', ('example',))
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))

    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)

    assert mutation.table == 'auth_user'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='id', integer64=2),
        Column(name='username', string='example'),
    ]

    relay.stop_async()
    relay.result(1)

    """
示例#7
0
def test_worker(cluster):
    dsn = create_temporary_database()

    create_set(cluster, 'example', create_set_configuration(dsn))
    configure_tick_frequency(dsn)

    queue = Queue()
    worker = Worker(cluster, dsn, 'example', 'consumer', QueueHandler(queue))
    worker.start()

    with closing(psycopg2.connect(dsn)) as connection, connection.cursor() as cursor:
        cursor.execute('INSERT INTO auth_user (username) VALUES (%s)', ('example',))
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))

    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)

    assert mutation.table == 'auth_user'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='id', integer64=1),
        Column(name='username', string='example'),
    ]

    # also make sure tables without column whitelist defined replicate the entire row state
    with closing(psycopg2.connect(dsn)) as connection, connection.cursor() as cursor:
        cursor.execute('INSERT INTO accounts_userprofile (user_id, display_name) VALUES (%s, %s)', (1, 'example',))
        connection.commit()
        force_tick(connection, cluster.get_queue_name('example'))

    events = get_events(queue, 3)
    assert_same_batch(events)
    (mutation,) = unwrap_transaction(events)

    assert mutation.table == 'accounts_userprofile'
    assert mutation.schema == 'public'
    assert mutation.operation == MutationOperation.INSERT
    assert not mutation.HasField('old')
    assert sorted(mutation.new.columns, key=lambda c: c.name) == [
        Column(name='display_name', string='example'),
        Column(name='id', integer64=1),
        Column(name='user_id', integer64=1),
    ]

    worker.stop_async()
    worker.result(1)
Example #6
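Command-line entry point: reads a ReplicationSetConfiguration from a text file, decodes it, and registers it as a new replication set on the cluster.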
def create(cluster, name, configuration):
    codec = TextCodec(ReplicationSetConfiguration)
    configuration = codec.decode(configuration.read())

    with cluster:
        return administration.create_set(cluster, name, configuration)
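The configuration objects in these examples are protocol buffer messages (note tables.add(...) and HasField(...) above), so TextCodec presumably wraps the protobuf text format. A hypothetical configuration file mirroring the fields the tests exercise (layout inferred from those examples, not taken from pgshovel documentation) might look like:

database {
  dsn: "postgres://localhost/primary"
}
tables {
  name: "auth_user"
  primary_keys: "id"
}
tables {
  name: "accounts_userprofile"
  primary_keys: "id"
  columns: "id"
  columns: "user_id"
  columns: "display_name"
}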