def test_inline_large_attachments(setup_client_syncgateway_test):
    """
    1. Start LiteServ and Sync Gateway
    2. Create 2 databases on LiteServ (ls_db1, ls_db2)
    3. Start continuous push replication from ls_db1 to sg_db
    4. Start continuous pull replication from sg_db to ls_db2
    5. PUT 5 large inline attachments to ls_db1
    6. DELETE the docs on ls_db1
    7. PUT same 5 large inline attachments to ls_db1
    8. Verify docs replicate to ls_db2
    9. Purge ls_db1
    10. Verify docs removed
    """

    log_info("Running 'test_inline_large_attachments' ...")

    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_url_admin = setup_client_syncgateway_test["sg_admin_url"]
    ls_url = setup_client_syncgateway_test["ls_url"]

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    ls_db1 = "ls_db1"
    ls_db2 = "ls_db2"
    sg_db = "db"

    client = MobileRestClient()
    client.create_database(ls_url, ls_db1)
    client.create_database(ls_url, ls_db2)

    # Start continuous push replication from ls_db1 -> sg_db
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db1, to_url=sg_url, to_db=sg_db)

    # Start continuous pull replication from sg_db -> ls_db2
    # (fixed comment: this is the pull leg from docstring step 4, not a push)
    client.start_replication(url=ls_url, continuous=True, from_url=sg_url, from_db=sg_db, to_db=ls_db2)

    # Build 5 docs, each referencing a large inline attachment
    # (golden_gate_large.jpg — presumably a multi-MB image; exact size TODO confirm)
    attachment_docs = []
    for i in range(5):
        doc = document.create_doc(
            doc_id="large_attach_{}".format(i),
            attachment_name="golden_gate_large.jpg",
            channels=["ABC"]
        )
        attachment_docs.append(doc)

    # Add large attachments to ls_db1
    docs = []
    for doc in attachment_docs:
        docs.append(client.add_doc(ls_url, ls_db1, doc, use_post=False))

    # Delete docs
    client.delete_docs(ls_url, ls_db1, docs)
    client.verify_docs_deleted(ls_url, ls_db1, docs)

    # Recreate docs with the same ids/attachments
    recreated_docs = []
    for doc in attachment_docs:
        recreated_docs.append(client.add_doc(ls_url, ls_db1, doc, use_post=False))

    # Recreated docs should replicate ls_db1 -> sg_db -> ls_db2
    client.verify_docs_present(ls_url, ls_db1, recreated_docs)
    client.verify_docs_present(sg_url, sg_db, recreated_docs)
    client.verify_docs_present(ls_url, ls_db2, recreated_docs)

    purged_docs = client.purge_docs(ls_url, ls_db1, recreated_docs)
    log_info(purged_docs)

    # All purged docs should have replicated and should be gone now.
    # This is currently failing due to some docs not replicating to ls_db2
    client.verify_docs_deleted(ls_url, ls_db1, recreated_docs)
    client.verify_docs_deleted(sg_url, sg_db, recreated_docs)
    client.verify_docs_deleted(ls_url, ls_db2, recreated_docs)
# NOTE(review): this redefines test_inline_large_attachments and shadows the
# earlier definition in this module — pytest will only collect this one.
# Confirm the older variant above is intentionally dead and remove it.
def test_inline_large_attachments(setup_client_syncgateway_test):
    """
    1. Start LiteServ and Sync Gateway
    2. Create 2 databases on LiteServ (ls_db1, ls_db2)
    3. Start continuous push replication from ls_db1 to sg_db
    4. Start continuous pull replication from sg_db to ls_db2
    5. PUT 5 large inline attachments to ls_db1
    6. DELETE the docs on ls_db1
    7. PUT same 5 large inline attachments to ls_db1
    8. Verify docs replicate to ls_db2
    9. Purge ls_db1
    10. Verify docs removed
    """

    log_info("Running 'test_inline_large_attachments' ...")

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_url_admin = setup_client_syncgateway_test["sg_admin_url"]
    ls_url = setup_client_syncgateway_test["ls_url"]

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    ls_db1 = "ls_db1"
    ls_db2 = "ls_db2"
    sg_db = "db"

    # Reset cluster to ensure no data in system
    sg_config = sync_gateway_config_path_for_mode(
        "listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    client = MobileRestClient()
    client.create_database(ls_url, ls_db1)
    client.create_database(ls_url, ls_db2)

    # Start continuous push replication from ls_db1 -> sg_db
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db1, to_url=sg_url_admin, to_db=sg_db)

    # Start continuous pull replication from sg_db -> ls_db2
    # (fixed comment: this is the pull leg from docstring step 4, not a push)
    client.start_replication(url=ls_url, continuous=True, from_url=sg_url_admin, from_db=sg_db, to_db=ls_db2)

    # Build 5 docs, each with a large inline attachment loaded from the data dir
    # (golden_gate_large.jpg — presumably a multi-MB image; exact size TODO confirm)
    attachment_docs = []
    for i in range(5):
        atts = attachment.load_from_data_dir(["golden_gate_large.jpg"])
        doc = document.create_doc(doc_id="large_attach_{}".format(i), attachments=atts, channels=["ABC"])
        attachment_docs.append(doc)

    # Add large attachments to ls_db1
    docs = []
    for doc in attachment_docs:
        docs.append(client.add_doc(ls_url, ls_db1, doc, use_post=False))

    # Delete docs
    client.delete_docs(ls_url, ls_db1, docs)
    client.verify_docs_deleted(ls_url, ls_db1, docs)

    # Recreate docs with the same ids/attachments
    recreated_docs = []
    for doc in attachment_docs:
        recreated_docs.append(
            client.add_doc(ls_url, ls_db1, doc, use_post=False))

    # Recreated docs should replicate ls_db1 -> sg_db -> ls_db2
    client.verify_docs_present(ls_url, ls_db1, recreated_docs)
    client.verify_docs_present(sg_url_admin, sg_db, recreated_docs)
    client.verify_docs_present(ls_url, ls_db2, recreated_docs)

    purged_docs = client.purge_docs(ls_url, ls_db1, recreated_docs)
    log_info(purged_docs)

    # Purged docs should be reported as "missing" on ls_db1
    client.verify_docs_deleted(ls_url, ls_db1, recreated_docs, reason="missing")
def test_document_resurrection(params_from_base_test_setup, sg_conf_name, deletion_type):
    """Delete (tombstone or purge) docs, recreate them, and verify rev generations.

    Scenarios:

    Doc meta mode / tombstone
    - Create docs (set A) via Sync Gateway
    - Delete docs (set A) via Sync Gateway
    - Verify docs (set A) are deleted via Sync Gateway
    - Create docs (set A) via Sync Gateway
    - Verify revs (set A) are generation 3 via Sync Gateway

    Doc meta mode / purge
    - Create docs (set A) via Sync Gateway
    - Purge docs (set A) via Sync Gateway
    - Verify docs (set A) are deleted via Sync Gateway
    - Create docs (set A) via Sync Gateway
    - Verify revs (set A) are generation 1 via Sync Gateway

    XATTRs / tombstone
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Delete SDK docs (set B) via Sync Gateway
    - Delete SG docs (set A) via SDK
    - Verify docs (set B) are deleted via Sync Gateway
    - Verify docs (set B) are deleted via SDK
    - Verify docs (set A) are deleted via Sync Gateway
    - Verify docs (set A) are deleted via SDK
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Verify revs (set A, B) are generation 3 via Sync Gateway

    XATTRs / purge
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Purge SDK docs (set B) via Sync Gateway
    - Delete SG docs (set A) via SDK
    - Verify docs (set B) are deleted via Sync Gateway
    - Verify docs (set B) are deleted via SDK
    - Verify docs (set A) are deleted via Sync Gateway
    - Verify docs (set A) are deleted via SDK
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Verify revs (set A, B) are generation 1 via Sync Gateway
    """
    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    cbs_url = cluster_topology['couchbase_servers'][0]
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']

    bucket_name = 'data-bucket'
    sg_db = 'db'
    cbs_host = host_for_url(cbs_url)

    # Each client (SG, and SDK when xattrs are enabled) writes this many docs
    num_docs_per_client = 10

    # Reset cluster
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Initialize clients
    sg_client = MobileRestClient()
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_host, bucket_name), password='******')

    # Create Sync Gateway user
    sg_user_channels = ['NASA', 'NATGEO']
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='seth', password='******', channels=sg_user_channels)
    sg_user_auth = sg_client.create_session(url=sg_admin_url, db=sg_db, name='seth', password='******')

    # Create / Add docs from SG (set A), each with attachments
    sg_doc_bodies = document.create_docs(
        doc_id_prefix='sg_doc',
        number=num_docs_per_client,
        content={'foo': 'bar'},
        channels=sg_user_channels,
        attachments_generator=attachment.generate_2_png_10_10
    )
    sg_doc_ids = [doc['_id'] for doc in sg_doc_bodies]

    sg_bulk_docs_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sg_doc_bodies, auth=sg_user_auth)
    assert len(sg_bulk_docs_resp) == num_docs_per_client

    all_doc_ids = sg_doc_ids
    assert len(all_doc_ids) == num_docs_per_client

    if xattrs_enabled:
        # Create / Add docs from sdk (set B) — only meaningful in xattr mode,
        # where SG can import docs written directly to the bucket
        log_info('Adding docs via SDK')
        sdk_doc_bodies = document.create_docs(
            doc_id_prefix='sdk_doc',
            number=num_docs_per_client,
            content={'foo': 'bar'},
            channels=sg_user_channels,
        )
        sdk_docs = {doc['_id']: doc for doc in sdk_doc_bodies}
        sdk_doc_ids = [doc['_id'] for doc in sdk_doc_bodies]
        log_info('Creating SDK docs')
        sdk_client.upsert_multi(sdk_docs)

        all_doc_ids = sg_doc_ids + sdk_doc_ids
        assert len(all_doc_ids) == num_docs_per_client * 2

    if deletion_type == 'tombstone':
        # Set the target docs.
        # Doc meta mode: Delete Sync Gateway docs via Sync Gateway
        # XATTR mode: Delete SDK docs via Sync Gateway
        sg_doc_ids_to_delete = sg_doc_ids
        if xattrs_enabled:
            sg_doc_ids_to_delete = sdk_doc_ids

        # SG delete target docs (fetch each doc first to get its current rev)
        for doc_id in sg_doc_ids_to_delete:
            doc = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=sg_user_auth)
            deleted = sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc_id, rev=doc['_rev'], auth=sg_user_auth)
            log_info(deleted)

        if xattrs_enabled:
            # Cross-delete: SG docs are removed directly through the SDK
            log_info('Deleting SG docs via SDK')
            sdk_client.remove_multi(sg_doc_ids)

    elif deletion_type == 'purge':
        # SG Purge all docs (bulk-get first to confirm they all exist)
        all_docs, errors = sg_client.get_bulk_docs(url=sg_url, db=sg_db, doc_ids=all_doc_ids, auth=sg_user_auth)
        if xattrs_enabled:
            assert len(all_docs) == num_docs_per_client * 2
            assert len(errors) == 0
        else:
            assert len(all_docs) == num_docs_per_client
            assert len(errors) == 0
        log_info('Purging docs via Sync Gateway')
        # Purge requires the admin port
        sg_client.purge_docs(url=sg_admin_url, db=sg_db, docs=all_docs)

    else:
        raise ValueError('Invalid test parameters')

    # Verify deletes via Sync Gateway
    deleted_docs_to_verify = sg_doc_ids
    assert len(deleted_docs_to_verify) == num_docs_per_client

    # If running in xattr mode, make sure to verify SG + SDK docs
    if xattrs_enabled:
        deleted_docs_to_verify = sg_doc_ids + sdk_doc_ids
        assert len(deleted_docs_to_verify) == num_docs_per_client * 2

    if xattrs_enabled and deletion_type == 'tombstone':
        # Verify SDK + SG docs are deleted from Sync Gateway
        verify_sg_deletes(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
        # Verify SDK + SG docs are deleted from SDK
        verify_sdk_deletes(sdk_client, deleted_docs_to_verify)
    elif xattrs_enabled and deletion_type == 'purge':
        # Verify SDK + SG docs are purged from Sync Gateway
        verify_sg_purges(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
        # Verify SDK + SG docs are deleted from SDK
        verify_sdk_deletes(sdk_client, deleted_docs_to_verify)
    elif not xattrs_enabled and deletion_type == 'tombstone':
        # Doc meta: Verify SG docs are all deleted via SG
        verify_sg_deletes(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
    elif not xattrs_enabled and deletion_type == 'purge':
        # Doc meta: Verify SG docs are all deleted via SG
        verify_sg_purges(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
    else:
        raise ValueError('Invalid test parameters')

    # Recreate deleted docs from Sync Gateway (same bodies -> same doc ids)
    sg_bulk_docs_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sg_doc_bodies, auth=sg_user_auth)
    assert len(sg_bulk_docs_resp) == num_docs_per_client

    if xattrs_enabled:
        log_info('Recreating SDK docs')
        # Recreate deleted docs from SDK
        sdk_client.upsert_multi(sdk_docs)

    # Get docs via Sync Gateway
    doc_ids_to_get = sg_doc_ids
    if xattrs_enabled:
        doc_ids_to_get = sg_doc_ids + sdk_doc_ids
    # validate=False: don't validate bodies, we only check counts and revs below
    docs, errors = sg_client.get_bulk_docs(
        url=sg_url,
        db=sg_db,
        doc_ids=doc_ids_to_get,
        auth=sg_user_auth,
        validate=False
    )
    if xattrs_enabled:
        assert len(docs) == num_docs_per_client * 2
        assert len(errors) == 0
    else:
        assert len(docs) == num_docs_per_client
        assert len(errors) == 0

    if xattrs_enabled:
        # Get SDK docs and makes sure all docs were recreated
        all_docs_from_sdk = sdk_client.get_multi(doc_ids_to_get)
        assert len(all_docs_from_sdk) == num_docs_per_client * 2
        log_info('Found: {} recreated docs via SDK'.format(len(all_docs_from_sdk)))

        # Make sure we are able to get every recreated doc via SDK:
        # cross off each returned id from a scratch copy of the expected ids
        doc_ids_to_get_scratch = list(doc_ids_to_get)
        assert len(doc_ids_to_get_scratch) == num_docs_per_client * 2
        for doc_id in all_docs_from_sdk:
            doc_ids_to_get_scratch.remove(doc_id)
        assert len(doc_ids_to_get_scratch) == 0

    # Make sure we are able to get every recreated doc via Sync Gateway
    # (fresh scratch copy, crossed off in the rev-check loop below)
    doc_ids_to_get_scratch = list(doc_ids_to_get)
    if xattrs_enabled:
        # SG + SDK docs
        assert len(doc_ids_to_get_scratch) == num_docs_per_client * 2
    else:
        # SG docs
        assert len(doc_ids_to_get_scratch) == num_docs_per_client

    for doc in docs:
        # Verify expected document revisions
        if xattrs_enabled:
            if deletion_type == 'purge':
                # SG purges SG docs and recreates them, expecting 1- rev
                # SDK removes SDK docs and recreates them, expecting 1- rev
                assert doc['_rev'].startswith('1-')
            else:
                # SG tombstones SG docs and recreates them, expecting 3- rev
                # SDK removes SDK docs and recreates them, expecting 1- rev
                if doc['_id'].startswith('sg_'):
                    assert doc['_rev'].startswith('3-')
                else:
                    assert doc['_rev'].startswith('1-')
        else:
            if deletion_type == 'purge':
                # SG purges SG docs and recreates them, expecting 1- rev
                assert doc['_rev'].startswith('1-')
            else:
                # SG tombstones SG docs and recreates them, expecting 3- rev
                assert doc['_rev'].startswith('3-')

        doc_ids_to_get_scratch.remove(doc['_id'])

    # Make sure all docs were found
    assert len(doc_ids_to_get_scratch) == 0