def _export_message(self, uuid, doc):
    """
    Given a UUID and a CouchDocument, save the document directly in the
    couchdb that serves as a backend for Soledad, in a db accessible to
    the recipient of the mail.

    :param uuid: the mail owner's uuid
    :type uuid: str
    :param doc: CouchDocument that represents the email
    :type doc: CouchDocument

    :return: True if it's ok to remove the message, False otherwise
    :rtype: bool
    """
    if uuid is None or doc is None:
        log.msg("_export_message: Something went wrong, here's all "
                "I know: %r | %r" % (uuid, doc))
        return False
    log.msg("Exporting message for %s" % (uuid,))
    db = CouchDatabase(self._mail_couch_url, "user-%s" % (uuid,))
    db.put_doc(doc)
    log.msg("Done exporting")
    return True
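# Hedged usage sketch (not part of the original module): a delivery loop
# might call _export_message and only discard a queued message once the
# export to the recipient's couch db succeeded. `self._queue` and the
# `uuid`/`doc` attributes of the queued items are hypothetical names.
def _process_export_queue_example(self):
    for queued in list(self._queue):
        if self._export_message(queued.uuid, queued.doc):
            # the return value signals it is ok to remove the message
            self._queue.remove(queued)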
def test_encrypted_sym_sync_with_unicode_passphrase(self):
    """
    Test the complete syncing chain between two soledad dbs using a
    Soledad server backed by a couch database, using a unicode
    passphrase.
    """
    self.startServer()
    # instantiate soledad and create a document
    sol1 = self._soledad_instance(
        # token is verified in test_target.make_token_soledad_app
        auth_token='auth-token',
        passphrase=u'ãáàäéàëíìïóòöõúùüñç',
    )
    _, doclist = sol1.get_all_docs()
    self.assertEqual([], doclist)
    doc1 = sol1.create_doc(json.loads(simple_doc))
    # sync with server
    sol1._server_url = self.getURL()
    sol1.sync()
    # assert doc was sent to couch db
    db = CouchDatabase(
        self._couch_url,
        # the name of the user database is "user-<uuid>".
        'user-user-uuid',
    )
    _, doclist = db.get_all_docs()
    self.assertEqual(1, len(doclist))
    couchdoc = doclist[0]
    # assert document structure in couch server
    self.assertEqual(doc1.doc_id, couchdoc.doc_id)
    self.assertEqual(doc1.rev, couchdoc.rev)
    self.assertEqual(6, len(couchdoc.content))
    self.assertTrue(target.ENC_JSON_KEY in couchdoc.content)
    self.assertTrue(target.ENC_SCHEME_KEY in couchdoc.content)
    self.assertTrue(target.ENC_METHOD_KEY in couchdoc.content)
    self.assertTrue(target.ENC_IV_KEY in couchdoc.content)
    self.assertTrue(target.MAC_KEY in couchdoc.content)
    self.assertTrue(target.MAC_METHOD_KEY in couchdoc.content)
    # instantiate soledad with empty db, but with same secrets path
    sol2 = self._soledad_instance(
        prefix='x',
        auth_token='auth-token',
        passphrase=u'ãáàäéàëíìïóòöõúùüñç',
    )
    _, doclist = sol2.get_all_docs()
    self.assertEqual([], doclist)
    sol2._secrets_path = sol1.secrets_path
    sol2._load_secrets()
    sol2._set_secret_id(sol1._secret_id)
    # sync the new instance
    sol2._server_url = self.getURL()
    sol2.sync()
    _, doclist = sol2.get_all_docs()
    self.assertEqual(1, len(doclist))
    doc2 = doclist[0]
    # assert incoming doc is equal to the first sent doc
    self.assertEqual(doc1, doc2)
def user_db_create(args):
    from leap.soledad.common.couch import CouchDatabase
    url = 'http://localhost:%d/user-%s' % (args.port, args.uuid)
    try:
        CouchDatabase.open_database(url=url, create=False, replica_uid=None)
        print '[*] error: database "user-%s" already exists' % args.uuid
        exit(1)
    except DatabaseDoesNotExist:
        CouchDatabase.open_database(url=url, create=True, replica_uid=None)
        print '[+] database created: user-%s' % args.uuid
def tearDown(self):
    main_test_class = getattr(self, 'main_test_class', None)
    if main_test_class is not None:
        main_test_class.tearDown(self)
    # delete the test database
    try:
        db = CouchDatabase(self._couch_url, 'test')
        db.delete_database()
    except DatabaseDoesNotExist:
        pass
    BaseSoledadTest.tearDown(self)
    CouchDBTestCase.tearDown(self)
def setUp(self):
    TestCaseWithServer.setUp(self)
    CouchDBTestCase.setUp(self)
    self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
    self._couch_url = 'http://localhost:' + str(self.wrapper.port)
    # create the databases
    CouchDatabase.open_database(urljoin(self._couch_url, 'shared'),
                                create=True, ensure_ddocs=True)
    CouchDatabase.open_database(urljoin(self._couch_url, 'tokens'),
                                create=True, ensure_ddocs=True)
    self._state = CouchServerState(self._couch_url, 'shared', 'tokens')
def setup(self):
    if self._create:
        return CouchDatabase.open_database(url=self._remote_db_url,
                                           create=True,
                                           replica_uid=None)
    else:
        _request('put', self._remote_db_url, do=False)
def put_doc(self, uuid, doc):
    """
    Update a document.

    If the document currently has conflicts, put will fail.
    If the database specifies a maximum document size and the document
    exceeds it, put will fail and raise a DocumentTooBig exception.

    :param uuid: The uuid of a user
    :type uuid: str
    :param doc: A Document with new content.
    :type doc: leap.soledad.common.couch.CouchDocument

    :return: A deferred which fires with the new revision identifier for
             the document if the Document object has been updated, or
             which fails with the corresponding exception if there was
             any error.
    """
    # TODO: that should be implemented with paisley
    url = self._mail_couch_url + "/user-%s" % (uuid,)
    try:
        db = CouchDatabase.open_database(url, create=False)
        return defer.succeed(db.put_doc(doc))
    except Exception as e:
        return defer.fail(e)
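# Hedged usage sketch (not from the original source): put_doc above returns
# a Deferred, so a caller attaches callbacks rather than using the revision
# directly. `mail_store`, `user_uuid` and `couch_doc` are hypothetical names.
from twisted.python import log


def store_incoming_doc_example(mail_store, user_uuid, couch_doc):
    d = mail_store.put_doc(user_uuid, couch_doc)
    d.addCallback(lambda new_rev: log.msg("stored, new rev: %s" % new_rev))
    d.addErrback(lambda failure: log.msg("put_doc failed: %r" % failure))
    return d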
def test_two_concurrent_syncs_do_not_overlap_no_docs(self):
    self.startServer()
    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self.couch_url, 'user-' + self.user),
        create=True,
        ensure_ddocs=True)
    sol = self._soledad_instance(
        user=self.user, server_url=self.getURL())

    d1 = sol.sync()
    d2 = sol.sync()

    def _assert_syncs_do_not_overlap(thearg):
        # recover sync times
        sync_times = []
        for key in sol._dbsyncer.sync_times:
            sync_times.append(sol._dbsyncer.sync_times[key])
        sync_times.sort(key=lambda s: s['start'])
        self.assertTrue(
            (sync_times[0]['start'] < sync_times[0]['end'] and
             sync_times[0]['end'] < sync_times[1]['start'] and
             sync_times[1]['start'] < sync_times[1]['end']))
        db.delete_database()
        db.close()
        sol.close()

    d = defer.gatherResults([d1, d2])
    d.addBoth(_assert_syncs_do_not_overlap)
    return d
def setUp(self):
    # the order of the following initializations is crucial because of
    # dependencies.
    # XXX explain better
    CouchDBTestCase.setUp(self)
    self._couch_url = 'http://localhost:' + str(self.wrapper.port)
    self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
    TestCaseWithServer.setUp(self)
    # create the databases
    CouchDatabase.open_database(urljoin(self._couch_url, 'shared'),
                                create=True, ensure_ddocs=True)
    CouchDatabase.open_database(urljoin(self._couch_url, 'tokens'),
                                create=True, ensure_ddocs=True)
    self._state = CouchServerState(self._couch_url)
def setUp(self):
    TestCaseWithServer.setUp(self)
    CouchDBTestCase.setUp(self)
    self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
    self._couch_url = 'http://localhost:' + str(self.wrapper.port)
    # create the databases
    CouchDatabase.open_database(
        urljoin(self._couch_url, 'shared'),
        create=True,
        ensure_ddocs=True)
    CouchDatabase.open_database(
        urljoin(self._couch_url, 'tokens'),
        create=True,
        ensure_ddocs=True)
    self._state = CouchServerState(
        self._couch_url, 'shared', 'tokens')
def test_two_concurrent_syncs_do_not_overlap_no_docs(self):
    self.startServer()
    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self.couch_url, 'user-' + self.user),
        create=True)
    sol = self._soledad_instance(
        user=self.user, server_url=self.getURL())

    d1 = sol.sync()
    d2 = sol.sync()

    def _assert_syncs_do_not_overlap(thearg):
        # recover sync times
        sync_times = []
        for key in sol._dbsyncer.sync_times:
            sync_times.append(sol._dbsyncer.sync_times[key])
        sync_times.sort(key=lambda s: s['start'])
        self.assertTrue(
            (sync_times[0]['start'] < sync_times[0]['end'] and
             sync_times[0]['end'] < sync_times[1]['start'] and
             sync_times[1]['start'] < sync_times[1]['end']))
        db.delete_database()
        db.close()
        sol.close()

    d = defer.gatherResults([d1, d2])
    d.addBoth(_assert_syncs_do_not_overlap)
    return d
def find_max_upload_size(db_uri):
    db = CouchDatabase.open_database(db_uri, False)
    couch_db = Database(db_uri)
    logger.debug('Database URI: %s' % db_uri)
    # delete eventual leftover from last run
    if 'largedoc' in couch_db:
        delete_doc(couch_db)
    # phase 1: increase upload size exponentially
    logger.info('Starting phase 1: increasing size exponentially.')
    size = 1
    while True:
        if upload(db, size, couch_db):
            size *= 2
        else:
            break
    # phase 2: binary search for maximum value
    unable = size
    able = size / 2
    logger.info('Starting phase 2: binary search for maximum value.')
    while unable - able > 1:
        size = able + ((unable - able) / 2)
        if upload(db, size, couch_db):
            able = size
        else:
            unable = size
    return able
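# Minimal sketch of the search strategy used by find_max_upload_size above,
# isolated from couch: grow the candidate size exponentially until an
# attempt fails, then binary-search the gap between the last success and
# the first failure. `attempt` is a stand-in for the upload() call.
def find_max_value_example(attempt):
    size = 1
    while attempt(size):
        size *= 2
    # `size` is now the first failing value, `size / 2` the last success
    unable, able = size, size / 2
    while unable - able > 1:
        size = able + ((unable - able) / 2)
        if attempt(size):
            able = size
        else:
            unable = size
    return able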
def tearDown(self):
    CouchDBTestCase.tearDown(self)
    TestCaseWithServer.tearDown(self)
    # delete remote database
    db = CouchDatabase.open_database(urljoin(self._couch_url, 'shared'),
                                     create=True, ensure_ddocs=True)
    db.delete_database()
def setUp(self):
    TestCaseWithServer.setUp(self)
    CouchDBTestCase.setUp(self)
    self.user = ('user-%s' % uuid4().hex)
    self.db = CouchDatabase.open_database(
        urljoin(self.couch_url, 'user-' + self.user),
        create=True,
        replica_uid='replica')
    self.startTwistedServer()
def setUp(self):
    # the order of the following initializations is crucial because of
    # dependencies.
    # XXX explain better
    CouchDBTestCase.setUp(self)
    self._couch_url = 'http://localhost:' + str(self.wrapper.port)
    self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
    TestCaseWithServer.setUp(self)
    # create the databases
    CouchDatabase.open_database(
        urljoin(self._couch_url, 'shared'),
        create=True,
        ensure_ddocs=True)
    CouchDatabase.open_database(
        urljoin(self._couch_url, 'tokens'),
        create=True,
        ensure_ddocs=True)
    self._state = CouchServerState(self._couch_url)
def tearDown(self):
    # delete remote database
    db = CouchDatabase.open_database(
        urljoin(self._couch_url, 'shared'),
        create=True,
        ensure_ddocs=True)
    db.delete_database()
    CouchDBTestCase.tearDown(self)
    TestCaseWithServer.tearDown(self)
def setUp(self):
    TestCaseWithServer.setUp(self)
    CouchDBTestCase.setUp(self)
    self._couch_url = 'http://localhost:' + str(self.wrapper.port)
    self.db = CouchDatabase.open_database(
        urljoin(self._couch_url, 'user-user-uuid'),
        create=True,
        replica_uid='replica',
        ensure_ddocs=True)
    self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
def setUp(self):
    TestCaseWithServer.setUp(self)
    CouchDBTestCase.setUp(self)
    self._couch_url = 'http://localhost:' + str(self.wrapper.port)
    self.db = CouchDatabase.open_database(
        urljoin(self._couch_url, 'user-user-uuid'),
        create=True,
        replica_uid='replica',
        ensure_ddocs=True)
    self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
def _create_database(self, replica_uid=None, dbname=None):
    """
    Create db and append to a list, allowing test to close it later
    """
    dbname = dbname or ('test-%s' % uuid4().hex)
    db = CouchDatabase.open_database(
        urljoin(self.couch_url, dbname),
        True,
        replica_uid=replica_uid or 'test')
    self.dbs.append(db)
    return db
def test_encrypted_sym_sync(self):
    """
    Test the complete syncing chain between two soledad dbs using a
    Soledad server backed by a couch database.
    """
    self.startServer()
    # instantiate soledad and create a document
    sol1 = self._soledad_instance(
        # token is verified in test_target.make_token_soledad_app
        auth_token='auth-token'
    )
    _, doclist = sol1.get_all_docs()
    self.assertEqual([], doclist)
    doc1 = sol1.create_doc(json.loads(simple_doc))
    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self._couch_url, 'user-user-uuid'),
        create=True,
        ensure_ddocs=True)
    # sync with server
    sol1._server_url = self.getURL()
    sol1.sync()
    # assert doc was sent to couch db
    _, doclist = db.get_all_docs()
    self.assertEqual(1, len(doclist))
    couchdoc = doclist[0]
    # assert document structure in couch server
    self.assertEqual(doc1.doc_id, couchdoc.doc_id)
    self.assertEqual(doc1.rev, couchdoc.rev)
    self.assertEqual(6, len(couchdoc.content))
    self.assertTrue(crypto.ENC_JSON_KEY in couchdoc.content)
    self.assertTrue(crypto.ENC_SCHEME_KEY in couchdoc.content)
    self.assertTrue(crypto.ENC_METHOD_KEY in couchdoc.content)
    self.assertTrue(crypto.ENC_IV_KEY in couchdoc.content)
    self.assertTrue(crypto.MAC_KEY in couchdoc.content)
    self.assertTrue(crypto.MAC_METHOD_KEY in couchdoc.content)
    # instantiate soledad with empty db, but with same secrets path
    sol2 = self._soledad_instance(prefix='x', auth_token='auth-token')
    _, doclist = sol2.get_all_docs()
    self.assertEqual([], doclist)
    sol2._secrets_path = sol1.secrets_path
    sol2._load_secrets()
    sol2._set_secret_id(sol1._secret_id)
    # sync the new instance
    sol2._server_url = self.getURL()
    sol2.sync()
    _, doclist = sol2.get_all_docs()
    self.assertEqual(1, len(doclist))
    doc2 = doclist[0]
    # assert incoming doc is equal to the first sent doc
    self.assertEqual(doc1, doc2)
    db.delete_database()
    db.close()
    sol1.close()
    sol2.close()
def test_encrypted_sym_sync(self):
    """
    Test the complete syncing chain between two soledad dbs using a
    Soledad server backed by a couch database.
    """
    self.startServer()
    # instantiate soledad and create a document
    sol1 = self._soledad_instance(
        # token is verified in test_target.make_token_soledad_app
        auth_token='auth-token')
    _, doclist = sol1.get_all_docs()
    self.assertEqual([], doclist)
    doc1 = sol1.create_doc(json.loads(simple_doc))
    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self._couch_url, 'user-user-uuid'),
        create=True, ensure_ddocs=True)
    # sync with server
    sol1._server_url = self.getURL()
    sol1.sync()
    # assert doc was sent to couch db
    _, doclist = db.get_all_docs()
    self.assertEqual(1, len(doclist))
    couchdoc = doclist[0]
    # assert document structure in couch server
    self.assertEqual(doc1.doc_id, couchdoc.doc_id)
    self.assertEqual(doc1.rev, couchdoc.rev)
    self.assertEqual(6, len(couchdoc.content))
    self.assertTrue(crypto.ENC_JSON_KEY in couchdoc.content)
    self.assertTrue(crypto.ENC_SCHEME_KEY in couchdoc.content)
    self.assertTrue(crypto.ENC_METHOD_KEY in couchdoc.content)
    self.assertTrue(crypto.ENC_IV_KEY in couchdoc.content)
    self.assertTrue(crypto.MAC_KEY in couchdoc.content)
    self.assertTrue(crypto.MAC_METHOD_KEY in couchdoc.content)
    # instantiate soledad with empty db, but with same secrets path
    sol2 = self._soledad_instance(prefix='x', auth_token='auth-token')
    _, doclist = sol2.get_all_docs()
    self.assertEqual([], doclist)
    sol2._secrets_path = sol1.secrets_path
    sol2._load_secrets()
    sol2._set_secret_id(sol1._secret_id)
    # sync the new instance
    sol2._server_url = self.getURL()
    sol2.sync()
    _, doclist = sol2.get_all_docs()
    self.assertEqual(1, len(doclist))
    doc2 = doclist[0]
    # assert incoming doc is equal to the first sent doc
    self.assertEqual(doc1, doc2)
    db.delete_database()
    db.close()
    sol1.close()
    sol2.close()
def setUp(self):
    TestCaseWithServer.setUp(self)
    CouchDBTestCase.setUp(self)
    self.user = ('user-%s' % uuid4().hex)
    self.db = CouchDatabase.open_database(
        urljoin(self.couch_url, 'user-' + self.user),
        create=True,
        replica_uid='replica',
        ensure_ddocs=True)
    self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
    self.startTwistedServer()
def _create_database(self, replica_uid=None, dbname=None):
    """
    Create db and append to a list, allowing test to close it later
    """
    dbname = dbname or ('test-%s' % uuid4().hex)
    db = CouchDatabase.open_database(
        urljoin(self.couch_url, dbname),
        True,
        replica_uid=replica_uid or 'test')
    self.dbs.append(db)
    return db
def open_database(self, dbname):
    """
    Open a couch database.

    :param dbname: The name of the database to open.
    :type dbname: str

    :return: The SoledadBackend object.
    :rtype: SoledadBackend
    """
    url = urljoin(self.couch_url, dbname)
    db = CouchDatabase.open_database(url, create=False)
    return db
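# Hedged usage sketch (not part of the original class): open a user
# database through the helper above and count its documents. `state` is
# assumed to be an object exposing the open_database method defined above.
def count_user_docs_example(state, uuid):
    db = state.open_database('user-%s' % uuid)
    gen, docs = db.get_all_docs()
    return len(docs)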
def test_touch_updates_remote_representation(self):
    self.startTwistedServer()
    user = '******' + uuid4().hex
    server_url = 'http://%s:%d' % (self.server_address)
    client = self._soledad_instance(user=user, server_url=server_url)
    deprecated_client = deprecate_client_crypto(
        self._soledad_instance(user=user, server_url=server_url))

    self.make_app()
    remote = self.request_state._create_database(replica_uid=client.uuid)
    remote = CouchDatabase.open_database(
        urljoin(self.couch_url, 'user-' + user),
        create=True)

    # ensure remote db is empty
    gen, docs = remote.get_all_docs()
    assert gen == 0
    assert len(docs) == 0

    # create a doc with deprecated client and sync
    yield deprecated_client.create_doc(json.loads(simple_doc))
    yield deprecated_client.sync()

    # check for doc in remote db
    gen, docs = remote.get_all_docs()
    assert gen == 1
    assert len(docs) == 1
    doc = docs.pop()
    content = doc.content
    assert common_crypto.ENC_JSON_KEY in content
    assert common_crypto.ENC_SCHEME_KEY in content
    assert common_crypto.ENC_METHOD_KEY in content
    assert common_crypto.ENC_IV_KEY in content
    assert common_crypto.MAC_KEY in content
    assert common_crypto.MAC_METHOD_KEY in content

    # "touch" the document with a newer client and sync
    _, docs = yield client.get_all_docs()
    yield client.put_doc(doc)
    yield client.sync()

    # check for newer representation of doc in remote db
    gen, docs = remote.get_all_docs()
    assert gen == 2
    assert len(docs) == 1
    doc = docs.pop()
    content = doc.content
    assert len(content) == 1
    assert 'raw' in content
def setUp(self):
    # the order of the following initializations is crucial because of
    # dependencies.
    # XXX explain better
    CouchDBTestCase.setUp(self)
    self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
    TestCaseWithServer.setUp(self)
    # create the databases
    db = CouchDatabase.open_database(
        urljoin(self.couch_url, ('shared-%s' % (uuid4().hex))),
        create=True,
        ensure_ddocs=True)
    self.addCleanup(db.delete_database)
    self._state = CouchServerState(self.couch_url)
    self._state.open_database = mock.Mock(return_value=db)
def test_sync_many_small_files(self):
    """
    Test if Soledad can sync many small files.
    """
    number_of_docs = 100
    self.startServer()
    # instantiate soledad and create a document
    sol1 = self._soledad_instance(
        # token is verified in test_target.make_token_soledad_app
        auth_token='auth-token'
    )
    _, doclist = sol1.get_all_docs()
    self.assertEqual([], doclist)
    # create many small files
    for i in range(0, number_of_docs):
        sol1.create_doc(json.loads(simple_doc))
    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self._couch_url, 'user-user-uuid'),
        create=True,
        ensure_ddocs=True)
    # sync with server
    sol1._server_url = self.getURL()
    sol1.sync()
    # instantiate soledad with empty db, but with same secrets path
    sol2 = self._soledad_instance(prefix='x', auth_token='auth-token')
    _, doclist = sol2.get_all_docs()
    self.assertEqual([], doclist)
    sol2._secrets_path = sol1.secrets_path
    sol2._load_secrets()
    sol2._set_secret_id(sol1._secret_id)
    # sync the new instance
    sol2._server_url = self.getURL()
    sol2.sync()
    _, doclist = sol2.get_all_docs()
    self.assertEqual(number_of_docs, len(doclist))
    # assert incoming docs are equal to sent docs
    for doc in doclist:
        self.assertEqual(sol1.get_doc(doc.doc_id), doc)
    # delete remote database
    db.delete_database()
    db.close()
    sol1.close()
    sol2.close()
def test_sync_very_large_files(self):
    """
    Test if Soledad can sync very large files.
    """
    # define the size of the "very large file"
    length = 100 * (10**6)  # 100 MB
    self.startServer()
    # instantiate soledad and create a document
    sol1 = self._soledad_instance(
        # token is verified in test_target.make_token_soledad_app
        auth_token='auth-token'
    )
    _, doclist = sol1.get_all_docs()
    self.assertEqual([], doclist)
    content = binascii.hexlify(os.urandom(length / 2))  # len() == length
    doc1 = sol1.create_doc({'data': content})
    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self._couch_url, 'user-user-uuid'),
        create=True,
        ensure_ddocs=True)
    # sync with server
    sol1._server_url = self.getURL()
    sol1.sync()
    # instantiate soledad with empty db, but with same secrets path
    sol2 = self._soledad_instance(prefix='x', auth_token='auth-token')
    _, doclist = sol2.get_all_docs()
    self.assertEqual([], doclist)
    sol2._secrets_path = sol1.secrets_path
    sol2._load_secrets()
    sol2._set_secret_id(sol1._secret_id)
    # sync the new instance
    sol2._server_url = self.getURL()
    sol2.sync()
    _, doclist = sol2.get_all_docs()
    self.assertEqual(1, len(doclist))
    doc2 = doclist[0]
    # assert incoming doc is equal to the first sent doc
    self.assertEqual(doc1, doc2)
    # delete remote database
    db.delete_database()
    db.close()
    sol1.close()
    sol2.close()
def test_sync_many_small_files(self):
    """
    Test if Soledad can sync many small files.
    """
    number_of_docs = 100
    self.startServer()
    # instantiate soledad and create a document
    sol1 = self._soledad_instance(
        # token is verified in test_target.make_token_soledad_app
        auth_token='auth-token')
    _, doclist = sol1.get_all_docs()
    self.assertEqual([], doclist)
    # create many small files
    for i in range(0, number_of_docs):
        sol1.create_doc(json.loads(simple_doc))
    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self._couch_url, 'user-user-uuid'),
        create=True, ensure_ddocs=True)
    # sync with server
    sol1._server_url = self.getURL()
    sol1.sync()
    # instantiate soledad with empty db, but with same secrets path
    sol2 = self._soledad_instance(prefix='x', auth_token='auth-token')
    _, doclist = sol2.get_all_docs()
    self.assertEqual([], doclist)
    sol2._secrets_path = sol1.secrets_path
    sol2._load_secrets()
    sol2._set_secret_id(sol1._secret_id)
    # sync the new instance
    sol2._server_url = self.getURL()
    sol2.sync()
    _, doclist = sol2.get_all_docs()
    self.assertEqual(number_of_docs, len(doclist))
    # assert incoming docs are equal to sent docs
    for doc in doclist:
        self.assertEqual(sol1.get_doc(doc.doc_id), doc)
    # delete remote database
    db.delete_database()
    db.close()
    sol1.close()
    sol2.close()
def test_sync_very_large_files(self):
    """
    Test if Soledad can sync very large files.
    """
    # define the size of the "very large file"
    length = 100 * (10**6)  # 100 MB
    self.startServer()
    # instantiate soledad and create a document
    sol1 = self._soledad_instance(
        # token is verified in test_target.make_token_soledad_app
        auth_token='auth-token')
    _, doclist = sol1.get_all_docs()
    self.assertEqual([], doclist)
    content = binascii.hexlify(os.urandom(length / 2))  # len() == length
    doc1 = sol1.create_doc({'data': content})
    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self._couch_url, 'user-user-uuid'),
        create=True, ensure_ddocs=True)
    # sync with server
    sol1._server_url = self.getURL()
    sol1.sync()
    # instantiate soledad with empty db, but with same secrets path
    sol2 = self._soledad_instance(prefix='x', auth_token='auth-token')
    _, doclist = sol2.get_all_docs()
    self.assertEqual([], doclist)
    sol2._secrets_path = sol1.secrets_path
    sol2._load_secrets()
    sol2._set_secret_id(sol1._secret_id)
    # sync the new instance
    sol2._server_url = self.getURL()
    sol2.sync()
    _, doclist = sol2.get_all_docs()
    self.assertEqual(1, len(doclist))
    doc2 = doclist[0]
    # assert incoming doc is equal to the first sent doc
    self.assertEqual(doc1, doc2)
    # delete remote database
    db.delete_database()
    db.close()
    sol1.close()
    sol2.close()
    UUID = some-uuid
    PAYLOAD = /path/to/payload/file
    NUM_DOCS = 100
"""
import os

from ConfigParser import ConfigParser

from leap.soledad.common.couch import CouchDatabase


parser = ConfigParser()
parser.read('defaults.conf')

UUID = parser.get('client', 'uuid')
PAYLOAD = parser.get('sync', 'payload')
NUM_DOCS = int(parser.get('sync', 'num_docs'))

db = CouchDatabase.open_database(
    'http://127.0.0.1:5984/user-%s' % UUID,
    False)  # should create database?

payload = None
if os.path.isfile(PAYLOAD):
    with open(PAYLOAD, 'r') as f:
        payload = f.read()

for i in xrange(NUM_DOCS):
    db.create_doc({'payload': payload})

db.close()
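# A plausible defaults.conf matching the parser.get() calls above; the
# section and option names come from the script, the values are only
# placeholders:
#
#   [client]
#   uuid = some-uuid
#
#   [sync]
#   payload = /path/to/payload/file
#   num_docs = 100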
def _couch_ensure_database(self, dbname):
    db = CouchDatabase.open_database(self._couch_url + '/' + dbname,
                                     create=True,
                                     ensure_ddocs=True)
    return db, db._replica_uid
def _create_database(self, dbname):
    return CouchDatabase.open_database(
        urljoin(self._couch_url, dbname),
        True,
        replica_uid=dbname,
        ensure_ddocs=True)
def get_u1db_database(dbname, port):
    return CouchDatabase.open_database(
        'http://127.0.0.1:%d/%s' % (port, dbname),
        True,
        ensure_ddocs=True)
from leap.soledad.common.couch import CouchDatabase

if len(sys.argv) != 2:
    print 'Usage: %s <uuid>' % sys.argv[0]
    exit(1)

uuid = sys.argv[1]

# get couch url
cp = ConfigParser()
cp.read('/etc/soledad/soledad-server.conf')
url = cp.get('soledad-server', 'couch_url')

# access user db
dbname = 'user-%s' % uuid
db = CouchDatabase(url, dbname)

# get replica info
replica_uid = db._replica_uid
gen, docs = db.get_all_docs()
print "dbname: %s" % dbname
print "replica_uid: %s" % replica_uid
print "generation: %d" % gen

# get relevant docs
schemes = map(lambda d: d.content['_enc_scheme'], docs)
pubenc = filter(lambda d: d.content['_enc_scheme'] == 'pubkey', docs)
print "total number of docs: %d" % len(docs)
print "pubkey encrypted docs: %d" % len(pubenc)
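# Hedged sketch of the soledad-server.conf section read by the script above;
# the section and option names come from the cp.get() call, the value is
# only a placeholder:
#
#   [soledad-server]
#   couch_url = http://localhost:5984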
def _test_encrypted_sym_sync(self, passphrase=u'123', doc_size=2,
                             number_of_docs=1):
    """
    Test the complete syncing chain between two soledad dbs using a
    Soledad server backed by a couch database.
    """
    self.startTwistedServer()
    user = '******' + uuid4().hex

    # this will store all docs ids to avoid get_all_docs
    created_ids = []

    # instantiate soledad and create a document
    sol1 = self._soledad_instance(
        user=user,
        # token is verified in test_target.make_token_soledad_app
        auth_token='auth-token',
        passphrase=passphrase)

    # instantiate another soledad using the same secret as the previous
    # one (so we can correctly verify the mac of the synced document)
    sol2 = self._soledad_instance(
        user=user,
        prefix='x',
        auth_token='auth-token',
        secrets_path=sol1._secrets_path,
        passphrase=passphrase)

    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self.couch_url, 'user-' + user),
        create=True)

    def _db1AssertEmptyDocList(results):
        _, doclist = results
        self.assertEqual([], doclist)

    def _db1CreateDocs(results):
        deferreds = []
        for i in xrange(number_of_docs):
            content = binascii.hexlify(os.urandom(doc_size / 2))
            d = sol1.create_doc({'data': content})
            d.addCallback(created_ids.append)
            deferreds.append(d)
        return defer.DeferredList(deferreds)

    def _db1AssertDocsSyncedToServer(results):
        self.assertEqual(number_of_docs, len(created_ids))
        for soldoc in created_ids:
            couchdoc = db.get_doc(soldoc.doc_id)
            self.assertTrue(couchdoc)
            # assert document structure in couch server
            self.assertEqual(soldoc.doc_id, couchdoc.doc_id)
            self.assertEqual(soldoc.rev, couchdoc.rev)
            couch_content = couchdoc.content.keys()
            self.assertEqual(6, len(couch_content))
            self.assertTrue(crypto.ENC_JSON_KEY in couch_content)
            self.assertTrue(crypto.ENC_SCHEME_KEY in couch_content)
            self.assertTrue(crypto.ENC_METHOD_KEY in couch_content)
            self.assertTrue(crypto.ENC_IV_KEY in couch_content)
            self.assertTrue(crypto.MAC_KEY in couch_content)
            self.assertTrue(crypto.MAC_METHOD_KEY in couch_content)

    d = sol1.get_all_docs()
    d.addCallback(_db1AssertEmptyDocList)
    d.addCallback(_db1CreateDocs)
    d.addCallback(lambda _: sol1.sync())
    d.addCallback(_db1AssertDocsSyncedToServer)

    def _db2AssertEmptyDocList(results):
        _, doclist = results
        self.assertEqual([], doclist)

    def _getAllDocsFromBothDbs(results):
        d1 = sol1.get_all_docs()
        d2 = sol2.get_all_docs()
        return defer.DeferredList([d1, d2])

    d.addCallback(lambda _: sol2.get_all_docs())
    d.addCallback(_db2AssertEmptyDocList)
    d.addCallback(lambda _: sol2.sync())
    d.addCallback(_getAllDocsFromBothDbs)

    def _assertDocSyncedFromDb1ToDb2(results):
        r1, r2 = results
        _, (gen1, doclist1) = r1
        _, (gen2, doclist2) = r2
        self.assertEqual(number_of_docs, gen1)
        self.assertEqual(number_of_docs, gen2)
        self.assertEqual(number_of_docs, len(doclist1))
        self.assertEqual(number_of_docs, len(doclist2))
        self.assertEqual(doclist1[0], doclist2[0])

    d.addCallback(_assertDocSyncedFromDb1ToDb2)

    def _cleanUp(results):
        db.delete_database()
        db.close()
        sol1.close()
        sol2.close()

    d.addCallback(_cleanUp)

    return d
def couch_database(couch_url, uuid):
    db = CouchDatabase(couch_url, "user-%s" % (uuid,))
    return db
def setup(self):
    if self._create:
        return CouchDatabase.open_database(
            url=self._remote_db_url,
            create=True,
            replica_uid=None)
    else:
        _request('put', self._remote_db_url, do=False)
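# Hedged sketch (assumption, not part of the original module): the
# _request('put', ...) branch above relies on CouchDB creating a database
# in response to a plain HTTP PUT on the database URL. With the requests
# library that looks roughly like:
import requests


def create_couch_db_example(remote_db_url):
    # e.g. remote_db_url = 'http://127.0.0.1:5984/user-some-uuid'
    resp = requests.put(remote_db_url)
    # 201: created; 412: the database already exists
    return resp.status_code in (201, 412)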
def setup(self):
    return CouchDatabase.open_database(url=self._remote_db_url,
                                       create=True,
                                       replica_uid=None)
def _test_encrypted_sym_sync(self, passphrase=u'123', doc_size=2,
                             number_of_docs=1):
    """
    Test the complete syncing chain between two soledad dbs using a
    Soledad server backed by a couch database.
    """
    self.startTwistedServer()
    user = '******' + uuid4().hex

    # instantiate soledad and create a document
    sol1 = self._soledad_instance(
        user=user,
        # token is verified in test_target.make_token_soledad_app
        auth_token='auth-token',
        passphrase=passphrase)

    # instantiate another soledad using the same secret as the previous
    # one (so we can correctly verify the mac of the synced document)
    sol2 = self._soledad_instance(
        user=user,
        prefix='x',
        auth_token='auth-token',
        secrets_path=sol1._secrets_path,
        passphrase=passphrase)

    # ensure remote db exists before syncing
    db = CouchDatabase.open_database(
        urljoin(self.couch_url, 'user-' + user),
        create=True,
        ensure_ddocs=True)

    def _db1AssertEmptyDocList(results):
        _, doclist = results
        self.assertEqual([], doclist)

    def _db1CreateDocs(results):
        deferreds = []
        for i in xrange(number_of_docs):
            content = binascii.hexlify(os.urandom(doc_size / 2))
            deferreds.append(sol1.create_doc({'data': content}))
        return defer.DeferredList(deferreds)

    def _db1AssertDocsSyncedToServer(results):
        _, sol_doclist = results
        self.assertEqual(number_of_docs, len(sol_doclist))
        # assert doc was sent to couch db
        _, couch_doclist = db.get_all_docs()
        self.assertEqual(number_of_docs, len(couch_doclist))
        for i in xrange(number_of_docs):
            soldoc = sol_doclist.pop()
            couchdoc = couch_doclist.pop()
            # assert document structure in couch server
            self.assertEqual(soldoc.doc_id, couchdoc.doc_id)
            self.assertEqual(soldoc.rev, couchdoc.rev)
            self.assertEqual(6, len(couchdoc.content))
            self.assertTrue(crypto.ENC_JSON_KEY in couchdoc.content)
            self.assertTrue(crypto.ENC_SCHEME_KEY in couchdoc.content)
            self.assertTrue(crypto.ENC_METHOD_KEY in couchdoc.content)
            self.assertTrue(crypto.ENC_IV_KEY in couchdoc.content)
            self.assertTrue(crypto.MAC_KEY in couchdoc.content)
            self.assertTrue(crypto.MAC_METHOD_KEY in couchdoc.content)

    d = sol1.get_all_docs()
    d.addCallback(_db1AssertEmptyDocList)
    d.addCallback(_db1CreateDocs)
    d.addCallback(lambda _: sol1.sync())
    d.addCallback(lambda _: sol1.get_all_docs())
    d.addCallback(_db1AssertDocsSyncedToServer)

    def _db2AssertEmptyDocList(results):
        _, doclist = results
        self.assertEqual([], doclist)

    def _getAllDocsFromBothDbs(results):
        d1 = sol1.get_all_docs()
        d2 = sol2.get_all_docs()
        return defer.DeferredList([d1, d2])

    d.addCallback(lambda _: sol2.get_all_docs())
    d.addCallback(_db2AssertEmptyDocList)
    d.addCallback(lambda _: sol2.sync())
    d.addCallback(_getAllDocsFromBothDbs)

    def _assertDocSyncedFromDb1ToDb2(results):
        r1, r2 = results
        _, (gen1, doclist1) = r1
        _, (gen2, doclist2) = r2
        self.assertEqual(number_of_docs, gen1)
        self.assertEqual(number_of_docs, gen2)
        self.assertEqual(number_of_docs, len(doclist1))
        self.assertEqual(number_of_docs, len(doclist2))
        self.assertEqual(doclist1[0], doclist2[0])

    d.addCallback(_assertDocSyncedFromDb1ToDb2)

    def _cleanUp(results):
        db.delete_database()
        db.close()
        sol1.close()
        sol2.close()

    d.addCallback(_cleanUp)

    return d
def _create_database(self, dbname):
    return CouchDatabase.open_database(urljoin(self._couch_url, dbname),
                                       True,
                                       replica_uid=dbname,
                                       ensure_ddocs=True)
def get_u1db_database(dbname, port):
    return CouchDatabase.open_database(
        'http://127.0.0.1:%d/%s' % (port, dbname),
        True)
def _couch_ensure_database(self, dbname):
    db = CouchDatabase.open_database(
        self._couch_url + '/' + dbname,
        create=True,
        ensure_ddocs=True)
    return db, db._replica_uid
from leap.soledad.common.couch import CouchDatabase

if len(sys.argv) != 2:
    print 'Usage: %s <uuid>' % sys.argv[0]
    exit(1)

uuid = sys.argv[1]

# get couch url
cp = ConfigParser()
cp.read('/etc/leap/soledad-server.conf')
url = cp.get('soledad-server', 'couch_url')

# access user db
dbname = 'user-%s' % uuid
db = CouchDatabase(url, dbname)

# get replica info
replica_uid = db._replica_uid
gen, docs = db.get_all_docs()
print "dbname: %s" % dbname
print "replica_uid: %s" % replica_uid
print "generation: %d" % gen

# get relevant docs
schemes = map(lambda d: d.content['_enc_scheme'], docs)
pubenc = filter(lambda d: d.content['_enc_scheme'] == 'pubkey', docs)
print "total number of docs: %d" % len(docs)
print "pubkey encrypted docs: %d" % len(pubenc)