def setUpClass(cls):  # noqa
    """Creates a Kafka and Mongo cluster instance
    """
    cls.zk = ZookeeperFixture.instance()
    cls.server = KafkaFixture.instance(0, cls.zk.host, cls.zk.port)
    cls.client = KafkaClient(cls.server.host, cls.server.port)
    cls.max_insertions = 100
    cls.flag = start_cluster()

    # Clear our config file
    config = os.environ.get('CONFIG', "config.txt")
    open(config, 'w').close()

    if cls.flag:
        try:
            cls.mongo_conn = Connection("%s:27217" % (socket.gethostname()))
            cls.mongo_db = cls.mongo_conn['test']['test']
        except ConnectionFailure:
            print("Cannot connect locally!")
            cls.flag = False
    if cls.flag:
        cls.mongo_db = cls.mongo_conn['test']['test']
        cls.conn = Connector(
            "%s:27217" % (socket.gethostname()),
            config,
            "%s:%s" % (cls.server.host, cls.server.port),
            ['test.test'],
            '_id',
            None,
            "/home/tyler/doc_managers/kafka_doc_manager/kafka_doc_manager.py",
            None)
        cls.conn.start()
def setUpClass(cls):
    start_single_mongod_instance("30000", "/MC", "MC_log")
    cls.mongo_doc = DocManager('localhost:30000')
    cls.mongo_doc._remove()
    cls.flag = start_cluster()
    if cls.flag:
        cls.conn = Connection('localhost:' + PORTS_ONE['MONGOS'],
                              replicaSet="demo-repl")
def setUpClass(cls): """ Initializes the cluster """ use_mongos = True if MAIN_ADDRESS.split(":")[1] != "27217": use_mongos = False cls.flag = start_cluster(use_mongos=use_mongos)
def setUpClass(cls): """ Starts the cluster """ cls.elastic_doc = DocManager('http://localhost:9200', auto_commit=False) cls.elastic_doc._remove() cls.flag = start_cluster() if cls.flag: cls.conn = Connection('localhost:' + PORTS_ONE['MONGOS'], replicaSet="demo-repl")
def setUpClass(cls):
    cls.flag = start_cluster()
    if cls.flag:
        cls.conn = Connection('localhost:' + PORTS_ONE['MAIN'],
                              replicaSet="demo-repl")
        # Creating a Solr object with an invalid URL doesn't raise an
        # exception, so verify the connection with an explicit commit.
        cls.solr_conn = Solr('http://localhost:8983/solr')
        try:
            cls.solr_conn.commit()
        except (SolrError, MissingSchema):
            cls.err_msg = "Cannot connect to Solr!"
            cls.flag = False
        if cls.flag:
            cls.solr_conn.delete(q='*:*')
    else:
        cls.err_msg = "Shards cannot be added to mongos"
def setUpClass(cls): """ Initializes the cluster """ cls.AUTH_KEY = None cls.flag = True if AUTH_FILE: # We want to get the credentials from this file try: key = (open(AUTH_FILE)).read() re.sub(r'\s', '', key) cls.AUTH_KEY = key except IOError: print('Could not parse authentication file!') cls.flag = False cls.err_msg = "Could not read key file!" if not start_cluster(key_file=AUTH_FILE): cls.flag = False cls.err_msg = "Shards cannot be added to mongos"
def setUpClass(cls): """ Initializes the cluster """ use_mongos = True if PORTS_ONE['MONGOS'] != "27217": use_mongos = False cls.flag = start_cluster(use_mongos=use_mongos) if cls.flag: cls.conn = Connection('localhost:' + PORTS_ONE['MONGOS'], replicaSet="demo-repl") timer = Timer(60, abort_test) cls.connector = Connector('localhost:' + PORTS_ONE["MONGOS"], 'config.txt', None, ['test.test'], '_id', None, None) cls.synchronizer = cls.connector.doc_manager timer.start() cls.connector.start() while len(cls.connector.shard_set) == 0: pass timer.cancel()
        while (len(s.search('Pauline', rows=NUMBER_OF_DOCS * 2)) != 0):
            time.sleep(15)
        a = s.search('Pauline', rows=NUMBER_OF_DOCS * 2)
        self.assertEqual(len(a), 0)
        a = s.search('Paul', rows=NUMBER_OF_DOCS * 2)
        self.assertEqual(len(a), NUMBER_OF_DOCS)
        print("PASSED TEST STRESSED ROLLBACK")

    def abort_test(self):
        print("TEST FAILED")
        sys.exit(1)


if __name__ == '__main__':
    os.system('rm config.txt; touch config.txt')
    s.delete(q='*:*')
    parser = OptionParser()

    # -m is for the main address, which is a host:port pair, ideally of the
    # mongos. For non-sharded clusters, it can be the primary.
    parser.add_option("-m", "--main", action="store", type="string",
                      dest="main_addr", default="27217")

    (options, args) = parser.parse_args()
    PORTS_ONE['MAIN'] = options.main_addr
    start_cluster()
    conn = Connection('localhost:' + PORTS_ONE['MAIN'],
                      replicaSet="demo-repl")
    unittest.main(argv=[sys.argv[0]])
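# The script above shells out (`rm config.txt; touch config.txt`) just to
# empty the connector's config file. A portable pure-Python sketch of the
# same step (an alternative, not what the original script runs):

def clear_config(path='config.txt'):
    """Truncate the config file, creating it if it does not exist."""
    open(path, 'w').close()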
parser = OptionParser()

# -m is for the main address, which is a host:port pair, ideally of the
# mongos. For non-sharded clusters, it can be the primary.
parser.add_option("-m", "--main", action="store", type="string",
                  dest="main_addr", default="27217")

(options, args) = parser.parse_args()
PORTS_ONE['MONGOS'] = options.main_addr
connection = Connector('localhost:' + PORTS_ONE["MONGOS"], 'config.txt',
                       None, ['test.test'], '_id', None, None)
doc_manager = connection.doc_manager

if options.main_addr != "27217":
    start_cluster(use_mongos=False)
else:
    start_cluster()

conn = Connection('localhost:' + PORTS_ONE['MONGOS'],
                  replicaSet="demo-repl")
t = Timer(60, abort_test)
t.start()
connection.start()
while len(connection.shard_set) == 0:
    pass
t.cancel()
unittest.main(argv=[sys.argv[0]])
connection.join()
    def test_rollback(self):
        """Test rollback in oplog_manager. Assertion failure if it doesn't pass
            We force a rollback by inserting a doc, killing the primary,
            inserting another doc, killing the new primary, and then
            restarting both.
        """
        os.system('rm config.txt; touch config.txt')
        start_cluster()
        test_oplog, primary_conn, mongos, oplog_coll = self.get_new_oplog()

        solr = DocManager()
        test_oplog.doc_manager = solr
        solr._delete()          # equivalent to solr.delete(q='*:*')

        obj1 = ObjectId('4ff74db3f646462b38000001')

        mongos['test']['test'].remove({})
        mongos['test']['test'].insert({'_id': obj1, 'name': 'paulie'}, safe=1)
        while (mongos['test']['test'].find().count() != 1):
            time.sleep(1)
        cutoff_ts = test_oplog.get_last_oplog_timestamp()

        obj2 = ObjectId('4ff74db3f646462b38000002')
        first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),
                     'ns': 'test.test', '_id': obj1}

        # kill the primary and wait for the secondary to take over;
        # both are restarted further down
        killMongoProc(primary_conn.host, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection('localhost', int(PORTS_ONE['SECONDARY']))

        admin = new_primary_conn['admin']
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)

        count = 0
        while True:
            try:
                current_conn = mongos['test']['test']
                current_conn.insert({'_id': obj2, 'name': 'paul'}, safe=1)
                break
            except Exception:
                count += 1
                if count > 60:
                    string = 'Call to insert doc failed too many times'
                    logging.error(string)
                    sys.exit(1)
                time.sleep(1)
                continue

        while (mongos['test']['test'].find().count() != 2):
            print(mongos['test']['test'].find().count())
            time.sleep(1)

        killMongoProc(primary_conn.host, PORTS_ONE['SECONDARY'])

        startMongoProc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                       "/replset1a.log", None)

        # wait for master to be established
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        startMongoProc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                       "/replset1b.log", None)

        # wait for secondary to be established
        admin = new_primary_conn['admin']
        while admin.command("replSetGetStatus")['myState'] != 2:
            time.sleep(1)

        while retry_until_ok(mongos['test']['test'].find().count) != 1:
            time.sleep(1)

        self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])
        self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])

        last_ts = test_oplog.get_last_oplog_timestamp()
        second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),
                      'ns': 'test.test', '_id': obj2}

        test_oplog.doc_manager.upsert(first_doc)
        test_oplog.doc_manager.upsert(second_doc)

        test_oplog.rollback()
        test_oplog.doc_manager.commit()
        results = solr._search()

        assert(len(results) == 1)

        results_doc = results[0]
        self.assertEqual(results_doc['name'], 'paulie')
        self.assertTrue(results_doc['_ts'] <= bson_ts_to_long(cutoff_ts))

        #test_oplog.join()
        print("PASSED TEST ROLLBACK")
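# The rollback test above waits on cluster state with open-ended
# `while ...: time.sleep(1)` loops. A minimal sketch of a bounded polling
# helper that could replace them and fail fast instead of hanging forever;
# `wait_for` is hypothetical and not part of the original suite.

import time


def wait_for(condition, timeout=60, interval=1):
    """Poll `condition` (a zero-argument callable) until it returns a truthy
    value or `timeout` seconds elapse; return True on success, False on
    timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(interval)
    return False

# Example (hypothetical) usage against the collection handle used above:
#   assert wait_for(lambda: mongos['test']['test'].find().count() == 2)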
dest="main_addr", default="27217") #-a is for the auth address parser.add_option("-a", "--auth", action="store", type="string", dest="auth_file", default="") #-u is for the auth username parser.add_option("-u", "--username", action="store", type="string", dest="auth_user", default="__system") (options, args) = parser.parse_args() PORTS_ONE["MAIN"] = options.main_addr if options.auth_file != "": start_cluster(key_file=options.auth_file) try: file = open(options.auth_file) key = file.read() re.sub(r'\s', '', key) AUTH_KEY = key AUTH_USERNAME = options.auth_user except: # logger.error('Could not parse authentication file!') exit(1) else: start_cluster() conn = Connection('localhost:' + PORTS_ONE['MAIN'], replicaSet="demo-repl") unittest.main(argv=[sys.argv[0]])
print ("PASSED INITIAL DUMP TEST") def abort_test(self): print("TEST FAILED") sys.exit(1) if __name__ == '__main__': os.system('rm config.txt; touch config.txt') parser = OptionParser() #-m is for the main address, which is a host:port pair, ideally of the #mongos. For non sharded clusters, it can be the primary. parser.add_option("-m", "--main", action="store", type="string", dest="main_addr", default="27217") (options, args) = parser.parse_args() PORTS_ONE['MONGOS'] = options.main_addr connector = Connector('localhost:' + PORTS_ONE["MONGOS"], 'config.txt', None, ['test.test'], '_id', None, None) doc_manager = connector.doc_manager if options.main_addr != "27217": start_cluster(use_mongos=False) else: start_cluster() conn = Connection('localhost:' + PORTS_ONE['MONGOS'], replicaSet="demo-repl") unittest.main(argv=[sys.argv[0]]) connector.join()
type="string", dest="auth_file", default="") #-u is for the auth username parser.add_option("-u", "--username", action="store", type="string", dest="auth_user", default="__system") (options, args) = parser.parse_args() if options.auth_file != "": start_cluster(sharded=True, key_file=options.auth_file) try: file = open(options.auth_file) key = file.read() re.sub(r'\s', '', key) AUTH_KEY = key AUTH_USERNAME = options.auth_user except: # logger.error('Could not parse authentication file!') exit(1) else: start_cluster(sharded=True) conn = Connection('localhost:' + PORTS_ONE['MONGOS']) unittest.main(argv=[sys.argv[0]])
default="") #-u is for the auth username parser.add_option("-u", "--username", action="store", type="string", dest="auth_user", default="__system") (options, args) = parser.parse_args() PORTS_ONE["MAIN"] = options.main_addr if options.auth_file != "": start_cluster(key_file=options.auth_file) try: file = open(options.auth_file) key = file.read() re.sub(r'\s', '', key) AUTH_KEY = key AUTH_USERNAME = options.auth_user except: # logger.error('Could not parse authentication file!') exit(1) else: start_cluster() conn = Connection('localhost:' + PORTS_ONE['MAIN'], replicaSet="demo-repl") unittest.main(argv=[sys.argv[0]])
os.system('rm config.txt; touch config.txt')
parser = OptionParser()

# -a is to specify the auth file.
parser.add_option("-a", "--auth", action="store", type="string",
                  dest="auth_file", default="")

# -u is for the auth username
parser.add_option("-u", "--username", action="store", type="string",
                  dest="auth_user", default="__system")

(options, args) = parser.parse_args()

if options.auth_file != "":
    start_cluster(sharded=True, key_file=options.auth_file)
    try:
        key_file = open(options.auth_file)
        key = key_file.read()
        key = re.sub(r'\s', '', key)
        AUTH_KEY = key
        AUTH_USERNAME = options.auth_user
    except IOError:
        # logger.error('Could not parse authentication file!')
        exit(1)
else:
    start_cluster(sharded=True)

conn = Connection('localhost:' + PORTS_ONE['MONGOS'])
unittest.main(argv=[sys.argv[0]])