def setUp(self):
    """Build the common scheduler-test fixtures: fake driver, in-memory ZK,
    a resource-rich offer, a cluster definition and a Scheduler state object."""
    self._driver = FakeDriver()
    self._storage = FakeStorage(SequentialThreadingHandler())
    self._zk_client = FakeClient(storage=self._storage)
    self._zk_client.start()

    framework_id = mesos_pb2.FrameworkID()
    framework_id.value = "framework_id_0"
    self._framework_id = framework_id

    offer = mesos_pb2.Offer()
    offer.id.value = "offer_id_0"
    offer.framework_id.value = framework_id.value
    offer.slave_id.value = "slave_id_0"
    offer.hostname = "localhost"
    # 4 CPUs plus enough memory and ports to fit three 512MB tasks.
    offer.resources.extend(
        create_resources(cpus=4, mem=512 * 3, ports=set([10000, 10001, 10002])))
    self._offer = offer

    self._framework_user = "******"
    self._zk_url = "zk://host/mysos/test"
    self._cluster = MySQLCluster("cluster0", "user", "pass", 3)

    self._tmpdir = tempfile.mkdtemp()
    self._state_provider = LocalStateProvider(self._tmpdir)

    framework_info = mesos_pb2.FrameworkInfo(
        user=getpass.getuser(), name="mysos", checkpoint=False)
    self._state = Scheduler(framework_info)
def test_invalid_arguments(self):
    """promote_member() must reject a member_id that is not a valid member node."""
    client = FakeClient()
    client.start()
    manager = ClusterManager(client, "/home/my_cluster")
    with pytest.raises(ValueError) as e:
        manager.promote_member("123")
    # BUG FIX: 'e.value.message' is a Python-2-only exception attribute and
    # raises AttributeError on Python 3; str(e.value) works on both.
    assert str(e.value) == 'Invalid member_id: 123'
def setup(self, request):
    """Parameterized fixture: build a MySQLClusterLauncher wired to either a
    local or a ZooKeeper-backed state provider, depending on request.param."""
    self._driver = FakeDriver()
    self._storage = FakeStorage(SequentialThreadingHandler())
    self._zk_client = FakeClient(storage=self._storage)
    self._zk_client.start()

    offer = mesos_pb2.Offer()
    offer.id.value = "offer_id_0"
    offer.framework_id.value = "framework_id_0"
    offer.slave_id.value = "slave_id_0"
    offer.hostname = "localhost"
    # Enough memory and ports to fit three tasks.
    offer.resources.extend(
        create_resources(cpus=4, mem=512 * 3, ports=set([10000, 10001, 10002])))
    self._offer = offer

    self._framework_user = "******"

    # Some tests use the default launcher; some don't.
    self._zk_url = "zk://host/mysos/test"
    self._cluster = MySQLCluster("cluster0", "user", "pass", 3)

    # Construct the state provider based on the test parameter.
    if request.param == LocalStateProvider:
        workdir = tempfile.mkdtemp()
        self._state_provider = LocalStateProvider(workdir)
        # Clean up the scratch directory after ourselves.
        request.addfinalizer(lambda: shutil.rmtree(workdir, True))
    elif request.param == ZooKeeperStateProvider:
        self._state_provider = ZooKeeperStateProvider(self._zk_client, "/mysos/test")

    self._launcher = MySQLClusterLauncher(
        self._driver,
        self._cluster,
        self._state_provider,
        self._zk_url,
        self._zk_client,
        self._framework_user,
        "./executor.pex",
        "cmd.sh",
        Amount(5, Time.SECONDS),
        "/etc/mysos/admin_keyfile.yml",
        query_interval=Amount(150, Time.MILLISECONDS))  # Short interval.

    self._elected = threading.Event()
    self._launchers = [self._launcher]  # See teardown().

    request.addfinalizer(self.teardown)
def test_scheduler_runs():
    """
    End-to-end check that the scheduler launches 3 "no-op" MySQL tasks.

    NOTE: Due to the limitation of zake the scheduler's ZK operations are not
    propagated to executors in separate processes but they are unit-tested
    separately.
    """
    import mesos.native

    # The fake executor must already be built so the Mesos slave can fetch it.
    assert os.path.isfile('dist/fake_mysos_executor.pex')

    fake_storage = FakeStorage(SequentialThreadingHandler())
    fake_zk = FakeClient(storage=fake_storage)
    fake_zk.start()

    cluster_root = "zk://fake_host/home/mysos/clusters"
    name = "test_cluster"
    num_nodes = 3

    framework_info = FrameworkInfo(
        user=getpass.getuser(), name="mysos", checkpoint=False)
    scheduler_state = Scheduler(framework_info)

    scheduler = MysosScheduler(
        scheduler_state,
        LocalStateProvider(safe_mkdtemp()),
        getpass.getuser(),
        os.path.abspath("dist/fake_mysos_executor.pex"),
        "./fake_mysos_executor.pex",
        fake_zk,
        cluster_root,
        Amount(40, Time.SECONDS),
        "/fakepath",
        gen_encryption_key())

    driver = mesos.native.MesosSchedulerDriver(scheduler, framework_info, "local")
    driver.start()

    # Wait until the scheduler is connected and becomes available.
    assert scheduler.connected.wait(30)

    scheduler.create_cluster(name, "mysql_user", num_nodes)

    # A slave is promoted to be the master.
    deadline(
        lambda: wait_for_master(
            get_cluster_path(posixpath.join(cluster_root, 'discover'), name),
            fake_zk),
        Amount(40, Time.SECONDS))

    assert driver.stop() == DRIVER_STOPPED
def from_task(self, task, sandbox):
    """Build a MysosTaskRunner for the given task, advertising this host as
    the service instance and talking to a fake (in-memory) ZK client."""
    payload = json.loads(task.data)
    cluster_name = payload['cluster']
    port = payload['port']
    zk_url = payload['zk_url']
    _, servers, path = zookeeper.parse(zk_url)

    zk_client = FakeClient()
    zk_client.start()

    # This host/port pair is what the runner registers under the cluster path.
    member = ServiceInstance(
        Endpoint(socket.gethostbyname(socket.gethostname()), port))

    task_control = self._task_control_provider.from_task(task, sandbox)

    return MysosTaskRunner(
        member,
        zk_client,
        posixpath.join(path, cluster_name),
        NoopPackageInstaller(),
        task_control,
        Fake())
async def test_functional():
    """Test as much of the whole stack as we can."""
    plugin_list = ('zgres#zookeeper\nzgres#apt\nzgres#ec2-snapshot\nzgres#ec2\n'
                   'zgres#follow-the-leader\nzgres#select-furthest-ahead-replica')
    config = {
        'deadman': {'plugins': plugin_list},
        'apt': {
            'postgresql_cluster_name': 'main',
            'postgresql_version': '9.5',
        },
    }
    fake_zk = FakeClient()
    # Patch out the real ZK client and the EC2 metadata lookup.
    with mock.patch('zgres.zookeeper.KazooClient') as KazooClient, \
            mock.patch('zgres.ec2.boto.utils.get_instance_metadata'):
        KazooClient.return_value = fake_zk
        app = deadman.App(config)
def test_return_immediately_when_blocking_on_empty_queue_and_available_task_comes_in(
    self
):
    """
    Set up several threads waiting for work; insert several pieces of work;
    make sure each thread finishes.
    """
    # BUG FIX: the explanatory text above used to sit mid-function as a stray
    # string expression (a dead statement, not a docstring); it is now the
    # function docstring.
    client = FakeClient()
    client.start()
    queue = ZKDelayDeadlineQueue(client, "/")

    def time_get():
        # Each worker blocks on its own queue view over the shared client
        # (renamed from 'queue' to avoid shadowing the outer producer queue).
        worker_queue = ZKDelayDeadlineQueue(client, "/")
        start_time = time.time()
        with worker_queue.get(timeout=1.0) as si:
            pass
        return time.time() - start_time, si

    # Use the context manager so worker threads are joined and don't leak
    # across tests (the executor was previously never shut down).
    with ThreadPoolExecutor() as tpe:
        futures = [tpe.submit(time_get) for _ in range(3)]

        begin = time.time()
        si1 = make_si(wait_until=begin, bounce_by=begin)
        queue.put(si1)
        si2 = make_si(wait_until=begin + 0.01, bounce_by=begin + 0.01)
        queue.put(si2)
        si3 = make_si(wait_until=begin + 0.02, bounce_by=begin + 0.02)
        queue.put(si3)

        times = sorted(fut.result(timeout=2.0) for fut in futures)

    # Each getter should have returned promptly with the next available item.
    assert times[0][0] < 0.011
    assert times[0][1] == si1
    assert 0.009 < times[1][0] < 0.021
    assert times[1][1] == si2
    assert 0.019 < times[2][0] < 0.031
    assert times[2][1] == si3
def setUp(self):
    """Start a fake ZK client over in-memory storage and build the provider."""
    storage = FakeStorage(SequentialThreadingHandler())
    client = FakeClient(storage=storage)
    client.start()
    self._storage = storage
    self._client = client
    self._state_provider = ZooKeeperStateProvider(client, '/mysos')
def test_list_deploy_queue(mock_delay_deadline_queue_class, mock_kazoo_client):
    """list_deploy_queue() serializes both queue partitions into plain dicts."""
    request = mock.Mock()
    settings.system_paasta_config = mock.create_autospec(SystemPaastaConfig)
    mock_kazoo_client.return_value = FakeClient()

    available = ServiceInstance(
        service="fake_service1",
        instance="fake_instance1",
        watcher="worker0",
        bounce_by=1577952000,
        wait_until=1577952000,
        enqueue_time=1577952000,
        bounce_start_time=1577952000,
        failures=1,
        processed_count=2,
    )
    unavailable = ServiceInstance(
        service="fake_service2",
        instance="fake_instance2",
        watcher="worker1",
        bounce_by=1577952100,
        wait_until=1577952200,
        enqueue_time=1577952100,
        bounce_start_time=1577952100,
        failures=2,
        processed_count=3,
    )

    fake_queue = mock_delay_deadline_queue_class.return_value
    fake_queue.get_available_service_instances.return_value = [
        (mock.Mock(), available)
    ]
    fake_queue.get_unavailable_service_instances.return_value = [
        (mock.Mock(), mock.Mock(), unavailable)
    ]

    output = deploy_queue.list_deploy_queue(request)

    assert output == {
        "available_service_instances": [
            {
                "service": "fake_service1",
                "instance": "fake_instance1",
                "watcher": "worker0",
                "bounce_by": 1577952000,
                "wait_until": 1577952000,
                "enqueue_time": 1577952000,
                "bounce_start_time": 1577952000,
                "failures": 1,
                "processed_count": 2,
            }
        ],
        "unavailable_service_instances": [
            {
                "service": "fake_service2",
                "instance": "fake_instance2",
                "watcher": "worker1",
                "bounce_by": 1577952100,
                "wait_until": 1577952200,
                "enqueue_time": 1577952100,
                "bounce_start_time": 1577952100,
                "failures": 2,
                "processed_count": 3,
            }
        ],
    }
def panoptes_mock_kazoo_client(**kwargs):
    """Stand-in client factory: return an in-memory fake ZK client.

    Any connection kwargs are accepted for signature compatibility but ignored.
    """
    return FakeClient()
def setUp(self):
    """Start a fake ZK client and prepare a local ServiceInstance plus a fake
    state manager for the tests."""
    storage = FakeStorage(SequentialThreadingHandler())
    client = FakeClient(storage=storage)
    client.start()
    self._storage = storage
    self._client = client
    self._self_instance = ServiceInstance(Endpoint("host", 10000))
    self._state_manager = FakeStateManager()
def setUp(self):
    """Create and start a fake ZK client backed by in-memory storage."""
    storage = FakeStorage(SequentialThreadingHandler())
    zk = FakeClient(storage=storage)
    zk.start()
    self.storage = storage
    self.client = zk
def multiple_queues(self):
    """Yield five independent queue views over one shared fake ZK client."""
    zk = FakeClient()
    zk.start()
    queues = []
    for _ in range(5):
        queues.append(ZKDelayDeadlineQueue(zk, "/"))
    yield queues
def queue(self):
    """Yield a delay-deadline queue rooted at "/" on a fresh fake ZK client."""
    zk = FakeClient()
    zk.start()
    yield ZKDelayDeadlineQueue(zk, "/")