    def test_tally_queues(self, redis_client):
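        """Verify tally_queues counts the entries pushed onto each queue,
        both for a single queue and for a delimited list of queues."""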
        qd = ','

        queue = 'queue'
        expected = random.randint(1, 10)
        for q in queue.split(qd):
            for _ in range(expected):
                redis_client.lpush(q, 'jobHash')

        scaler = autoscaler.Autoscaler(redis_client,
                                       queues=queue,
                                       queue_delim=qd)

        scaler.tally_queues()
        assert scaler.redis_keys == {queue: expected}

        queue = 'predict,track,train'
        expected = random.randint(1, 10)
        for q in queue.split(qd):
            for _ in range(expected):
                redis_client.lpush(q, 'jobHash')

        scaler = autoscaler.Autoscaler(redis_client,
                                       queues=queue,
                                       queue_delim=qd)
        scaler.tally_queues()

        expected_keys = {q: expected for q in queue.split(qd)}
        assert scaler.redis_keys == expected_keys

    def test_scale(self, mocker, redis_client):
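        """Verify scale() succeeds for every managed resource type and that
        an ApiException raised by scale_resource is handled, not propagated."""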
        mocker.patch('kubernetes.config.load_incluster_config')
        mocker.patch('kubernetes.client.AppsV1Api', DummyKubernetes)
        mocker.patch('kubernetes.client.BatchV1Api', DummyKubernetes)

        scale_kwargs = {
            'namespace': 'namespace',
            'name': 'test',
        }

        queues = 'predict,track'
        qd = ','

        scaler = autoscaler.Autoscaler(redis_client,
                                       queues=queues,
                                       queue_delim=qd)

        # test successful scale
        for resource_type in scaler.managed_resource_types:
            scaler.scale(resource_type=resource_type, **scale_kwargs)

        # test failed scale
        def bad_scale_resource(*args, **kwargs):
            raise kubernetes.client.rest.ApiException('thrown on purpose')

        scaler.scale_resource = bad_scale_resource
        for resource_type in scaler.managed_resource_types:
            scaler.scale(resource_type=resource_type, **scale_kwargs)

    def test_scale_resource(self, mocker, redis_client):
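        """Verify scale_resource takes no action when desired == current,
        patches jobs and deployments otherwise, and raises ValueError for an
        unknown resource type."""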
        # pylint: disable=E1111
        mocker.patch('kubernetes.config.load_incluster_config')
        mocker.patch('kubernetes.client.AppsV1Api', DummyKubernetes)
        mocker.patch('kubernetes.client.BatchV1Api', DummyKubernetes)

        namespace = 'dummy-namespace'
        name = 'dummy-name'
        scaler = autoscaler.Autoscaler(redis_client, 'queue')

        # if desired == current, no action is taken.
        res = scaler.scale_resource(1, 1, 'deployment', namespace, name)
        assert not res

        # scale a job
        res = scaler.scale_resource(2, 1, 'job', namespace, name)
        assert res

        # scale a deployment
        res = scaler.scale_resource(2, 1, 'deployment', namespace, name)
        assert res

        # bad resource type
        with pytest.raises(ValueError):
            scaler.scale_resource(2, 1, 'badvalue', namespace, name)

    def test_get_current_pods(self, mocker, redis_client):
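        """Verify get_current_pods returns the replica counts reported by the
        dummy Kubernetes client, raises ValueError for an invalid
        resource_type, and returns 0 for 'pod1' when the optional boolean flag
        is set (presumably restricting the count to live pods)."""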
        mocker.patch('kubernetes.config.load_incluster_config')
        mocker.patch('kubernetes.client.AppsV1Api', DummyKubernetes)
        mocker.patch('kubernetes.client.BatchV1Api', DummyKubernetes)

        scaler = autoscaler.Autoscaler(redis_client, 'queue')

        # test invalid resource_type
        with pytest.raises(ValueError):
            scaler.get_current_pods('namespace', 'bad_type', 'pod')

        deployed_pods = scaler.get_current_pods('ns', 'deployment', 'pod1')
        assert deployed_pods == 4

        deployed_pods = scaler.get_current_pods('ns', 'deployment', 'pod2')
        assert deployed_pods == 8

        deployed_pods = scaler.get_current_pods('ns', 'deployment', 'pod2',
                                                True)
        assert deployed_pods == 8

        deployed_pods = scaler.get_current_pods('ns', 'deployment', 'pod1',
                                                True)
        assert deployed_pods == 0

        deployed_pods = scaler.get_current_pods('ns', 'job', 'pod1')
        assert deployed_pods == 1

        deployed_pods = scaler.get_current_pods('ns', 'job', 'pod2')
        assert deployed_pods == 2

    def test_list_namespaced_job(self, mocker, redis_client):
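        """Verify list_namespaced_job logs and re-raises an ApiException from
        the underlying Kubernetes client."""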
        mocker.patch('kubernetes.config.load_incluster_config')
        mocker.patch('kubernetes.client.BatchV1Api', DummyKubernetes)

        scaler = autoscaler.Autoscaler(redis_client, 'queue')
        # test ApiException is logged and thrown
        mocker.patch('kubernetes.client.BatchV1Api.list_namespaced_job',
                     kube_error)
        with pytest.raises(kubernetes.client.rest.ApiException):
            scaler.list_namespaced_job('ns')

    def test_patch_namespaced_job(self, mocker, redis_client):
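        """Verify patch_namespaced_job applies a parallelism patch and logs
        and re-raises an ApiException from the Kubernetes client."""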
        mocker.patch('kubernetes.config.load_incluster_config')
        mocker.patch('kubernetes.client.BatchV1Api', DummyKubernetes)

        spec = {'spec': {'parallelism': 1}}
        scaler = autoscaler.Autoscaler(redis_client, 'queue')
        # test successful patch
        scaler.patch_namespaced_job('job', 'ns', spec)
        # test ApiException is logged and thrown
        mocker.patch('kubernetes.client.BatchV1Api.patch_namespaced_job',
                     kube_error)
        with pytest.raises(kubernetes.client.rest.ApiException):
            scaler.patch_namespaced_job('job', 'ns', spec)

    def test_get_desired_pods(self, redis_client):
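        """Verify get_desired_pods clamps the result to [min_pods, max_pods]
        and never drops below current_pods. The assertions are consistent with
        desired = clamp(max(redis_keys[key] // keys_per_pod, current_pods),
        min_pods, max_pods), but that exact formula is an assumption about the
        implementation, not something this test pins down."""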
        # key, keys_per_pod, min_pods, max_pods, current_pods
        scaler = autoscaler.Autoscaler(redis_client, 'queue')
        scaler.redis_keys['queue'] = 10
        # desired_pods is > max_pods
        desired_pods = scaler.get_desired_pods('queue', 2, 0, 2, 1)
        assert desired_pods == 2
        # desired_pods is < min_pods
        desired_pods = scaler.get_desired_pods('queue', 5, 9, 10, 0)
        assert desired_pods == 9
        # desired_pods is in range
        desired_pods = scaler.get_desired_pods('queue', 3, 0, 5, 1)
        assert desired_pods == 3
        # desired_pods is in range, current_pods exist
        desired_pods = scaler.get_desired_pods('queue', 10, 0, 5, 3)
        assert desired_pods == 3
    logger.addHandler(fh)
    logging.getLogger('kubernetes.client.rest').setLevel(logging.INFO)


if __name__ == '__main__':
    initialize_logger()

    _logger = logging.getLogger(__file__)

    REDIS_CLIENT = autoscaler.redis.RedisClient(
        host=decouple.config('REDIS_HOST', cast=str, default='redis-master'),
        port=decouple.config('REDIS_PORT', default=6379, cast=int),
        backoff=decouple.config('REDIS_INTERVAL', default=1, cast=int))

    SCALER = autoscaler.Autoscaler(
        redis_client=REDIS_CLIENT,
        queues=decouple.config('QUEUES', default='predict,track', cast=str),
        queue_delim=decouple.config('QUEUE_DELIMITER', default=',', cast=str))

    INTERVAL = decouple.config('INTERVAL', default=5, cast=int)

    RESOURCE_NAMESPACE = decouple.config('RESOURCE_NAMESPACE',
                                         default='default')
    RESOURCE_TYPE = decouple.config('RESOURCE_TYPE', default='deployment')
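    # RESOURCE_NAME has no default, so python-decouple will raise
    # UndefinedValueError if it is not set in the environment or settings.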
    RESOURCE_NAME = decouple.config('RESOURCE_NAME')

    MIN_PODS = decouple.config('MIN_PODS', default=0, cast=int)
    MAX_PODS = decouple.config('MAX_PODS', default=1, cast=int)
    KEYS_PER_POD = decouple.config('KEYS_PER_POD', default=1, cast=int)

    while True:
        try: