def test_run(self):
     """End-to-end pipeline check: start consumers, then a producer, wait
     for the jobs to drain, and assert that the Redis enqueue counter
     matches the dequeue counter.
     """
     logging_to_console_and_syslog("*********Creating consumer instances.*******************")
     self.create_consumers()
     logging_to_console_and_syslog("***************Creating producer instance.************")
     time.sleep(10)
     self.create_producer_and_produce_jobs()
     # Give the consumers time to work through the backlog before checking.
     time.sleep(120)
     logging_to_console_and_syslog("Validating if the consumer successfully dequeued messages.")
     counters = RedisInterface(threading.current_thread().getName())
     self.assertEqual(counters.get_current_enqueue_count(),
                      counters.get_current_dequeue_count())
     logging_to_console_and_syslog("enqueue_count={},dequeue_count={}"
                                   .format(counters.get_current_enqueue_count(),
                                           counters.get_current_dequeue_count()))
# Example #2
 def perform_enqueue_dequeue(self):
     """Start the producer thread and verify that it enqueued exactly
     max_number_of_jobs messages, according to the Redis enqueue counter.
     """
     logging_to_console_and_syslog(
         "Validating producer instance to be not null.")
     self.create_producer_thread()
     # Allow the producer thread time to push all jobs before validating.
     time.sleep(30)
     logging_to_console_and_syslog(
         "Validating if the Producer successfully enqueued the messages.")
     counters = RedisInterface("Producer")
     expected_count = str(TestJobDispatcher.max_number_of_jobs)
     # Redis returns bytes; decode before comparing with the string form.
     self.assertEqual(
         counters.get_current_enqueue_count().decode('utf8'),
         expected_count)
     logging_to_console_and_syslog(
         "enqueue_count={},max_number_of_jobs={}".format(
             counters.get_current_enqueue_count(),
             TestJobDispatcher.max_number_of_jobs))
# Example #3
 def create_local_consumer2(self):
     """Consume from the "video-file-name" Kafka topic until the Redis
     dequeue counter catches up with the enqueue counter.

     A fresh Consumer is (re)created per message and closed after each
     dequeue; the try/finally guarantees the last Consumer is also closed
     when the loop exits between messages (previously it could leak if the
     counters equalized while a live consumer was polling).
     """
     c = None
     redis_instance = RedisInterface(threading.current_thread().getName())
     conf = {
         'bootstrap.servers': "localhost:9092",
         'group.id': "video-file-name",
         'session.timeout.ms': 6000,
         'auto.offset.reset': 'earliest'
     }
     try:
         while redis_instance.get_current_enqueue_count() != \
                 redis_instance.get_current_dequeue_count():
             if not c:
                 c = Consumer(conf)
                 c.subscribe(["video-file-name"], on_assign=print_assignment)
             msg = c.poll(timeout=1.0)
             # Skip empty polls and error events (e.g. partition EOF).
             if msg is None or msg.error():
                 continue
             logging_to_console_and_syslog(
                 '%% %s [%d] at offset %d with key %s:\n' %
                 (msg.topic(), msg.partition(), msg.offset(), str(
                     msg.key())))
             logging_to_console_and_syslog("msg.value()={}".format(
                 msg.value()))
             redis_instance.increment_dequeue_count()
             # Close and recreate per message (original behavior kept).
             c.close()
             c = None
             time.sleep(5)
     finally:
         # Ensure the consumer is released if the loop exits or raises
         # while a Consumer instance is still open.
         if c is not None:
             c.close()
 def perform_enqueue_dequeue(self, msgq_type, perform_subscription=False):
     """Produce jobs through *msgq_type*, consume them, and assert the
     Redis enqueue and dequeue counters converge.

     :param msgq_type: message-queue backend to exercise.
     :param perform_subscription: forwarded to the consumer threads.
     """
     logging_to_console_and_syslog(
         "Creating producer instance and producing jobs.")
     self.create_producer_and_produce_jobs(msgq_type)
     time.sleep(10)
     logging_to_console_and_syslog(
         "Creating consumer threads to consume jobs.")
     self.create_consumers(msgq_type, perform_subscription)
     # Wait for the consumers to drain the queue before validating.
     time.sleep(120)
     logging_to_console_and_syslog(
         "Validating if the consumer successfully dequeued messages.")
     counters = RedisInterface(threading.current_thread().getName())
     self.assertEqual(counters.get_current_enqueue_count(),
                      counters.get_current_dequeue_count())
     logging_to_console_and_syslog(
         "enqueue_count={},dequeue_count={}".format(
             counters.get_current_enqueue_count(),
             counters.get_current_dequeue_count()))
# Example #5
    def create_local_consumer(self):
        """Dequeue messages via ConfluentKafkaMsgQAPI until the Redis
        dequeue counter catches up with the enqueue counter, then release
        the consumer.

        The cleanup is now in a ``finally`` block: previously an exception
        from ``dequeue()`` or the Redis calls skipped
        ``consumer_instance.cleanup()`` and leaked the Kafka consumer.
        """
        redis_instance = RedisInterface(threading.current_thread().getName())
        consumer_instance = ConfluentKafkaMsgQAPI(
            is_consumer=True,
            thread_identifier=threading.current_thread().getName(),
            perform_subscription=True)
        try:
            while redis_instance.get_current_enqueue_count() != \
                    redis_instance.get_current_dequeue_count():

                message = consumer_instance.dequeue()
                # Skip empty polls and error events.
                if message is None or message.error():
                    continue
                logging_to_console_and_syslog(
                    "Consumer {}: Dequeued Message = {}".format(
                        threading.current_thread().getName(), message))
                redis_instance.increment_dequeue_count()
                redis_instance.write_an_event_in_redis_db(
                    "Consumer {}: Dequeued Message = {}".format(
                        threading.current_thread().getName(), message))
        finally:
            consumer_instance.cleanup()
class AutoScaler:
    """Periodically scales a docker service up or down based on the job
    backlog derived from the Redis enqueue/dequeue counters.
    """

    def __init__(self):
        # DockerService handle; created once the service name is known.
        self.docker_instance = None
        # Scaling bounds; -1 means "not yet read from the environment".
        self.min_threshold = -1
        self.max_threshold = -1
        self.auto_scale_service_name = None
        # Seconds between scaling decisions.
        self.auto_scale_time_interval = 10
        # Consecutive idle polls before a scale-down is attempted.
        self.scale_down_count = 0
        self.scale_down_count_max_threshold = 0
        self.redis_instance = RedisInterface("AutoScaler")
        self.__load_environment_variables()
        # NOTE: this call never returns (infinite polling loop).
        self.__perform_auto_scaling()

    def __load_environment_variables(self):
        """Poll the environment until the mandatory variables are set.

        Blocks until min_threshold, max_threshold and the service name are
        all provided; the remaining variables have usable defaults.
        """
        # Compare with ==, not "is": identity comparison against an int
        # literal only happens to work via CPython's small-int cache.
        while self.min_threshold == -1 or \
                self.max_threshold == -1 or \
                not self.auto_scale_service_name:
            time.sleep(1)
            self.min_threshold = int(os.getenv("min_threshold_key",
                                               default=-1))
            self.max_threshold = int(os.getenv("max_threshold_key",
                                               default=-1))
            self.scale_down_count_max_threshold = int(
                os.getenv("scale_down_count_max_threshold_key", default=60))
            self.auto_scale_time_interval = int(
                os.getenv("auto_scale_time_interval_key", default=10))
            self.auto_scale_service_name = os.getenv(
                "auto_scale_service_name_key", default=None)

        logging_to_console_and_syslog(
            ("min_threshold={}".format(self.min_threshold)))
        logging_to_console_and_syslog(
            ("max_threshold={}".format(self.max_threshold)))
        logging_to_console_and_syslog(("auto_scale_service_name={}".format(
            self.auto_scale_service_name)))
        logging_to_console_and_syslog(("auto_scale_time_interval={}".format(
            self.auto_scale_time_interval)))

    def __perform_scale_down_operation(self):
        """Scale the service down, stepping harder near the extremes and
        never dropping below min_threshold.

        NOTE(review): the tier docstring in __perform_auto_scaling says
        "above max_threshold//2 / max_threshold//4" but the code tests
        "<= max_threshold//2 / //4"; behavior kept as-is — confirm which
        is intended.
        """
        current_number_of_docker_instances = \
            self.docker_instance.get_current_number_of_containers_per_service()
        if current_number_of_docker_instances == self.max_threshold and \
                current_number_of_docker_instances - 30 >= self.min_threshold:
            self.docker_instance.scale(current_number_of_docker_instances - 30)
        elif current_number_of_docker_instances <= self.max_threshold // 2 and \
                current_number_of_docker_instances - 20 >= self.min_threshold:
            self.docker_instance.scale(current_number_of_docker_instances - 20)
        elif current_number_of_docker_instances <= self.max_threshold // 4 and \
                current_number_of_docker_instances - 10 >= self.min_threshold:
            self.docker_instance.scale(current_number_of_docker_instances - 10)
        elif current_number_of_docker_instances - 1 >= self.min_threshold:
            self.docker_instance.scale(current_number_of_docker_instances - 1)

    def __perform_scale_up_operation(self, jobs_in_pipe):
        """Scale the service up proportionally to the backlog size,
        capping the instance count at max_threshold.

        :param jobs_in_pipe: number of enqueued-but-not-dequeued jobs.
        """
        current_number_of_docker_instances = \
            self.docker_instance.get_current_number_of_containers_per_service()
        if 0 < jobs_in_pipe <= 10 and \
                current_number_of_docker_instances + 1 < self.max_threshold:
            self.docker_instance.scale(current_number_of_docker_instances + 1)
        elif 10 < jobs_in_pipe <= 50:
            # Was "11 < jobs_in_pipe": a backlog of exactly 11 silently
            # fell through to the +30 catch-all, contradicting the
            # documented 11-50 tier (+10).
            if current_number_of_docker_instances + 10 < self.max_threshold:
                self.docker_instance.scale(current_number_of_docker_instances +
                                           10)
            else:
                # current + (max - current) simplifies to max_threshold.
                self.docker_instance.scale(self.max_threshold)
        elif 50 < jobs_in_pipe <= 100:
            if current_number_of_docker_instances + 20 < self.max_threshold:
                self.docker_instance.scale(current_number_of_docker_instances +
                                           20)
            else:
                self.docker_instance.scale(self.max_threshold)
        else:
            if current_number_of_docker_instances + 30 < self.max_threshold:
                self.docker_instance.scale(current_number_of_docker_instances +
                                           30)
            else:
                self.docker_instance.scale(self.max_threshold)

    def __perform_auto_scaling(self):
        """
        Wake up every pre-specified time interval and do the following:
        Read the current total_job_done_count from Redis.
        Read the current total_job_to_be_done_count from Redis.
        Compute the difference between total_job_to_be_done_count and total_job_done_count.
        This gives the total number of jobs in the pipe.
        Now, count the current number of consumer instances (containers) for the specified docker service.
        if the current count of the number of instances is greater than max_threshold, then,
            return False
        if the total number of jobs in the pipe is 0:
            if current count of the number of instance is equal to max_threshold:
                scale down by 30
            elif current count of the number of instance is above max_threshold//2:
                scale down by 20
            elif current count of the number of instance is above max_threshold//4:
                scale down by 10
            else:
                scale down by 1
        elif the total number of jobs in the pipe is between 1-10:
            scale up by 1
        elif total number of jobs in the pipe is between 11-50:
            scale up by 10
        elif total number of jobs in the pipe is between 51-100:
            scale up by 20
        elif total number of jobs in the pipe is between 101-200:
            scale up by 30
        :return:
        """
        self.docker_instance = DockerService(self.auto_scale_service_name)
        while True:
            time.sleep(self.auto_scale_time_interval)
            current_job_to_be_done_count = int(
                self.redis_instance.get_current_enqueue_count())
            current_job_done_count = int(
                self.redis_instance.get_current_dequeue_count())
            jobs_in_pipe = current_job_to_be_done_count - current_job_done_count
            logging_to_console_and_syslog("current_job_to_be_done_count={},"
                                          "current_job_done_count={},"
                                          "jobs_in_pipe={}.".format(
                                              current_job_to_be_done_count,
                                              current_job_done_count,
                                              jobs_in_pipe))
            if jobs_in_pipe <= 0:
                # Only scale down after the backlog has been empty for
                # scale_down_count_max_threshold consecutive polls.
                if self.scale_down_count == self.scale_down_count_max_threshold:
                    logging_to_console_and_syslog(
                        "Performing scale down operation.")
                    self.__perform_scale_down_operation()
                    self.scale_down_count = 0
                else:
                    self.scale_down_count += 1
                    logging_to_console_and_syslog(
                        "Bumping up self.scale_down_count to {}.".format(
                            self.scale_down_count))
            else:
                logging_to_console_and_syslog("Performing scale up operation.")
                self.__perform_scale_up_operation(jobs_in_pipe)
                self.scale_down_count = 0

    def cleanup(self):
        """No resources to release; present for interface symmetry."""
        pass