def instantiate_objects(self):
     self.consumer_instance = ProducerConsumerAPI(
         is_consumer=True,
         thread_identifier="Consumer_{}".format(self.cont_id),
         type_of_messaging_queue=self.producer_consumer_queue_type)
     self.data_parser_instance = DataParserInterface()
     self.redis_instance = RedisInterface("Consumer_{}".format(
         self.cont_id))
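Across these examples, RedisInterface is constructed with a caller label and then used for event logging and enqueue/dequeue counters. Its implementation is not shown on this page; the stub below is a sketch of the surface inferred from the call sites, assuming the redis-py package and the environment variable names that appear in the setUp methods further down (the class name is hypothetical, to avoid confusion with the real one).

import os
import redis  # assumes the redis-py package

class RedisInterfaceSketch:
    """Hypothetical stand-in inferred from the call sites in these examples."""
    def __init__(self, thread_identifier):
        self.thread_identifier = thread_identifier
        self.connection = redis.Redis(
            host=os.getenv("redis_server_hostname_key", "localhost"),
            port=int(os.getenv("redis_server_port_key", "6379")))
        self.log_key = os.getenv("redis_log_keyname_key", "briefcam")
        self.enqueue_key = os.getenv("total_job_enqueued_count_redis_name_key", "enqueue")
        self.dequeue_key = os.getenv("total_job_dequeued_count_redis_name_key", "dequeue")

    def write_an_event_in_redis_db(self, event):
        self.connection.rpush(self.log_key, "{}: {}".format(self.thread_identifier, event))

    def increment_enqueue_count(self):
        self.connection.incr(self.enqueue_key)

    def increment_dequeue_count(self):
        self.connection.incr(self.dequeue_key)

    def get_current_enqueue_count(self):
        return self.connection.get(self.enqueue_key)  # bytes, hence .decode('utf8') at call sites

    def get_current_dequeue_count(self):
        return self.connection.get(self.dequeue_key)

    def set_the_key_in_redis_db(self, key, value=1):
        self.connection.set(key, value)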
Example #2
 def __init__(self):
     self.before = {}
     self.after = {}
     self.video_file_path = None
     self.producer_consumer_type = None
     self.redis_instance = RedisInterface("Producer")
     self.load_environment_variables()
     self.producer_instance = ProducerConsumerAPI(is_producer=True,
                                                  thread_identifier="Producer",
                                                  type_of_messaging_queue=self.producer_consumer_type)
 def __init__(self):
     self.docker_instance = None
     self.min_threshold = -1
     self.max_threshold = -1
     self.auto_scale_service_name = None
     self.auto_scale_time_interval = 10
     self.scale_down_count = 0
     self.scale_down_count_max_threshold = 0
     self.redis_instance = RedisInterface("AutoScaler")
     self.__load_environment_variables()
     self.__perform_auto_scaling()
 def post_messages(self):
     self.redis_instance = RedisInterface("Producer")
     messages = [
         str(x)
         for x in range(TestMachineLearningWorkers.max_number_of_jobs)
     ]
     for message in messages:
         self.producer_instance.enqueue(message)
         event = "Producer: Successfully posted a message = {} into msgQ.".format(
             message)
         self.redis_instance.write_an_event_in_redis_db(event)
         self.redis_instance.increment_enqueue_count()
     return True
Example #5
 def perform_enqueue_dequeue(self):
     logging_to_console_and_syslog(
         "Validating that the producer instance is not null.")
     self.create_producer_thread()
     time.sleep(30)
     logging_to_console_and_syslog(
         "Validating if the Producer successfully enqueued the messages.")
     redis_instance = RedisInterface("Producer")
     self.assertEqual(
         redis_instance.get_current_enqueue_count().decode('utf8'),
         str(TestJobDispatcher.max_number_of_jobs))
     logging_to_console_and_syslog(
         "enqueue_count={},max_number_of_jobs={}".format(
             redis_instance.get_current_enqueue_count(),
             TestJobDispatcher.max_number_of_jobs))
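The .decode('utf8') above is needed because redis-py returns stored values as bytes; comparing raw bytes against str(...) would always fail. A minimal illustration, assuming a Redis server on localhost:6379:

import redis

r = redis.Redis()             # assumes localhost:6379
r.set("enqueue", 100)
value = r.get("enqueue")      # b'100' -- bytes, not int or str
assert value != str(100)      # bytes never compare equal to str
assert value.decode('utf8') == str(100)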
 def __init__(self):
     logging_to_console_and_syslog(
         "**********Initializing PyTorch Parser ***********")
     self.hostname = os.popen("cat /etc/hostname").read().strip()
     self.cont_id = os.popen(
         "cat /proc/self/cgroup | head -n 1 | cut -d '/' -f3").read().strip()
     PyTorchParser.redis_instance = RedisInterface("BriefCam+{}".format(
         self.cont_id))
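The shell pipeline above forks a shell just to read one file. A pipe-free sketch of the same container-id lookup (hypothetical helper; assumes the cgroup v1 layout that the pipeline expects):

def read_container_id(path="/proc/self/cgroup"):
    # Equivalent of: cat /proc/self/cgroup | head -n 1 | cut -d '/' -f3
    with open(path) as cgroup_file:
        first_line = cgroup_file.readline().strip()
    fields = first_line.split('/')
    return fields[2] if len(fields) > 2 else ""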
 def validate_machine_learning_workers(self):
     self.create_machine_learning_worker_thread()
     time.sleep(30)
     self.create_producer_and_produce_jobs(
         ProducerConsumerAPI.kafkaMsgQType)
     time.sleep(5)
     logging_to_console_and_syslog(
         "Validating if the machine learning workers"
         " successfully enqueued the messages.")
     redis_instance = RedisInterface("Consumer")
     self.assertEqual(
         redis_instance.get_current_dequeue_count().decode('utf8'),
         str(TestMachineLearningWorkers.max_number_of_jobs))
     logging_to_console_and_syslog(
         "dequeue_count={},max_number_of_jobs={}".format(
             redis_instance.get_current_dequeue_count(),
             TestMachineLearningWorkers.max_number_of_jobs))
Example #8
class DirectoryWatch:
    def __init__(self):
        self.before = {}
        self.after = {}
        self.video_file_path = None
        self.producer_consumer_type = None
        self.redis_instance = RedisInterface("Producer")
        self.load_environment_variables()
        self.producer_instance = ProducerConsumerAPI(is_producer=True,
                                                     thread_identifier="Producer",
                                                     type_of_messaging_queue=self.producer_consumer_type)

    def load_environment_variables(self):
        while self.video_file_path is None or \
              self.producer_consumer_type is None:
            time.sleep(1)
            self.video_file_path = os.getenv("video_file_path_key", default=None)
            self.producer_consumer_type = os.getenv("producer_consumer_queue_type_key", default=None)
        logging_to_console_and_syslog(("video_file_path={}".format(self.video_file_path)))
        logging_to_console_and_syslog(("producer_consumer_type={}".format(self.producer_consumer_type)))

    def cleanup(self):
        self.producer_instance.cleanup()

    def process_new_file(self,file_name):
        # post the file_name into the producer queue.
        self.producer_instance.enqueue(file_name)
        event = "Producer: Successfully posted a message = {} into msgQ.".format(file_name)
        self.redis_instance.write_an_event_in_redis_db(event)
        self.redis_instance.increment_enqueue_count()

    def watch_a_directory(self):
        self.before = {}
        while True:
            time.sleep(1)
            self.after = {f: None for f in os.listdir(self.video_file_path)}
            added = [f for f in self.after if f not in self.before]
            removed = [f for f in self.before if f not in self.after]
            if added:
                logging_to_console_and_syslog("Added: " + str(added))
                for filename in added:
                    self.process_new_file(filename)
            if removed:
                logging_to_console_and_syslog("Removed: " + str(removed))
            self.before = self.after
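watch_a_directory is a one-second polling diff over os.listdir. The technique stands on its own; a minimal self-contained sketch (the directory argument is illustrative):

import os
import time

def watch(directory, interval=1.0):
    before = set(os.listdir(directory))
    while True:
        time.sleep(interval)
        after = set(os.listdir(directory))
        for name in sorted(after - before):
            print("Added:", name)
        for name in sorted(before - after):
            print("Removed:", name)
        before = after

# watch("/tmp")  # runs until interrupted

Event-driven watchers (for example the watchdog package) avoid the polling latency, at the cost of an extra dependency.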
 def __connect(self):
     """
     This method tries to connect to the messaging queue.
     :return:
     """
     if self.message_queue_instance is None:
         try:
             if self.type_of_messaging_queue == ProducerConsumerAPI.kafkaMsgQType:
                 self.message_queue_instance = KafkaMsgQAPI(
                     is_producer=self.is_producer,
                     is_consumer=self.is_consumer,
                     perform_subscription=self.perform_subscription,
                     thread_identifier=self.thread_identifier)
             elif self.type_of_messaging_queue == ProducerConsumerAPI.rabbitMsgQType:
                 self.message_queue_instance = RabbitMsgQAPI(
                     is_producer=self.is_producer,
                     is_consumer=self.is_consumer,
                     perform_subscription=self.perform_subscription,
                     thread_identifier=self.thread_identifier)
             elif self.type_of_messaging_queue == ProducerConsumerAPI.confluentKafkaMsgQType:
                 self.message_queue_instance = ConfluentKafkaMsgQAPI(
                     is_producer=self.is_producer,
                     is_consumer=self.is_consumer,
                     perform_subscription=self.perform_subscription,
                     thread_identifier=self.thread_identifier)
             if not self.redis_instance:
                 if self.is_producer:
                     self.redis_instance = RedisInterface(
                         "Producer{}".format(self.thread_identifier))
                 elif self.is_consumer:
                     self.redis_instance = RedisInterface(
                         "Consumer{}".format(self.thread_identifier))
         except:
             print("Exception in user code:")
             print("-" * 60)
             traceback.print_exc(file=sys.stdout)
             print("-" * 60)
             time.sleep(5)
         else:
             logging_to_console_and_syslog(
                 "ProducerConsumerAPI: Successfully "
                 "created messaging queue instance for messageQ type = {}".format(
                     self.type_of_messaging_queue))
Example #10
    def run_consumer_instance():
        logging_to_console_and_syslog("Starting {}".format(
            threading.current_thread().getName()))
        t = threading.currentThread()
        redis_instance = RedisInterface("Consumer{}".format(
            threading.current_thread().getName()))

        consumer_instance = ConfluentKafkaMsgQAPI(
            is_consumer=True,
            thread_identifier=threading.current_thread().getName(),
            perform_subscription=True)
        while getattr(t, "do_run", True):
            message = consumer_instance.dequeue()
            if message:
                logging_to_console_and_syslog(
                    "Consumer {}: Dequeued Message = {}".format(
                        threading.current_thread().getName(), message))
                redis_instance.increment_dequeue_count()
                redis_instance.write_an_event_in_redis_db(
                    "Consumer {}: Dequeued Message = {}".format(
                        threading.current_thread().getName(), message))
            time.sleep(5)
        consumer_instance.cleanup()
        logging_to_console_and_syslog("Consumer {}: Exiting".format(
            threading.current_thread().getName()))
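The getattr(t, "do_run", True) test above is a cooperative stop flag: the owning code flips an attribute on the Thread object, and the loop notices on its next pass. The idiom in isolation:

import threading
import time

def worker():
    current = threading.current_thread()
    while getattr(current, "do_run", True):  # poll the stop flag each pass
        time.sleep(0.1)

t = threading.Thread(target=worker)
t.start()
t.do_run = False  # request shutdown; the loop exits within ~0.1s
t.join()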
Example #11
 def create_local_consumer2(self):
     c = None
     redis_instance = RedisInterface(threading.current_thread().getName())
     conf = {
         'bootstrap.servers': "localhost:9092",
         'group.id': "video-file-name",
         'session.timeout.ms': 6000,
         'auto.offset.reset': 'earliest'
     }
     while redis_instance.get_current_enqueue_count() != \
             redis_instance.get_current_dequeue_count():
         if not c:
             c = Consumer(conf)
             c.subscribe(["video-file-name"], on_assign=print_assignment)
         msg = c.poll(timeout=1.0)
         if msg is None or msg.error():
             continue
         else:
             logging_to_console_and_syslog(
                 '%% %s [%d] at offset %d with key %s:\n' %
                 (msg.topic(), msg.partition(), msg.offset(), str(
                     msg.key())))
             logging_to_console_and_syslog("msg.value()={}".format(
                 msg.value()))
             redis_instance.increment_dequeue_count()
             c.close()
             c = None
             time.sleep(5)
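Stripped of the Redis bookkeeping and the close-and-reconnect-per-message behaviour, the underlying confluent-kafka consume loop reduces to the sketch below (assumes the confluent-kafka package and a broker on localhost:9092):

from confluent_kafka import Consumer

conf = {
    'bootstrap.servers': 'localhost:9092',
    'group.id': 'video-file-name',
    'session.timeout.ms': 6000,
    'auto.offset.reset': 'earliest',
}
consumer = Consumer(conf)
consumer.subscribe(['video-file-name'])
try:
    while True:
        msg = consumer.poll(timeout=1.0)
        if msg is None or msg.error():
            continue  # nothing fetched, or a transient error event
        print(msg.topic(), msg.partition(), msg.offset(), msg.value())
finally:
    consumer.close()  # commits final offsets and leaves the group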
class MachineLearningWorker:
    def __init__(self):
        self.hostname = os.popen("cat /etc/hostname").read().strip()
        self.cont_id = os.popen(
            "cat /proc/self/cgroup | head -n 1 | cut -d '/' -f3").read().strip()
        self.producer_consumer_queue_type = None
        self.load_environment_variables()
        self.consumer_instance = None
        self.data_parser_instance = None
        self.redis_instance = None
        self.instantiate_objects()

    def load_environment_variables(self):
        while self.producer_consumer_queue_type is None:
            time.sleep(1)
            self.producer_consumer_queue_type = os.getenv(
                "producer_consumer_queue_type_key", default=None)

        logging_to_console_and_syslog(
            "producer_consumer_queue_type={}".format(
                self.producer_consumer_queue_type))

    def instantiate_objects(self):
        self.consumer_instance = ProducerConsumerAPI(
            is_consumer=True,
            thread_identifier="Consumer_{}".format(self.cont_id),
            type_of_messaging_queue=self.producer_consumer_queue_type)
        self.data_parser_instance = DataParserInterface()
        self.redis_instance = RedisInterface("Consumer_{}".format(
            self.cont_id))

    def cleanup(self):
        self.consumer_instance.cleanup()

    def process_job(self, message):
        self.data_parser_instance.process_job(message)

    def dequeue_and_process_jobs(self):
        message = self.consumer_instance.dequeue()
        if message:
            try:
                event = "Consumer: Successfully dequeued a message = {} from msgQ.".format(
                    message)
                self.redis_instance.write_an_event_in_redis_db(event)
                self.redis_instance.increment_dequeue_count()
                start_time = datetime.now()
                self.process_job(message)
                time_elapsed = datetime.now() - start_time
                event = 'Time taken to process {} = (hh:mm:ss.ms) {}'.format(
                    message, time_elapsed)
                self.redis_instance.write_an_event_in_redis_db(event)
            except:
                print("Exception in dequeue_and_process_jobs:")
                print("-" * 60)
                traceback.print_exc(file=sys.stdout)
                print("-" * 60)
                self.cleanup()
                self.instantiate_objects()
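As a usage sketch, a container entry point for this worker could be the loop below, mirroring start_machine_learning_workers in the test code further down (the environment variables the class polls for are assumed to be set):

worker = MachineLearningWorker()
try:
    while True:
        worker.dequeue_and_process_jobs()
except KeyboardInterrupt:
    worker.cleanup()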
 def post_messages(self):
     messages = [str(x) for x in range(100)]
     redis_instance = RedisInterface("Producer")
     for message in messages:
         status = self.producer_instance.enqueue(message)
         while not status:
             status = self.producer_instance.enqueue(message)
         event = "Producer: Successfully posted a message = {} into Kafka.".format(message)
         redis_instance.write_an_event_in_redis_db(event)
         redis_instance.increment_enqueue_count()
     self.producer_instance.cleanup()
     return True
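The while-not-status retry above spins at full speed while the broker is down. A gentler variant with capped exponential backoff (hypothetical helper, not part of the original module):

import time

def enqueue_with_backoff(producer, message, initial_delay=0.5, max_delay=8.0):
    delay = initial_delay
    while not producer.enqueue(message):
        time.sleep(delay)                  # wait before retrying
        delay = min(delay * 2, max_delay)  # back off up to the cap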
Example #14
    def create_local_consumer(self):
        redis_instance = RedisInterface(threading.current_thread().getName())
        consumer_instance = ConfluentKafkaMsgQAPI(
            is_consumer=True,
            thread_identifier=threading.current_thread().getName(),
            perform_subscription=True)
        while redis_instance.get_current_enqueue_count() != \
                redis_instance.get_current_dequeue_count():

            message = consumer_instance.dequeue()
            if message is None or message.error():
                continue
            else:
                logging_to_console_and_syslog(
                    "Consumer {}: Dequeued Message = {}".format(
                        threading.current_thread().getName(), message))
                redis_instance.increment_dequeue_count()
                redis_instance.write_an_event_in_redis_db(
                    "Consumer {}: Dequeued Message = {}".format(
                        threading.current_thread().getName(), message))
        consumer_instance.cleanup()
 def test_run(self):
     logging_to_console_and_syslog("*********Creating consumer instances.*******************")
     self.create_consumers()
     logging_to_console_and_syslog("***************Creating producer instance.************")
     time.sleep(10)
     self.create_producer_and_produce_jobs()
     time.sleep(120)
     logging_to_console_and_syslog("Validating if the consumer successfully dequeued messages.")
     redis_instance = RedisInterface(threading.current_thread().getName())
     self.assertEqual(redis_instance.get_current_enqueue_count(),
                      redis_instance.get_current_dequeue_count())
     logging_to_console_and_syslog("enqueue_count={},dequeue_count={}"
                                   .format(redis_instance.get_current_enqueue_count(),
                                           redis_instance.get_current_dequeue_count()))
 def __init__(self):
     logging_to_console_and_syslog("**********Initializing Briefcam Parser ***********")
     self.case_name = None
     self.case_url = None
     self.browser_loc = None
     self.username = None
     self.password = None
     self.image_directory = None
     self.process = None
     self.browser_ready = False
     self.browser_name = None
     self.max_retry_attempts = 0
     self.sleep_time = 1
     self.time_for_browser_to_open = 60
     self.time_between_input_character = 0
     self.redis_log_keyname = None
     self.video_file_path = None
     self.total_job_done_count_redis_name = None
     self.hostname = os.popen("cat /etc/hostname").read().strip()
     self.cont_id = os.popen("cat /proc/self/cgroup | head -n 1 | cut -d '/' -f3").read().strip()
     BriefCamParser.redis_instance = RedisInterface("BriefCam+{}".format(self.cont_id))
     self.import_environment_variables()
     self.prepare_browser()
 def perform_enqueue_dequeue(self, msgq_type, perform_subscription=False):
     logging_to_console_and_syslog(
         "Creating producer instance and producing jobs.")
     self.create_producer_and_produce_jobs(msgq_type)
     time.sleep(10)
     logging_to_console_and_syslog(
         "Creating consumer threads to consume jobs.")
     self.create_consumers(msgq_type, perform_subscription)
     time.sleep(120)
     logging_to_console_and_syslog(
         "Validating if the consumer successfully dequeued messages.")
     redis_instance = RedisInterface(threading.current_thread().getName())
     self.assertEqual(redis_instance.get_current_enqueue_count(),
                      redis_instance.get_current_dequeue_count())
     logging_to_console_and_syslog(
         "enqueue_count={},dequeue_count={}".format(
             redis_instance.get_current_enqueue_count(),
             redis_instance.get_current_dequeue_count()))
Example #18
    def test_auto_scaler(self):

        logging_to_console_and_syslog(
            "******************test_auto_scaler******************")
        self.redis_instance = RedisInterface(TestAutoScaler.service_name)
        self.redis_instance.set_the_key_in_redis_db("enqueue", 1)
        self.redis_instance.set_the_key_in_redis_db("dequeue", 1)

        self.create_auto_scaler_thread()
        # Alternate SCALE UP (growing backlog) with SCALE DOWN (drained pipe);
        # identical in behavior to spelling out each of the eight phases.
        for jobs_in_pipe in (10, 20, 51, 101):
            logging_to_console_and_syslog(
                "************Testing SCALE UP Jobs in pipe = {} ****************"
                .format(jobs_in_pipe))
            self.validate_auto_scaler(jobs_in_pipe, scale_up=True)
            logging_to_console_and_syslog(
                "************Testing SCALE DOWN Jobs in pipe = 0 ****************")
            self.validate_auto_scaler(0, scale_up=False)
Example #19
class TestAutoScaler(unittest.TestCase):
    service_to_be_tested = 'machine_learning_workers'
    service_name = 'test_auto_scaler'
    sleep_time = 5
    min_threshold = 1
    max_threshold = 100
    retry_attempts = 3
    total_number_of_iterations = 3

    def setUp(self):
        os.environ["redis_log_keyname_key"] = "briefcam"
        os.environ["total_job_enqueued_count_redis_name_key"] = "enqueue"
        os.environ["total_job_dequeued_count_redis_name_key"] = "dequeue"
        os.environ["redis_server_hostname_key"] = "localhost"
        os.environ["redis_server_port_key"] = "6379"
        os.environ["min_threshold_key"] = str(TestAutoScaler.min_threshold)
        os.environ["max_threshold_key"] = str(TestAutoScaler.max_threshold)
        os.environ["auto_scale_time_interval_key"] = str(
            TestAutoScaler.sleep_time)
        os.environ["auto_scale_service_name_key"] = \
            TestAutoScaler.service_to_be_tested
        self.__create_docker_stack()
        self.auto_scaler = None

    @staticmethod
    def run_auto_scaler():
        logging_to_console_and_syslog("Instantiating AutoScaler...")
        auto_scaler = AutoScaler()  # blocks here: __init__ enters the auto-scaling loop
        logging_to_console_and_syslog("Shutting down AutoScaler...")

    def __create_docker_stack(self):
        completedProcess = subprocess.run(
            ["docker", "stack", "deploy", "-c", "docker-compose.yml",
             TestAutoScaler.service_name],
            stdout=subprocess.PIPE)
        self.assertIsNotNone(completedProcess)
        logging_to_console_and_syslog(completedProcess.stdout.decode('utf8'))
        time.sleep(30)

    def create_auto_scaler_thread(self):
        self.auto_scaler_thread = threading.Thread(
            name="{}{}".format("thread", 1),
            target=TestAutoScaler.run_auto_scaler)
        self.auto_scaler_thread.do_run = True
        self.auto_scaler_thread.name = "{}_{}".format("auto_scaler_thread", 1)
        self.auto_scaler_thread.start()
        time.sleep(30)

    def get_current_count(self, service_name):
        docker_svc_instance = DockerService(service_name)
        self.assertIsNotNone(docker_svc_instance)
        service_id = docker_svc_instance.get_service_id_from_service_name()
        self.assertIsNotNone(service_id)
        logging_to_console_and_syslog("get_service_id_from_service_name:"
                                      "service_name={},service_id={}".format(
                                          service_name, service_id))
        current_container_count = \
            docker_svc_instance.get_current_number_of_containers_per_service()
        return current_container_count

    def validate_auto_scaler(self, jobs_in_pipe, scale_up):
        self.redis_instance.set_the_key_in_redis_db("enqueue", jobs_in_pipe)
        service_name = '{}_{}'.format(TestAutoScaler.service_name,
                                      TestAutoScaler.service_to_be_tested)
        expected_container_count = 0

        for index in range(TestAutoScaler.total_number_of_iterations):
            current_container_count = self.get_current_count(service_name)
            if index == 0:
                # Start off from the current value of the total number of containers.
                expected_container_count = current_container_count
            if current_container_count != expected_container_count:
                for count in range(TestAutoScaler.retry_attempts):
                    time.sleep(1)
                    current_container_count = self.get_current_count(
                        service_name)
                    logging_to_console_and_syslog(
                        "Iteration={},"
                        "Retrying after a second."
                        "current_container_count={},"
                        "expected_container_count={}".format(
                            index, current_container_count,
                            expected_container_count))
                    if current_container_count == expected_container_count:
                        break
            logging_to_console_and_syslog(
                "Iteration={},"
                "get_current_number_of_containers_per_service:"
                "current_container_count={},"
                "expected_container_count={}".format(index,
                                                     current_container_count,
                                                     expected_container_count))

            self.assertEqual(current_container_count, expected_container_count)

            if scale_up:
                if 0 < jobs_in_pipe <= 10 and \
                        current_container_count + 1 < TestAutoScaler.max_threshold:
                    expected_container_count += 1
                elif 10 < jobs_in_pipe <= 50:
                    if current_container_count + 10 < TestAutoScaler.max_threshold:
                        expected_container_count += 10
                    else:
                        expected_container_count += TestAutoScaler.max_threshold - current_container_count
                elif 50 < jobs_in_pipe <= 100:
                    if current_container_count + 20 < TestAutoScaler.max_threshold:
                        expected_container_count += 20
                    else:
                        expected_container_count += TestAutoScaler.max_threshold - current_container_count
                elif 100 < jobs_in_pipe <= 200:
                    if current_container_count + 30 < TestAutoScaler.max_threshold:
                        expected_container_count += 30
                    else:
                        expected_container_count += TestAutoScaler.max_threshold - current_container_count
            else:
                if current_container_count == TestAutoScaler.max_threshold and \
                        current_container_count - 30 >= TestAutoScaler.min_threshold:
                    expected_container_count -= 30
                elif current_container_count <= TestAutoScaler.max_threshold // 2 and \
                        current_container_count - 20 >= TestAutoScaler.min_threshold:
                    expected_container_count -= 20
                elif current_container_count <= TestAutoScaler.max_threshold // 4 and \
                        current_container_count - 10 >= TestAutoScaler.min_threshold:
                    expected_container_count -= 10
                elif current_container_count - 1 >= TestAutoScaler.min_threshold:
                    expected_container_count -= 1

            logging_to_console_and_syslog("Sleeping for {} seconds.".format(
                TestAutoScaler.sleep_time))
            time.sleep(TestAutoScaler.sleep_time)
            logging_to_console_and_syslog("Waking up after {} seconds.".format(
                TestAutoScaler.sleep_time))

    def test_auto_scaler(self):

        logging_to_console_and_syslog(
            "******************test_auto_scaler******************")
        self.redis_instance = RedisInterface(TestAutoScaler.service_name)
        self.redis_instance.set_the_key_in_redis_db("enqueue", 1)
        self.redis_instance.set_the_key_in_redis_db("dequeue", 1)

        self.create_auto_scaler_thread()
        # Alternate SCALE UP (growing backlog) with SCALE DOWN (drained pipe);
        # identical in behavior to spelling out each of the eight phases.
        for jobs_in_pipe in (10, 20, 51, 101):
            logging_to_console_and_syslog(
                "************Testing SCALE UP Jobs in pipe = {} ****************"
                .format(jobs_in_pipe))
            self.validate_auto_scaler(jobs_in_pipe, scale_up=True)
            logging_to_console_and_syslog(
                "************Testing SCALE DOWN Jobs in pipe = 0 ****************")
            self.validate_auto_scaler(0, scale_up=False)

    def __tear_down_docker_stack(self):
        completedProcess = subprocess.run(
            ["docker", "stack", "rm", TestAutoScaler.service_name],
            stdout=subprocess.PIPE)
        self.assertIsNotNone(completedProcess)
        logging_to_console_and_syslog(completedProcess.stdout.decode('utf8'))

    def tearDown(self):
        self.__tear_down_docker_stack()
        self.auto_scaler_thread.join(1.0)
        try:
            docker_api_interface_instance = DockerAPIInterface(
                image_name=TestAutoScaler.service_to_be_tested)
            docker_api_interface_instance.stop_docker_container_by_name()
        except:
            logging_to_console_and_syslog(
                "Caught an exception while stopping {}".format(
                    TestAutoScaler.service_to_be_tested))
            print("Exception in user code:")
            print("-" * 60)
            traceback.print_exc(file=sys.stdout)
            print("-" * 60)
class ProducerConsumerAPI:
    """
    This is a factory design pattern.
    This class produces messages into
    1. Kafka Queue.
    2. Rabbit Message Queue.
    """
    rabbitMsgQType = "Rabbit"
    kafkaMsgQType = "Kafka"
    confluentKafkaMsgQType = "ConfluentKafka"

    def __init__(self,
                 is_producer=False,
                 is_consumer=False,
                 perform_subscription=False,
                 type_of_messaging_queue=None,
                 thread_identifier=None):
        self.message_queue_instance = None
        self.redis_instance = None
        self.is_producer = is_producer
        self.is_consumer = is_consumer
        self.perform_subscription = perform_subscription
        self.type_of_messaging_queue = type_of_messaging_queue
        self.thread_identifier = thread_identifier
        self.read_environment_variables()
        #self.__connect()

    def read_environment_variables(self):
        """
        This method is used to read the environment variables defined in the OS.
        :return:
        """
        while self.type_of_messaging_queue is None:
            time.sleep(2)
            logging_to_console_and_syslog(
                "ProducerConsumerAPI: "
                "Trying to read the environment variables...")
            self.type_of_messaging_queue = os.getenv(
                "type_of_messaging_queue_key", default=None)
        logging_to_console_and_syslog("ProducerConsumerAPI:"
                                      "type_of_messaging_queue={}".format(
                                          self.type_of_messaging_queue))

    def __connect(self):
        """
        This method tries to connect to the messaging queue.
        :return:
        """
        if self.message_queue_instance is None:
            try:
                if self.type_of_messaging_queue == ProducerConsumerAPI.kafkaMsgQType:
                    self.message_queue_instance = KafkaMsgQAPI(
                        is_producer=self.is_producer,
                        is_consumer=self.is_consumer,
                        perform_subscription=self.perform_subscription,
                        thread_identifier=self.thread_identifier)
                elif self.type_of_messaging_queue == ProducerConsumerAPI.rabbitMsgQType:
                    self.message_queue_instance = RabbitMsgQAPI(
                        is_producer=self.is_producer,
                        is_consumer=self.is_consumer,
                        perform_subscription=self.perform_subscription,
                        thread_identifier=self.thread_identifier)
                elif self.type_of_messaging_queue == ProducerConsumerAPI.confluentKafkaMsgQType:
                    self.message_queue_instance = ConfluentKafkaMsgQAPI(
                        is_producer=self.is_producer,
                        is_consumer=self.is_consumer,
                        perform_subscription=self.perform_subscription,
                        thread_identifier=self.thread_identifier)
                if not self.redis_instance:
                    if self.is_producer:
                        self.redis_instance = RedisInterface(
                            "Producer{}".format(self.thread_identifier))
                    elif self.is_consumer:
                        self.redis_instance = RedisInterface(
                            "Consumer{}".format(self.thread_identifier))
            except:
                print("Exception in user code:")
                print("-" * 60)
                traceback.print_exc(file=sys.stdout)
                print("-" * 60)
                time.sleep(5)
            else:
                logging_to_console_and_syslog(
                    "ProducerConsumerAPI: Successfully "
                    "created messaging queue instance for messageQ type = {}".format(
                        self.type_of_messaging_queue))

    def enqueue(self, filename):
        """
        This method tries to post a message.
        :param filename:
        :return True or False:
        """
        status = False

        if filename is None or len(filename) == 0:
            logging_to_console_and_syslog("filename is None or invalid")
            return status

        if self.message_queue_instance is None:
            self.__connect()

        if hasattr(self.message_queue_instance, 'enqueue'):
            status = self.message_queue_instance.enqueue(filename)
            event = "Producer: Successfully posted a message = {} into msgQ. Status={}".format(
                filename, status)
            self.redis_instance.write_an_event_in_redis_db(event)
            self.redis_instance.increment_enqueue_count()

        return status

    def dequeue(self):
        """
        This method tries to dequeue a message.
        :return Freezes the current context and yields a message:
        Please make sure to iterate over this to unfreeze the context.
        """
        if self.message_queue_instance is None:
            self.__connect()
        msg = None
        if hasattr(self.message_queue_instance, 'dequeue'):
            msg = self.message_queue_instance.dequeue()
            if msg:
                self.redis_instance.increment_dequeue_count()
                self.redis_instance.write_an_event_in_redis_db(
                    "Consumer {}: Dequeued Message = {}".format(
                        self.thread_identifier, msg))
                self.cleanup()
        return msg

    def cleanup(self):
        if self.message_queue_instance:
            self.message_queue_instance.cleanup()
            self.message_queue_instance = None
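A minimal usage sketch of the factory, assuming a reachable Kafka backend; setting the environment variable up front short-circuits the polling loop in read_environment_variables, and the connection itself is made lazily on first enqueue/dequeue:

import os

os.environ["type_of_messaging_queue_key"] = ProducerConsumerAPI.kafkaMsgQType

producer = ProducerConsumerAPI(is_producer=True, thread_identifier="Producer")
producer.enqueue("video1.mp4")  # hypothetical file name
producer.cleanup()

consumer = ProducerConsumerAPI(is_consumer=True, thread_identifier="Consumer")
message = consumer.dequeue()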
class TestMachineLearningWorkers(unittest.TestCase):

    max_number_of_jobs = 100
    directory_name = 'test_files'

    def setUp(self):
        os.environ["broker_name_key"] = "{}:9094".format("mec-poc")
        os.environ["topic_key"] = "video-file-name"
        os.environ["redis_log_keyname_key"] = "briefcam"
        os.environ["total_job_enqueued_count_redis_name_key"] = "enqueue"
        os.environ["total_job_dequeued_count_redis_name_key"] = "dequeue"
        os.environ["redis_server_hostname_key"] = "mec-poc"
        os.environ["redis_server_port_key"] = "6379"
        os.environ["producer_consumer_queue_type_key"] = \
            ProducerConsumerAPI.kafkaMsgQType
        os.environ["data_parser_type_key"] = DataParserInterface.TensorFlow
        self.dirname = os.path.dirname(os.path.realpath(__file__))
        current_file_path_list = os.path.realpath(__file__).split('/')
        data_path_directory = '/'.join(current_file_path_list[:-1])
        os.environ["data_file_path_key"] = "{}/{}".format(
            data_path_directory, TestMachineLearningWorkers.directory_name)
        self.create_test_docker_container()
        self.machine_learning_worker_thread = None
        self.producer_instance = None

    @staticmethod
    def start_machine_learning_workers():
        logging_to_console_and_syslog("Starting {}".format(
            threading.current_thread().getName()))
        t = threading.currentThread()
        worker = MachineLearningWorker()
        while getattr(t, "do_run", True):
            #logging_to_console_and_syslog("***dequeue_and_process_jobs***")
            worker.dequeue_and_process_jobs()
        logging_to_console_and_syslog("Consumer {}: Exiting".format(
            threading.current_thread().getName()))

    def create_machine_learning_worker_thread(self):
        self.machine_learning_worker_thread = threading.Thread(
            name="{}{}".format("thread", 1),
            target=TestMachineLearningWorkers.start_machine_learning_workers)
        self.machine_learning_worker_thread.do_run = True
        self.machine_learning_worker_thread.name = "{}_{}".format(
            "test_machine_learning_workers", 1)
        self.machine_learning_worker_thread.start()

    def post_messages(self):
        self.redis_instance = RedisInterface("Producer")
        messages = [
            str(x)
            for x in range(TestMachineLearningWorkers.max_number_of_jobs)
        ]
        for message in messages:
            self.producer_instance.enqueue(message)
            event = "Producer: Successfully posted a message = {} into msgQ.".format(
                message)
            self.redis_instance.write_an_event_in_redis_db(event)
            self.redis_instance.increment_enqueue_count()
        return True

    def create_producer_and_produce_jobs(self, msgq_type):
        self.producer_instance = ProducerConsumerAPI(
            is_producer=True,
            thread_identifier="Producer",
            type_of_messaging_queue=msgq_type)
        logging_to_console_and_syslog("Posting messages.")
        self.assertTrue(self.post_messages())

    def validate_machine_learning_workers(self):
        self.create_machine_learning_worker_thread()
        time.sleep(30)
        self.create_producer_and_produce_jobs(
            ProducerConsumerAPI.kafkaMsgQType)
        time.sleep(5)
        logging_to_console_and_syslog(
            "Validating if the machine learning workers"
            " successfully enqueued the messages.")
        redis_instance = RedisInterface("Consumer")
        self.assertEqual(
            redis_instance.get_current_dequeue_count().decode('utf8'),
            str(TestMachineLearningWorkers.max_number_of_jobs))
        logging_to_console_and_syslog(
            "dequeue_count={},max_number_of_jobs={}".format(
                redis_instance.get_current_dequeue_count(),
                TestMachineLearningWorkers.max_number_of_jobs))

    def test_run(self):
        logging_to_console_and_syslog(
            "Validating **************** Machine Learning workers *****************."
        )
        self.validate_machine_learning_workers()

    def create_test_docker_container(self):
        completedProcess = subprocess.run(
            ["sudo", "docker-compose", "-f",
             "{}/docker-compose_wurstmeister_kafka.yml".format(self.dirname),
             "up", "-d"],
            stdout=subprocess.PIPE)
        self.assertIsNotNone(completedProcess)
        self.assertIsNotNone(completedProcess.stdout)

    def delete_test_docker_container(self):
        completedProcess = subprocess.run(
            ["sudo", "docker-compose", "-f",
             "{}/docker-compose_wurstmeister_kafka.yml".format(self.dirname),
             "down"],
            stdout=subprocess.PIPE)
        self.assertIsNotNone(completedProcess)
        self.assertIsNotNone(completedProcess.stdout)

    def tearDown(self):
        self.delete_test_docker_container()
        subprocess.run(
            ['rm', '-rf', TestMachineLearningWorkers.directory_name],
            stdout=subprocess.PIPE)
        self.machine_learning_worker_thread.do_run = False
        time.sleep(1)
        logging_to_console_and_syslog("Trying to join thread.")
        self.machine_learning_worker_thread.join(1.0)
        time.sleep(1)
        if self.machine_learning_worker_thread.is_alive():
            try:
                logging_to_console_and_syslog("Trying to __stop thread.")
                self.machine_learning_worker_thread._stop()
            except:
                logging_to_console_and_syslog(
                    "Caught an exception while stopping thread.")
                docker_api_interface_instance = DockerAPIInterface(
                    image_name=self.dirname.split('/')[-2],
                    dockerfile_directory_name=self.dirname)
                docker_api_interface_instance.stop_docker_container_by_name()
class AutoScaler:
    def __init__(self):
        self.docker_instance = None
        self.min_threshold = -1
        self.max_threshold = -1
        self.auto_scale_service_name = None
        self.auto_scale_time_interval = 10
        self.scale_down_count = 0
        self.scale_down_count_max_threshold = 0
        self.redis_instance = RedisInterface("AutoScaler")
        self.__load_environment_variables()
        self.__perform_auto_scaling()

    def __load_environment_variables(self):
        while self.min_threshold == -1 or \
                self.max_threshold == -1 or \
                not self.auto_scale_service_name:
            time.sleep(1)
            self.min_threshold = int(os.getenv("min_threshold_key",
                                               default=-1))
            self.max_threshold = int(os.getenv("max_threshold_key",
                                               default=-1))
            self.scale_down_count_max_threshold = int(
                os.getenv("scale_down_count_max_threshold_key", default=60))
            self.auto_scale_time_interval = int(
                os.getenv("auto_scale_time_interval_key", default=10))
            self.auto_scale_service_name = os.getenv(
                "auto_scale_service_name_key", default=None)

        logging_to_console_and_syslog(
            "min_threshold={}".format(self.min_threshold))
        logging_to_console_and_syslog(
            "max_threshold={}".format(self.max_threshold))
        logging_to_console_and_syslog("auto_scale_service_name={}".format(
            self.auto_scale_service_name))
        logging_to_console_and_syslog("auto_scale_time_interval={}".format(
            self.auto_scale_time_interval))

    def __perform_scale_down_operation(self):
        current_number_of_docker_instances = \
            self.docker_instance.get_current_number_of_containers_per_service()
        if current_number_of_docker_instances == self.max_threshold and \
                current_number_of_docker_instances - 30 >= self.min_threshold:
            self.docker_instance.scale(current_number_of_docker_instances - 30)
        elif current_number_of_docker_instances <= self.max_threshold // 2 and \
                current_number_of_docker_instances - 20 >= self.min_threshold:
            self.docker_instance.scale(current_number_of_docker_instances - 20)
        elif current_number_of_docker_instances <= self.max_threshold // 4 and \
                current_number_of_docker_instances - 10 >= self.min_threshold:
            self.docker_instance.scale(current_number_of_docker_instances - 10)
        elif current_number_of_docker_instances - 1 >= self.min_threshold:
            self.docker_instance.scale(current_number_of_docker_instances - 1)

    def __perform_scale_up_operation(self, jobs_in_pipe):
        current_number_of_docker_instances = \
            self.docker_instance.get_current_number_of_containers_per_service()
        if 0 < jobs_in_pipe <= 10 and \
                current_number_of_docker_instances + 1 < self.max_threshold:
            self.docker_instance.scale(current_number_of_docker_instances + 1)
        elif 10 < jobs_in_pipe <= 50:
            if current_number_of_docker_instances + 10 < self.max_threshold:
                self.docker_instance.scale(current_number_of_docker_instances + 10)
            else:
                self.docker_instance.scale(self.max_threshold)
        elif 50 < jobs_in_pipe <= 100:
            if current_number_of_docker_instances + 20 < self.max_threshold:
                self.docker_instance.scale(current_number_of_docker_instances + 20)
            else:
                self.docker_instance.scale(self.max_threshold)
        else:
            if current_number_of_docker_instances + 30 < self.max_threshold:
                self.docker_instance.scale(current_number_of_docker_instances + 30)
            else:
                self.docker_instance.scale(self.max_threshold)

    def __perform_auto_scaling(self):
        """
        Wake up every pre-specified time interval and do the following:
        Read the current total_job_done_count from Redis.
        Read the current total_job_to_be_done_count from Redis.
        Compute the difference between total_job_to_be_done_count and total_job_done_count.
        This gives the total number of jobs in the pipe.
        Now, count the current number of consumer instances (containers) for the specified docker service.
        if the current count of the number of instances is greater than max_threshold, then,
            return False
        if the total number of jobs in the pipe is 0:
            if the current count of the number of instances is equal to max_threshold:
                scale down by 30
            elif the current count of the number of instances is at most max_threshold//2:
                scale down by 20
            elif the current count of the number of instances is at most max_threshold//4:
                scale down by 10
            else:
                scale down by 1
        elif the total number of jobs in the pipe is between 1-10:
            scale up by 1
        elif total number of jobs in the pipe is between 11-50:
            scale up by 10
        elif total number of jobs in the pipe is between 51-100:
            scale up by 20
        elif total number of jobs in the pipe is between 101-200:
            scale up by 30
        :return:
        """
        self.docker_instance = DockerService(self.auto_scale_service_name)
        while True:
            time.sleep(self.auto_scale_time_interval)
            current_job_to_be_done_count = int(
                self.redis_instance.get_current_enqueue_count())
            current_job_done_count = int(
                self.redis_instance.get_current_dequeue_count())
            jobs_in_pipe = current_job_to_be_done_count - current_job_done_count
            logging_to_console_and_syslog("current_job_to_be_done_count={},"
                                          "current_job_done_count={},"
                                          "jobs_in_pipe={}.".format(
                                              current_job_to_be_done_count,
                                              current_job_done_count,
                                              jobs_in_pipe))
            if jobs_in_pipe <= 0:
                if self.scale_down_count == self.scale_down_count_max_threshold:
                    logging_to_console_and_syslog(
                        "Performing scale down operation.")
                    self.__perform_scale_down_operation()
                    self.scale_down_count = 0
                else:
                    self.scale_down_count += 1
                    logging_to_console_and_syslog(
                        "Bumping up self.scale_down_count to {}.".format(
                            self.scale_down_count))
            else:
                logging_to_console_and_syslog("Performing scale up operation.")
                self.__perform_scale_up_operation(jobs_in_pipe)
                self.scale_down_count = 0

    def cleanup(self):
        pass
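The scale-up policy is effectively a step function of the backlog, clamped at max_threshold (the original additionally guards the +1 branch with its own max check). A pure-function sketch (hypothetical helper) with a few worked values:

def scale_up_target(current, jobs_in_pipe, max_threshold):
    """Desired container count for a given backlog, per the policy above."""
    if 0 < jobs_in_pipe <= 10:
        step = 1
    elif jobs_in_pipe <= 50:
        step = 10
    elif jobs_in_pipe <= 100:
        step = 20
    else:
        step = 30
    return min(current + step, max_threshold)

assert scale_up_target(5, 7, 100) == 6        # 1-10 jobs in the pipe: +1
assert scale_up_target(5, 60, 100) == 25      # 51-100 jobs: +20
assert scale_up_target(95, 150, 100) == 100   # >100 jobs: +30, clamped at max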