def setUp(self):
        """Install the gevent.sleep patch and build a JobHandler fixture."""
        super(TestJobHandler, self).setUp()

        # Patch out gevent.sleep so retry loops run instantly in tests.
        self.sleep_patch = mock.patch('gevent.sleep')
        self.sleep_mock = self.sleep_patch.start()

        # Collaborators the handler talks to are plain mocks.
        self.logger = mock.Mock()
        self.amqp_client = mock.Mock()
        self.message = mock.Mock()

        self.job_template_name = ['test-job']
        self.job_input = {'key1': 'value1', 'key2': 'value2'}
        self.device_list = ['device1']
        self.api_server_config = dict(
            ips=["1.2.3.4"],
            port="8082",
            username="******",
            password="******",
            tenant="default",
            use_ssl=False,
        )
        self.args = AttrDict(dict(
            api_server_ip='127.0.0.1',
            admin_user='******',
            admin_password='******',
            admin_tenant_name='test',
            api_server_port=8082,
            api_server_use_ssl=False,
        ))

        self.job_handler = JobHandler(
            self.job_template_name, self.job_input, self.device_list,
            self.api_server_config, self.logger, self.amqp_client, self.args)
Exemplo n.º 2
0
 def device_send(self, job_template, job_input, is_delete, retry):
     """Serialize and push the abstract config for this physical router.

     The config is dumped with sorted keys and hashed so that an
     unchanged config is not re-pushed, unless the router has
     forced_cfg_push set.  Commit statistics and push_config_state are
     updated on both the success and the failure path.

     :param job_template: job template identifier passed to JobHandler
     :param job_input: abstract config dict; serialized for hashing and
         debug logging
     :param is_delete: accepted for interface parity with sibling
         variants; not used by this implementation
     :param retry: when True, a failed push ends in PUSH_STATE_RETRY
         instead of PUSH_STATE_FAILED
     :return: length (in characters) of the serialized config
     """
     config_str = json.dumps(job_input, sort_keys=True)
     self.push_config_state = PushConfigState.PUSH_STATE_IN_PROGRESS
     start_time = None
     config_size = 0
     forced_cfg_push = self.physical_router.forced_cfg_push
     try:
         config_size = len(config_str)
         # Encode before hashing: hashlib.md5 requires bytes on
         # Python 3 (the encode is harmless on Python 2 ascii JSON).
         current_config_hash = md5(config_str.encode('utf-8')).hexdigest()
         if self.last_config_hash is None or (
                 current_config_hash != self.last_config_hash
                 or forced_cfg_push):
             self._logger.info(
                 "Config push for %s(%s) using job template %s, "
                 "forced_push %s" %
                 (self.physical_router.name, self.physical_router.uuid,
                  str(job_template), forced_cfg_push))
             self._logger.debug(
                 "Abstract config: %s" %
                 json.dumps(job_input, indent=4, sort_keys=True))
             device_manager = DeviceManager.get_instance()
             job_handler = JobHandler(
                 job_template, job_input, [self.physical_router.uuid],
                 device_manager.get_api_server_config(), self._logger,
                 device_manager._amqp_client,
                 self.physical_router.transaction_id,
                 self.physical_router.transaction_descr,
                 device_manager._args)
             self.commit_stats['total_commits_sent_since_up'] += 1
             start_time = time.time()
             # Blocks until the job finishes; raises on job failure.
             job_handler.push(**device_manager.get_job_status_config())
             end_time = time.time()
             self.commit_stats['commit_status_message'] = 'success'
             self.commit_stats['last_commit_time'] = \
                 datetime.datetime.fromtimestamp(
                     end_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(end_time -
                                                             start_time)
             # Remember what was pushed so identical configs are skipped.
             self.last_config_hash = current_config_hash
         else:
             self._logger.debug("not pushing since no config change"
                                " detected")
         self.push_config_state = PushConfigState.PUSH_STATE_SUCCESS
     except Exception as e:
         self._logger.error("Router %s: %s" %
                            (self.physical_router.management_ip, repr(e)))
         self._logger.error("Abstract config: %s" %
                            json.dumps(job_input, indent=4, sort_keys=True))
         # str(e) instead of the Python-2-only e.message; the previous
         # backslash-continued literal also embedded a long run of
         # spaces inside the status message.
         self.commit_stats['commit_status_message'] = (
             'failed to apply config, router response: ' + str(e))
         if start_time is not None:
             self.commit_stats['last_commit_time'] = \
                 datetime.datetime.fromtimestamp(
                     start_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(time.time() -
                                                             start_time)
         self.push_config_state = (PushConfigState.PUSH_STATE_RETRY if retry
                                   else PushConfigState.PUSH_STATE_FAILED)
     return config_size
Exemplo n.º 3
0
 def device_send(self, job_template, job_input, is_delete, retry):
     """Push the abstract config to the device through a JobHandler job.

     The config is serialized with sorted keys and hashed; the push is
     skipped when the hash matches the last successfully pushed config.
     Commit statistics and push_config_state are updated on both the
     success and failure paths.

     :param job_template: job template handed to JobHandler
     :param job_input: abstract config dict (serialized for hash/logging)
     :param is_delete: when True, JobHandler receives None instead of
         [self.physical_router.uuid] as the device list
     :param retry: failure sets PUSH_STATE_RETRY instead of
         PUSH_STATE_FAILED when True
     :return: length of the serialized config string
     """
     config_str = json.dumps(job_input, sort_keys=True)
     self.push_config_state = PushConfigState.PUSH_STATE_IN_PROGRESS
     start_time = None
     config_size = 0
     try:
         config_size = len(config_str)
         # NOTE(review): md5() on a str works on Python 2 only; Python 3
         # would need config_str.encode() -- confirm target interpreter.
         current_config_hash = md5(config_str).hexdigest()
         # Push only when the canonical config actually changed.
         if self.last_config_hash is None or\
                 current_config_hash != self.last_config_hash:
             self._logger.info("Config push for %s(%s) using job template %s" %
                       (self.physical_router.name, self.physical_router.uuid,
                        str(job_template)))
             self._logger.debug("Abstract config: %s" %
                                json.dumps(job_input, indent=4,
                                           sort_keys=True))
             device_manager = DeviceManager.get_instance()
             job_handler = JobHandler(job_template, job_input,
                                      None if is_delete else
                                      [self.physical_router.uuid],
                                      device_manager.get_api_server_config(),
                                      self._logger,
                                      device_manager._amqp_client,
                                      device_manager._args)
             self.commit_stats['total_commits_sent_since_up'] += 1
             start_time = time.time()
             # Blocks until the job completes; raises on failure.
             job_handler.push(**device_manager.get_job_status_config())
             end_time = time.time()
             self.commit_stats['commit_status_message'] = 'success'
             self.commit_stats['last_commit_time'] = \
                     datetime.datetime.fromtimestamp(
                     end_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(
                     end_time - start_time)
             # Remember the pushed config so identical pushes are skipped.
             self.last_config_hash = current_config_hash
         else:
             self._logger.debug("not pushing since no config change"
                                " detected")
         self.push_config_state = PushConfigState.PUSH_STATE_SUCCESS
     except Exception as e:
         self._logger.error("Router %s: %s" %
                            (self.physical_router.management_ip, repr(e)))
         self._logger.error("Abstract config: %s" %
                            json.dumps(job_input, indent=4,
                                       sort_keys=True))
         # NOTE(review): e.message is Python-2-only; the backslash
         # continuation also embeds a long run of spaces in the message.
         self.commit_stats[
                 'commit_status_message'] = 'failed to apply config,\
                                             router response: ' + e.message
         if start_time is not None:
             self.commit_stats['last_commit_time'] = \
                     datetime.datetime.fromtimestamp(
                         start_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(
                     time.time() - start_time)
         self.push_config_state = PushConfigState.PUSH_STATE_RETRY if retry\
             else PushConfigState.PUSH_STATE_FAILED
     return config_size
Exemplo n.º 4
0
 def device_send(self, job_template, job_input, is_delete, retry):
     """Push the device config through a JobHandler job, skipping the
     push when the sorted-key JSON hash matches the previous push.

     :param job_template: job template handed to JobHandler
     :param job_input: config dict ("playbook send message")
     :param is_delete: when True, JobHandler gets None instead of the
         router uuid list
     :param retry: failure sets PUSH_STATE_RETRY instead of
         PUSH_STATE_FAILED when True
     :return: length of the serialized config string
     """
     config_str = json.dumps(job_input, sort_keys=True)
     self.push_config_state = PushConfigState.PUSH_STATE_IN_PROGRESS
     start_time = None
     config_size = 0
     try:
         config_size = len(config_str)
         # NOTE(review): md5() on a str is Python-2-only; Python 3 would
         # need config_str.encode().
         current_config_hash = md5(config_str).hexdigest()
         # Push only when the canonical config actually changed.
         if self.last_config_hash is None or\
                 current_config_hash != self.last_config_hash:
             self._logger.info("config push for %s(%s) using job template %s" %
                       (self.physical_router.name, self.physical_router.uuid,
                        str(job_template)))
             self._logger.debug("playbook send message: %s" %
                                json.dumps(job_input, indent=4,
                                           sort_keys=True))
             device_manager = DeviceManager.get_instance()
             job_handler = JobHandler(job_template, job_input,
                                      None if is_delete else
                                      [self.physical_router.uuid],
                                      device_manager.get_analytics_config(),
                                      device_manager.get_vnc(), self._logger)
             self.commit_stats['total_commits_sent_since_up'] += 1
             start_time = time.time()
             # Blocks until the job completes; raises on failure.
             job_handler.push()
             end_time = time.time()
             self.commit_stats['commit_status_message'] = 'success'
             self.commit_stats['last_commit_time'] = \
                     datetime.datetime.fromtimestamp(
                     end_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(
                     end_time - start_time)
             self.last_config_hash = current_config_hash
         else:
             self._logger.debug("not pushing since no config change"
                                " detected")
         self.push_config_state = PushConfigState.PUSH_STATE_SUCCESS
     except Exception as e:
         # NOTE(review): e.message is Python-2-only; str(e) would be
         # portable.
         self._logger.error("Router %s: %s" %
                            (self.physical_router.management_ip, e.message))
         self.commit_stats[
                 'commit_status_message'] = 'failed to apply config,\
                                             router response: ' + e.message
         if start_time is not None:
             self.commit_stats['last_commit_time'] = \
                     datetime.datetime.fromtimestamp(
                         start_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(
                     time.time() - start_time)
         self.push_config_state = PushConfigState.PUSH_STATE_RETRY if retry\
             else PushConfigState.PUSH_STATE_FAILED
     return config_size
def redo_regular_job(job_id):
    """Re-run the task identified by *job_id*.

    (Docstring translated from Russian: starts execution of the task
    with the given job_id.)

    :param job_id: identifier of the job to execute
    :return: None
    """
    JobHandler(job_id)
Exemplo n.º 6
0
    def start_job(self):
        """Read the job template, run the job via JobHandler, and write a
        completion summary log.

        Errors are logged with their tracebacks; nothing is re-raised.
        """
        try:
            # create job UVE and log
            # NOTE(review): the %s placeholders are never interpolated in
            # this variant, so the literal format string is logged.
            msg = "Starting execution for job with template id %s" \
                  " and execution id %s"
            self._logger.debug(msg)

            # read the job template object
            job_template = self.job_utils.read_job_template()

            # spawn job greenlets
            job_handler = JobHandler(self._logger, self._vnc_api,
                                     job_template, self.job_execution_id,
                                     self.job_data, self.job_params,
                                     self.job_utils, self.device_json,
                                     self.auth_token)
            result_handler = JobResultHandler(job_template,
                                              self.job_execution_id,
                                              self._logger, self.job_utils)
            if job_template.get_job_template_multi_device_job():
                self.handle_multi_device_job(job_handler, result_handler)
            else:
                self.handle_single_job(job_handler, result_handler)

            timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
            result_handler.create_job_summary_log(timestamp)

        except JobException as e:
            # "received" typo fixed in the log message.
            self._logger.error("Job Exception received: %s" % e.msg)
            # traceback.print_stack() writes to stderr and returns None,
            # so the old code logged the string "None"; log the actual
            # exception traceback instead.
            self._logger.error(traceback.format_exc())
        except Exception as e:
            self._logger.error("Error while executing job %s " % repr(e))
            self._logger.error(traceback.format_exc())
Exemplo n.º 7
0
def do_regular_job(job_id, task_type, arguments, manager_type):
    """Run a job under normal conditions, i.e. no record with this id
    exists yet: insert a fresh row for the job into the database, then
    execute it.

    (Docstring translated from Russian.)

    :param job_id: job identifier
    :param task_type: type of the task
    :param arguments: argument mapping -- files and their paths
    :param manager_type: "local" or "net", depending on which manager
        issued the call
    :return: None
    """
    started_at = datetime.datetime.now().strftime(DATE_FORMAT)
    args_repr = ' '.join(map(str, arguments))
    row = (
        job_id,
        'processing',
        0,
        'step_1',
        args_repr,
        task_type,
        manager_type,
        '',
        started_at,
        '',
    )
    BaseDB(conf_dict).insert_into_table(row)
    with JobHandler(job_id, conf_dict) as job:
        job.run_job()
Exemplo n.º 8
0
    def start_job(self):
        """Run the job: emit the start UVE/log, execute via JobHandler,
        write a completion summary, and exit the process on failure.

        sys.exit() is used deliberately -- this runs as a dedicated job
        manager process and the exit message carries the failure reason.
        """
        try:
            # create job UVE and log
            msg = "Starting execution for job with template id %s" \
                  " and execution id %s" % (self.job_template_id,
                                            self.job_execution_id)
            self._logger.debug(msg)
            self.result_handler = JobResultHandler(self.job_template_id,
                                                   self.job_execution_id,
                                                   self._logger,
                                                   self.job_utils,
                                                   self.job_log_utils)

            # read the job template object
            job_template = self.job_utils.read_job_template()

            # Millisecond timestamp shared by the UVE and the job log.
            timestamp = int(round(time.time() * 1000))
            self.job_log_utils.send_job_execution_uve(job_template.fq_name,
                                                      self.job_execution_id,
                                                      timestamp, 0)
            self.job_log_utils.send_job_log(job_template.fq_name,
                                            self.job_execution_id,
                                            msg,
                                            JobStatus.STARTING.value,
                                            timestamp=timestamp)

            # spawn job greenlets
            job_handler = JobHandler(self._logger, self._vnc_api, job_template,
                                     self.job_execution_id, self.job_data,
                                     self.job_params, self.job_utils,
                                     self.device_json, self.auth_token,
                                     self.job_log_utils, self.sandesh_args)
            if job_template.get_job_template_multi_device_job():
                self.handle_multi_device_job(job_handler, self.result_handler)
            else:
                self.handle_single_job(job_handler, self.result_handler)

            # create job completion log and update job UVE
            self.result_handler.create_job_summary_log(job_template.fq_name)

            # in case of failures, exit the job manager process with failure
            if self.result_handler.job_result_status == JobStatus.FAILURE:
                sys.exit(self.result_handler.job_summary_message)
        except JobException as e:
            # NOTE(review): "recieved" typo is inside the runtime string;
            # left untouched since this change only adds comments.
            self._logger.error("Job Exception recieved: %s" % e.msg)
            # NOTE(review): traceback.print_stack() returns None (it
            # prints directly to stderr), so this logs "None".
            self._logger.error("%s" % traceback.print_stack())
            self.result_handler.update_job_status(JobStatus.FAILURE, e.msg)
            self.result_handler.create_job_summary_log(job_template.fq_name)
            sys.exit(e.msg)
        except Exception as e:
            self._logger.error("Error while executing job %s " % repr(e))
            self._logger.error("%s" % traceback.print_stack())
            # NOTE(review): e.message is Python-2-only.
            self.result_handler.update_job_status(JobStatus.FAILURE, e.message)
            self.result_handler.create_job_summary_log(job_template.fq_name)
            sys.exit(e.message)
        finally:
            # need to wait for the last job log and uve update to complete
            # via sandesh
            time.sleep(5)
Exemplo n.º 9
0
def redo_regular_job(job_id):
    """Re-run the task identified by *job_id*.

    (Docstring translated from Russian: starts execution of the task
    with the given job_id.)

    :param job_id: identifier of the job to execute
    :return: None
    """
    handler = JobHandler(job_id, conf_dict)
    with handler as job:
        job.run_job()
Exemplo n.º 10
0
class TestJobHandler(MockServerMixin):
    """Tests for JobHandler, run against MockServerMixin's mock server.

    NOTE(review): job_handler is a class attribute, so handler state
    carries across tests unless reset() is called first.
    """

    # Shared handler under test: client id plus two numeric settings
    # (presumably thread/concurrency counts -- confirm in JobHandler).
    job_handler = JobHandler('96e2154893e4610', 1, 1)
    # One URL that resolves to an image and one that does not.
    urls = [
        'https://www.goodurl.com/image.png',
        'https://www.goodurl.com',
        ]

    def test_submit(self):
        # Submitting registers the new job under its id.
        new_job = self.job_handler.submit(self.urls)
        assert self.job_handler.jobs.get(new_job['id'], None)

    def test_process_concurrently(self):
        self.job_handler.reset()
        new_job = self.job_handler._make_job(self.urls)
        job = self.job_handler._process_concurrently(new_job)
        # After processing, job fields should indicate a completion.
        assert job['id'] == new_job['id']
        assert job['created'] == new_job['created']
        assert job['finished'] is not None
        assert job['status'] == 'complete'
        # _process_concurrently manipulates the job in place.
        assert job == new_job

    def test_handle_futures(self):
        self.job_handler.reset()
        new_job = self.job_handler._make_job(self.urls)
        func = self.job_handler._process_image_url
        with futures.ThreadPoolExecutor(3) as executor:
            to_do = conc.submit_to_executor(executor, func, self.urls)
        # After completion, there should be no more pending urls
        # The bad url should fail and the image url should succeed.
        job = self.job_handler._handle_futures(new_job, to_do)
        assert job['uploaded']['pending'] == []
        assert job['uploaded']['complete'] == ['https://i.imgur.com/0yco6KM.gif']
        assert job['uploaded']['fail'] == ['https://www.goodurl.com']
        assert self.job_handler.images == ['https://i.imgur.com/0yco6KM.gif']
        # _handle_futures manipulates the job in place.
        assert job == new_job

    def test_process_image_url(self):
        # A good url should result in a imgur link
        link = self.job_handler._process_image_url(self.urls[0])
        assert link == 'https://i.imgur.com/0yco6KM.gif'
        # A bad url should result in an exception
        with pytest.raises(IOError):
            link = self.job_handler._process_image_url(self.urls[1])

    def test_upload_to_imgur(self):
        valid_base64 = 'dGhpcyBpcyBiYXNlIDY0'
        invalid_base64 = '$%^&'
        # Given a valid base64 string, the request should be made.
        resp = self.job_handler._upload_to_imgur(valid_base64)
        assert resp.status_code == 200
        # Otherwise raise an exception before making the request to imgur.
        with pytest.raises(ValueError):
            resp = self.job_handler._upload_to_imgur(invalid_base64)
Exemplo n.º 11
0
    def setUp(self):
        """Patch network/sleep calls and build a JobHandler under test."""
        super(TestJobHandler, self).setUp()

        # Stub out the HTTP POST helper and gevent.sleep so tests make
        # no real network calls and never actually sleep.
        self.post_patch = mock.patch(
            'opserver_util.OpServerUtils.post_url_http')
        self.post_mock = self.post_patch.start()
        self.sleep_patch = mock.patch('gevent.sleep')
        self.sleep_mock = self.sleep_patch.start()

        self.vnc_api = mock.Mock()
        self.logger = mock.Mock()

        self.job_type = ['test-job']
        self.job_input = {'key1': 'value1', 'key2': 'value2'}
        self.device_list = ['device1']
        self.analytic_config = dict(ip='1.2.3.4', port='8082')

        self.job_handler = JobHandler(
            self.job_type, self.job_input, self.device_list,
            self.analytic_config, self.vnc_api, self.logger)
 def device_send(self, job_template, job_input, retry):
     """Unconditionally push job_input to the device via JobHandler.

     Unlike the hash-checking variants, this always pushes; the push
     state starts at PUSH_STATE_INIT and ends at SUCCESS, RETRY, or
     FAILED.

     :param job_template: job template handed to JobHandler
     :param job_input: config dict sent as the playbook message
     :param retry: failure sets PUSH_STATE_RETRY instead of
         PUSH_STATE_FAILED when True
     :return: length of the serialized config string
     """
     config_str = json.dumps(job_input)
     self.push_config_state = PushConfigState.PUSH_STATE_INIT
     start_time = None
     config_size = 0
     try:
         self._logger.debug("playbook send message: %s" % config_str)
         config_size = len(config_str)
         device_manager = DeviceManager.get_instance()
         job_handler = JobHandler(job_template, job_input,
                                  [self.physical_router.uuid],
                                  device_manager.get_analytics_config(),
                                  device_manager.get_vnc(), self._logger)
         self.commit_stats['total_commits_sent_since_up'] += 1
         start_time = time.time()
         # Blocks until the job completes; raises on failure.
         job_handler.push()
         end_time = time.time()
         self.commit_stats['commit_status_message'] = 'success'
         self.commit_stats['last_commit_time'] = \
                 datetime.datetime.fromtimestamp(
                 end_time).strftime('%Y-%m-%d %H:%M:%S')
         self.commit_stats['last_commit_duration'] = str(
                 end_time - start_time)
         self.push_config_state = PushConfigState.PUSH_STATE_SUCCESS
     except Exception as e:
         # NOTE(review): e.message is Python-2-only; the backslash
         # continuation also embeds a run of spaces in the message.
         self._logger.error("Router %s: %s" %
                            (self.physical_router.management_ip, e.message))
         self.commit_stats[
                 'commit_status_message'] = 'failed to apply config,\
                                             router response: ' + e.message
         if start_time is not None:
             self.commit_stats['last_commit_time'] = \
                     datetime.datetime.fromtimestamp(
                         start_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(
                     time.time() - start_time)
         self.push_config_state = PushConfigState.PUSH_STATE_RETRY if retry\
             else PushConfigState.PUSH_STATE_FAILED
     return config_size
Exemplo n.º 13
0
def main(client_id, num_threads, conc_reqs):
    """Build the Flask REST app, wire every Resource class defined in
    this module to a shared JobHandler, and serve it.

    :param client_id: client id handed to the JobHandler
    :param num_threads: thread count handed to the JobHandler
    :param conc_reqs: concurrent-request limit handed to the JobHandler
    """
    app = Flask(__name__)
    api = Api(app)

    # This collects the defined resources automatically as new ones are
    # defined.  Compare module names with == (not `is`): string identity
    # is an interning accident, not a guarantee.
    resources = ((obj, obj.URLs) for _, obj in inspect.getmembers(
        sys.modules[__name__], inspect.isclass) if obj.__module__ == __name__)

    job_handler = JobHandler(client_id, num_threads, conc_reqs)
    for resource, urls in resources:
        api.add_resource(resource, *urls, resource_class_args=(job_handler, ))

    app.run(debug=False, host='0.0.0.0')
Exemplo n.º 14
0
def main():
    """Create the job generator and result handler, then run their
    greenlets until both finish."""
    global job_generator, connection

    config = parse_config()
    connection = connect_db(config)
    event_handler = ResultHandler(connection)
    job_generator = JobHandler(connection, event_handler)

    try:
        result_glet = gevent.spawn(event_handler.result_queue_handler)
        update_glet = gevent.spawn(job_generator.job_updater)
        gevent.joinall([update_glet, result_glet])
    except (gevent.GreenletExit, Exception) as err:
        # A deliberate greenlet kill and any other failure are logged
        # identically, matching the original pair of handlers.
        logger.warning(err)
Exemplo n.º 15
0
    def start_job(self):
        """Execute the job described by this manager's template.

        Start/finish UVEs and job logs are sent around the actual
        execution, which is delegated to JobHandler; failures are
        funneled through mark_job_failure().
        """
        try:
            # create job UVE and log
            # NOTE(review): the %s placeholders are never interpolated in
            # this variant, so the literal format string is logged/sent.
            msg = "Starting execution for job with template id %s" \
                  " and execution id %s"
            self._logger.debug(msg)
            timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
            self.job_utils.send_job_execution_uve(timestamp, 0)
            self.job_utils.send_job_log(msg, JobStatus.STARTING,
                                        timestamp=timestamp)
            # read the job template object
            job_template = self.job_utils.read_job_template()

            # spawn job greenlets
            job_handler = JobHandler(self._logger, self._vnc_api,
                                     job_template, self.job_execution_id,
                                     self.job_data, self.job_params,
                                     self.job_utils)
            result_handler = JobResultHandler(job_template,
                                              self.job_execution_id,
                                              self._logger, self.job_utils)
            if job_template.get_job_template_multi_device_job():
                self.handle_multi_device_job(job_handler, result_handler)
            else:
                self.handle_single_job(job_handler, result_handler)

            # update job uve
            timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
            self.job_utils.send_job_execution_uve(timestamp, 100)
            result_handler.create_job_summary_log(timestamp)
        except JobException as e:
            # NOTE(review): sibling variants call mark_job_failure with a
            # single argument -- confirm the two-argument form is intended.
            self.mark_job_failure(e.message, self.job_execution_id)
        except Exception as e:
            self._logger.error("Error while executing job %s " % repr(e))
            # traceback.print_stack() returns None (it prints directly to
            # stderr), so the old call logged "None"; log the actual
            # exception traceback instead.
            self._logger.error(traceback.format_exc())
            self.mark_job_failure(e.message)
Exemplo n.º 16
0
class MissForestImputationSlurm(MissForestImputation):
    """Private MissForest subclass that farms each random-forest fit out
    to a SLURM cluster instead of computing it in-process.

    Work is exchanged with the cluster jobs through pickle files managed
    by a JobHandler (arguments in, results out); the driver polls the
    result files until every submitted job reports completion.
    """
    def __init__(self, mf_params, rf_params, partition, n_nodes, n_cores, node_features, memory, time):
        # mf_params configure the MissForest base class; the remaining
        # arguments describe the random forest and SLURM job geometry.
        super().__init__(**mf_params)
        self.params = rf_params
        self.n_nodes = n_nodes
        self.node_features = node_features
        self.handler = JobHandler(partition, n_cores, memory, time)

    def miss_forest_imputation(self, matrix_for_impute):
        """Iteratively impute matrix_for_impute via SLURM-submitted
        random-forest jobs until convergence or max_iter is reached.

        The final matrix is stored in self.result_matrix.
        """
        self.matrix_for_impute = matrix_for_impute
        self.raw_fill()

        # [node][job][feature] partition of the variable indices.
        vari_node = self.split_var()
        self.previous_iter_matrix = np.copy(self.initial_guess_matrix)
        self.cur_iter_matrix = np.copy(self.initial_guess_matrix)
        cur_iter = 1

        while True:
            if cur_iter > self.max_iter:
                # Out of iterations: keep the last completed matrix.
                self.result_matrix = self.previous_iter_matrix
                return
            print("iteration " + str(cur_iter))

            for i in range(len(vari_node)):
                # Snapshot the current matrix for the workers to read.
                cur_X = self.cur_iter_matrix
                x_path = self.handler.tmp_X_file
                with open(x_path, 'wb') as tmp:
                    pickle.dump(cur_X, tmp)
                for j in range(len(vari_node[i])):
                    #Prepare the jobs
                    cur_vari = vari_node[i][j]
                    cur_vart = []
                    cur_obsi = []
                    cur_misi = []
                    for k in range(len(vari_node[i][j])):
                        cur_vart.append(self.vart_[cur_vari[k]])
                        cur_obsi.append(self.obsi[cur_vari[k]])
                        cur_misi.append(self.misi[cur_vari[k]])

                    argument_path = self.handler.get_arguments_varidx_file(i, j)
                    result_path = self.handler.get_results_varidx_file(i, j)
                    rf = RandomForest(self.params)
                    with open(argument_path, 'wb') as tmp:
                        argument_object = MissForestImputationSlurmArgumentObject(rf, cur_vart, cur_vari, cur_obsi, cur_misi)
                        pickle.dump(argument_object, tmp)
                    with open(result_path, 'wb') as tmp:
                        # argument_object.results.done = False
                        pickle.dump(argument_object.results, tmp)

                    # write job.sh and submit
                    command_shell = self.handler.get_command_shell(x_path, argument_path, result_path)
                    command_shell =' '.join(command_shell)
                    with open(self.handler.shell_script_path, 'w') as tmp:
                        tmp.writelines('#!/bin/bash\n')
                        tmp.writelines(command_shell)
                    command = self.handler.get_command(i, j, cur_iter)
                    subprocess.call(command)

                # Poll the per-job result pickles until every job in this
                # node batch reports done; an unreadable or partial file
                # simply triggers another polling round.
                finish = False
                finished_ind = [False]*len(vari_node[i])
                # finished_count = 0
                while finish == False:
                    time.sleep(0.1)
                    finish = True
                    for j in range(len(vari_node[i])):
                        if finished_ind[j] == True:
                            continue

                        cur_vari = vari_node[i][j]
                        cur_obsi = []
                        cur_misi = []
                        for k in range(len(vari_node[i][j])):
                            cur_obsi.append(self.obsi[cur_vari[k]])
                            cur_misi.append(self.misi[cur_vari[k]])

                        result_path = self.handler.get_results_varidx_file(i, j)
                        try:
                            with open(result_path,'rb') as tmp:
                                cur_result = pickle.load(tmp)
                                if cur_result.done == False:
                                    finish = False
                                    break
                                else:
                                    # Copy the imputed values for the
                                    # missing rows back into the matrix.
                                    for k in range(len(cur_vari)):
                                        self.cur_iter_matrix[cur_misi[k],cur_vari[k]] = cur_result.imp_list[k]
                                    finished_ind[j] = True

                            # if finished_ind.count(True) > finished_count:
                            #     finished_count = finished_ind.count(True)
                            #     print(finished_count, "/", len(finished_ind), "finished!")

                        except Exception as e:
                            # Result file not ready/complete yet -- retry.
                            finish = False
                            break

            if self.check_converge() == True:
                self.result_matrix = self.previous_iter_matrix
                return

            #Update the previous_iter_matrix
            self.previous_iter_matrix = np.copy(self.cur_iter_matrix)

            cur_iter = cur_iter + 1

    def split_var(self):
        """Partition self.vari into [node][job][feature] chunks.

        Each job gets up to self.node_features variables and each node
        batch holds up to self.n_nodes jobs; leftovers form a final
        partial job/batch.
        """
        #[NODES,[JOBS,[FEATURE]],]

        vari_node = []
        cur_node_idx = 0
        cur_job_idx = 0

        cur_jobs = []
        cur_vari = []

        for var in self.vari:
            cur_vari.append(var)
            if len(cur_vari) == self.node_features:
                cur_jobs.append(cur_vari)
                cur_vari = []
                if len(cur_jobs) == self.n_nodes:
                    vari_node.append(cur_jobs)
                    cur_jobs = []

        if len(cur_vari) > 0:
            cur_jobs.append(cur_vari)
        if len(cur_jobs) > 0:
            vari_node.append(cur_jobs)

        print(np.shape(vari_node))
        return vari_node
Exemplo n.º 17
0
 def __init__(self, mf_params, rf_params, partition, n_nodes, n_cores, node_features, memory, time):
     """Configure the SLURM-backed imputer.

     mf_params feed the MissForest base class; the remaining arguments
     carry the random-forest settings and the SLURM job geometry.
     """
     super().__init__(**mf_params)
     self.handler = JobHandler(partition, n_cores, memory, time)
     self.node_features = node_features
     self.n_nodes = n_nodes
     self.params = rf_params
Exemplo n.º 18
0
class TestJobHandler(unittest.TestCase):
    """Tests for the AMQP-based JobHandler.

    JobHandler.push() publishes a job request and registers a status
    consumer; these tests drive the registered consumer callback from the
    patched gevent.sleep (the handler's poll point) to simulate SUCCESS,
    IN_PROGRESS, and FAILURE status messages.
    """

    # Canned status messages fed to the registered consumer callback.
    JOB_SUCCESS = AttrDict({ "job_status": "SUCCESS" })
    JOB_FAILURE = AttrDict({ "job_status": "FAILURE" })
    JOB_IN_PROGRESS = AttrDict({ "job_status": "IN_PROGRESS" })

    # Default arguments handed to JobHandler.push() by most tests.
    TIMEOUT = 15
    MAX_RETRIES = 30

    def setUp(self):
        """Patch gevent.sleep (so waits return instantly) and build a
        JobHandler wired to mocked logger and AMQP client."""
        super(TestJobHandler, self).setUp()
        self.sleep_patch = mock.patch('gevent.sleep')
        self.sleep_mock = self.sleep_patch.start()
        self.logger = mock.Mock()
        self.amqp_client = mock.Mock()
        self.message = mock.Mock()

        self.job_template_name = ['test-job']
        self.job_input = {'key1': 'value1', 'key2': 'value2'}
        self.device_list = ['device1']
        self.api_server_config = {
            'ips': ["1.2.3.4"],
            'port': "8082",
            'username': "******",
            'password': "******",
            'tenant': "default",
            'use_ssl': False
        }
        self.args = AttrDict({
            'api_server_ip': '127.0.0.1',
            'admin_user': '******',
            'admin_password': '******',
            'admin_tenant_name': 'test',
            'api_server_port': 8082,
            'api_server_use_ssl': False
        })

        self.job_handler = JobHandler(self.job_template_name, self.job_input,
                                      self.device_list, self.api_server_config,
                                      self.logger, self.amqp_client, self.args)
    # end setUp

    def tearDown(self):
        """Undo the gevent.sleep patch."""
        super(TestJobHandler, self).tearDown()
        self.sleep_patch.stop()
    # end tearDown

    def test_job_executed_successfully(self):
        """First status message is SUCCESS: verifies the publish/consumer
        wiring (exchange, routing keys, auto_delete) and the final state."""
        def side_effect(*_):
            # Invoked from the handler's sleep: the job must still be
            # pending at this point, before we deliver the status message.
            self.assertFalse(self.job_handler._is_job_done())
            self.assertEqual(self.job_handler.get_job_status(),
                             JobStatus.STARTING)
            # Deliver SUCCESS through the callback the handler registered.
            _, kwargs = self.amqp_client.add_consumer.call_args_list[0]
            callback = kwargs['callback']
            callback(self.JOB_SUCCESS, self.message)
        # end side_effect

        self.sleep_mock.side_effect = side_effect
        self.assertFalse(self.job_handler._is_job_done())
        self.job_handler.push(self.TIMEOUT, self.MAX_RETRIES)

        self.amqp_client.add_consumer.assert_called_once()
        self.amqp_client.publish.assert_called_once()

        # The request payload carries the execution id used to build the
        # per-job status consumer name and routing key below.
        args, kwargs = self.amqp_client.publish.call_args_list[0]
        job_payload = args[0]
        job_execution_id = job_payload.get('job_execution_id')
        self.assertEqual(args[1], JobHandler.JOB_REQUEST_EXCHANGE)
        self.assertEqual(kwargs['routing_key'], JobHandler.JOB_REQUEST_ROUTING_KEY)

        args, kwargs = self.amqp_client.add_consumer.call_args_list[0]
        self.assertEqual(args[0], JobHandler.JOB_STATUS_CONSUMER + job_execution_id)
        self.assertEqual(args[1], JobHandler.JOB_STATUS_EXCHANGE)
        self.assertEqual(kwargs['routing_key'], JobHandler.JOB_STATUS_ROUTING_KEY + job_execution_id)
        self.assertEqual(kwargs['auto_delete'], True)

        # Status message is acked and the consumer torn down afterwards.
        self.message.ack.assert_called_once()
        self.amqp_client.remove_consumer.assert_called_once()

        self.assertTrue(self.job_handler._is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.SUCCESS)
    # end test_job_executed_successfully

    def test_job_stays_in_progress_then_completes(self):
        """Two IN_PROGRESS messages then SUCCESS: the handler keeps polling
        (three sleeps) and finishes with SUCCESS."""
        job_statuses = [self.JOB_IN_PROGRESS,
                        self.JOB_IN_PROGRESS,
                        self.JOB_SUCCESS]

        def side_effect(*_):
            # Deliver the next canned status on every poll (sleep call).
            side_effect.counter += 1
            _, kwargs = self.amqp_client.add_consumer.call_args_list[0]
            callback = kwargs['callback']
            callback(job_statuses[side_effect.counter-1], self.message)
        # end side_effect
        side_effect.counter = 0

        self.sleep_mock.side_effect = side_effect

        self.assertFalse(self.job_handler._is_job_done())
        self.job_handler.push(self.TIMEOUT, self.MAX_RETRIES)

        self.amqp_client.publish.assert_called_once()

        self.assertEqual(side_effect.counter, 3)
        self.assertTrue(self.job_handler._is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.SUCCESS)
    # end test_job_stays_in_progress_then_completes

    def test_job_failed(self):
        """IN_PROGRESS then FAILURE: push() raises and status is FAILURE."""
        job_statuses = [self.JOB_IN_PROGRESS,
                        self.JOB_FAILURE]

        def side_effect(*_):
            # Deliver the next canned status on every poll (sleep call).
            side_effect.counter += 1
            _, kwargs = self.amqp_client.add_consumer.call_args_list[0]
            callback = kwargs['callback']
            callback(job_statuses[side_effect.counter-1], self.message)
        # end side_effect
        side_effect.counter = 0

        self.sleep_mock.side_effect = side_effect

        self.assertFalse(self.job_handler._is_job_done())
        self.assertRaises(Exception, self.job_handler.push, self.TIMEOUT, self.MAX_RETRIES)

        self.amqp_client.publish.assert_called_once()

        self.assertEqual(side_effect.counter, 2)
        self.assertTrue(self.job_handler._is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.FAILURE)
    # end test_job_failed

    def test_execute_job_throws(self):
        """publish() raising TimeOutError fails the job immediately —
        no polling happens (sleep is never called)."""
        self.assertFalse(self.job_handler._is_job_done())
        self.amqp_client.publish.side_effect = \
            cfgm_common.exceptions.TimeOutError(500)

        self.assertRaises(Exception, self.job_handler.push, self.TIMEOUT, self.MAX_RETRIES)

        self.amqp_client.publish.assert_called_once()

        self.sleep_mock.assert_not_called()
        self.assertTrue(self.job_handler._is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.FAILURE)
    # end test_execute_job_throws

    def test_max_retries_done(self):
        """Only IN_PROGRESS messages with max_retries=3: push() raises after
        three polls and the job ends up FAILURE."""
        job_statuses = [self.JOB_IN_PROGRESS,
                        self.JOB_IN_PROGRESS,
                        self.JOB_IN_PROGRESS]

        def side_effect(*_):
            # Deliver the next canned status on every poll (sleep call).
            side_effect.counter += 1
            _, kwargs = self.amqp_client.add_consumer.call_args_list[0]
            callback = kwargs['callback']
            callback(job_statuses[side_effect.counter-1], self.message)
        # end side_effect
        side_effect.counter = 0

        self.sleep_mock.side_effect = side_effect

        self.assertFalse(self.job_handler._is_job_done())
        self.assertRaises(Exception, self.job_handler.push, self.TIMEOUT, 3)

        self.amqp_client.publish.assert_called_once()

        self.assertEqual(side_effect.counter, 3)
        self.assertTrue(self.job_handler._is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.FAILURE)
Exemplo n.º 19
0
class TestJobHandler(unittest.TestCase):
    """Tests for the analytics-polling JobHandler variant.

    This JobHandler executes a job through vnc_api.execute_job() and then
    polls the analytics query endpoint (OpServerUtils.post_url_http) for a
    JobLog entry to decide whether the job completed or failed.
    """

    # Canned analytics query responses: no log entries found ...
    NO_LOG_RESPONSE = """
    {
        "value": []
    }
    """
    # ... and a single JobLog record.
    JOB_LOG_RESPONSE = """
    {
        "value": [
            {
                "MessageTS": 12345,
                "MessageType": "JobLog"
            }
        ]
    }
    """
    TIMEOUT = 15

    def setUp(self):
        """Patch the analytics POST helper and gevent.sleep, and build a
        JobHandler wired to a mocked vnc_api and logger."""
        super(TestJobHandler, self).setUp()
        self.post_patch = mock.patch(
            'opserver_util.OpServerUtils.post_url_http')
        self.sleep_patch = mock.patch('gevent.sleep')
        self.post_mock = self.post_patch.start()
        self.sleep_mock = self.sleep_patch.start()
        self.vnc_api = mock.Mock()
        self.logger = mock.Mock()

        self.job_type = ['test-job']
        self.job_input = {'key1': 'value1', 'key2': 'value2'}
        self.device_list = ['device1']
        self.analytic_config = {'ip': '1.2.3.4', 'port': '8082'}

        self.job_handler = JobHandler(self.job_type, self.job_input,
                                      self.device_list, self.analytic_config,
                                      self.vnc_api, self.logger)

    # end setUp

    def tearDown(self):
        """Undo both patches."""
        super(TestJobHandler, self).tearDown()
        self.post_patch.stop()
        self.sleep_patch.stop()

    # end tearDown

    def test_job_executed_successfully(self):
        """First analytics query already returns a JobLog: job COMPLETE
        after a single POST and no sleeping."""
        self.assertFalse(self.job_handler.is_job_done())
        self.vnc_api.execute_job.return_value = {'job_execution_id': 'job-1'}
        self.post_mock.return_value = self.JOB_LOG_RESPONSE

        self.assertEqual(self.job_handler.get_job_status(), JobStatus.INIT)
        self.job_handler.push()

        self.vnc_api.execute_job.assert_called_with(
            job_template_fq_name=self.job_type,
            job_input=self.job_input,
            device_list=self.device_list)
        # Queries go to the analytics endpoint built from analytic_config.
        self.assertEqual(self.post_mock.call_args[0][1],
                         'http://1.2.3.4:8082/analytics/query')
        self.assertEqual(self.post_mock.call_count, 1)
        self.assertTrue(self.job_handler.is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.COMPLETE)
        self.assertEqual(self.sleep_mock.call_args_list, [])

    # end test_job_executed_successfully

    def test_job_stays_in_progress_then_completes(self):
        """Two empty query results, then a JobLog: the handler sleeps
        between polls and finishes COMPLETE after three POSTs."""
        return_values = [
            self.NO_LOG_RESPONSE, self.NO_LOG_RESPONSE, self.JOB_LOG_RESPONSE
        ]

        def side_effect(*_):
            # Each POST must find the job still pending/in progress.
            self.assertFalse(self.job_handler.is_job_done())
            self.assertEqual(self.job_handler.get_job_status(),
                             JobStatus.IN_PROGRESS)
            side_effect.counter += 1
            return return_values[side_effect.counter - 1]

        # end side_effect
        side_effect.counter = 0

        self.assertFalse(self.job_handler.is_job_done())
        self.vnc_api.execute_job.return_value = {'job_execution_id': 'job-1'}
        self.post_mock.side_effect = side_effect

        self.assertEqual(self.job_handler.get_job_status(), JobStatus.INIT)
        self.job_handler.push()

        self.vnc_api.execute_job.assert_called_with(
            job_template_fq_name=self.job_type,
            job_input=self.job_input,
            device_list=self.device_list)
        self.assertEqual(self.post_mock.call_count, 3)
        self.assertTrue(self.job_handler.is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.COMPLETE)
        self.assertEqual(self.sleep_mock.call_args_list,
                         [mock.call(self.TIMEOUT)])

    # end test_job_stays_in_progress_then_completes

    def test_job_failed(self):
        """Two POSTs in one attempt, the second finding a JobLog, make
        push() raise with status FAILED and no sleeping.

        NOTE(review): this implies the handler issues two distinct queries
        per attempt (presumably success-log then failure-log) — confirm
        against the JobHandler implementation.
        """
        self.assertFalse(self.job_handler.is_job_done())
        self.vnc_api.execute_job.return_value = {'job_execution_id': 'job-1'}
        self.post_mock.side_effect = [
            self.NO_LOG_RESPONSE, self.JOB_LOG_RESPONSE
        ]

        self.assertEqual(self.job_handler.get_job_status(), JobStatus.INIT)
        self.assertRaises(Exception, self.job_handler.push)

        self.vnc_api.execute_job.assert_called_with(
            job_template_fq_name=self.job_type,
            job_input=self.job_input,
            device_list=self.device_list)
        self.assertEqual(self.post_mock.call_count, 2)
        self.assertTrue(self.job_handler.is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.FAILED)
        self.assertEqual(self.sleep_mock.call_args_list, [])

    # end test_job_failed

    def test_execute_job_throws(self):
        """execute_job raising HttpError fails the job before any analytics
        query or sleep happens."""
        self.assertFalse(self.job_handler.is_job_done())
        self.vnc_api.execute_job.side_effect = \
            cfgm_common.exceptions.HttpError(500, "execute-job failed")

        self.assertEqual(self.job_handler.get_job_status(), JobStatus.INIT)
        self.assertRaises(Exception, self.job_handler.push)

        self.vnc_api.execute_job.assert_called_with(
            job_template_fq_name=self.job_type,
            job_input=self.job_input,
            device_list=self.device_list)
        self.post_mock.assert_not_called()
        self.assertTrue(self.job_handler.is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.FAILED)
        self.sleep_mock.assert_not_called()

    # end test_execute_job_throws

    def test_check_status_throws(self):
        """The analytics POST itself raising HTTPError fails the job after
        a single query."""
        self.assertFalse(self.job_handler.is_job_done())
        self.vnc_api.execute_job.return_value = {'job_execution_id': 'job-1'}
        self.post_mock.side_effect = requests.exceptions.HTTPError()

        self.assertEqual(self.job_handler.get_job_status(), JobStatus.INIT)
        self.assertRaises(Exception, self.job_handler.push)

        self.vnc_api.execute_job.assert_called_with(
            job_template_fq_name=self.job_type,
            job_input=self.job_input,
            device_list=self.device_list)
        self.assertEqual(self.post_mock.call_count, 1)
        self.assertTrue(self.job_handler.is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.FAILED)
        self.sleep_mock.assert_not_called()

    # end test_check_status_throws

    def test_max_retries_done(self):
        """Empty query results with max_retries=3: push() raises after
        6 POSTs (two per attempt) and 2 sleeps (between attempts only)."""
        self.assertFalse(self.job_handler.is_job_done())
        self.vnc_api.execute_job.return_value = {'job_execution_id': 'job-1'}
        self.post_mock.side_effect = [
            self.NO_LOG_RESPONSE, self.NO_LOG_RESPONSE, self.NO_LOG_RESPONSE,
            self.NO_LOG_RESPONSE, self.NO_LOG_RESPONSE, self.NO_LOG_RESPONSE
        ]

        self.assertEqual(self.job_handler.get_job_status(), JobStatus.INIT)
        self.assertRaises(Exception, self.job_handler.push, self.TIMEOUT, 3)

        self.vnc_api.execute_job.assert_called_with(
            job_template_fq_name=self.job_type,
            job_input=self.job_input,
            device_list=self.device_list)
        self.assertEqual(self.post_mock.call_count, 6)
        self.assertTrue(self.job_handler.is_job_done())
        self.assertEqual(self.job_handler.get_job_status(), JobStatus.FAILED)
        self.assertEqual(self.sleep_mock.call_args_list,
                         [mock.call(self.TIMEOUT),
                          mock.call(self.TIMEOUT)])
Exemplo n.º 20
0
    - bench_positions: categorizes bench players by category, then takes
    this number from each position to add to the final roster
    - path: filename of output file
    - output_format: takes arguments csv, pkl, or pickle. outputs dataframe
    in corresponding format
    '''

    # initial processing parameters
    # NOTE(review): resolved an unmerged git conflict (<<<<<<< HEAD /
    # >>>>>>> origin/master) left in the source.  The origin/master side is
    # kept because it is the only branch that supplies the 'output_format'
    # key documented in the parameter description above; confirm the
    # intended min/max_year and path values with the author.
    base_params = {'min_year': 2000, 'max_year': 2013,
                   'history_steps': 7, 'min_player_games': 2,
                   'num_players': 9, 'path': 'output_14yrs.pkl',
                   'bench_positions': {'Guard': 1, 'Wing': 1, 'Big': 1},
                   'output_format': 'pickle'
                   }

    # perform additional jobs with these parameters changed
    additional_search_params = []

    # create a list of jobs with specified parameters
    jobhandler = JobHandler(base_params, additional_search_params)

    # perform all processing jobs
    jobhandler.process_jobs()