Exemplo n.º 1
0
    def start_job(self):
        """Execute the job described by this manager's template and inputs.

        Reads the job template, runs it through a JobHandler (multi-device
        or single-job path depending on the template), then writes a job
        summary log. Errors are logged; this method does not re-raise.
        """
        try:
            # create job UVE and log
            # NOTE(review): the %s placeholders are never interpolated here;
            # sibling implementations fill in template/execution ids —
            # left as-is pending confirmation of the intended arguments.
            msg = "Starting execution for job with template id %s" \
                  " and execution id %s"
            self._logger.debug(msg)

            # read the job template object
            job_template = self.job_utils.read_job_template()

            # spawn job greenlets
            job_handler = JobHandler(self._logger, self._vnc_api,
                                     job_template, self.job_execution_id,
                                     self.job_data, self.job_params,
                                     self.job_utils, self.device_json,
                                     self.auth_token)
            result_handler = JobResultHandler(job_template,
                                              self.job_execution_id,
                                              self._logger, self.job_utils)
            if job_template.get_job_template_multi_device_job():
                self.handle_multi_device_job(job_handler, result_handler)
            else:
                self.handle_single_job(job_handler, result_handler)

            timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
            result_handler.create_job_summary_log(timestamp)

        except JobException as e:
            self._logger.error("Job Exception received: %s" % e.msg)
            # BUG FIX: traceback.print_stack() returns None (it writes to
            # stderr), so the old code logged the string "None".
            # format_stack() returns the stack as a list of strings.
            self._logger.error("%s" % "".join(traceback.format_stack()))
        except Exception as e:
            self._logger.error("Error while executing job %s " % repr(e))
            self._logger.error("%s" % "".join(traceback.format_stack()))
Exemplo n.º 2
0
 def device_send(self, job_template, job_input, is_delete, retry):
     """Push the abstract config to this physical router via a JobHandler.

     The config is only pushed when it differs from the last pushed config
     (hash comparison) or when the router requests a forced push. Commit
     statistics and push state are updated on both success and failure.

     :param job_template: job template executed to apply the config
     :param job_input: dict with the abstract device configuration
     :param is_delete: unused in this body -- TODO confirm callers' intent
     :param retry: if True, a failed push ends in PUSH_STATE_RETRY
         instead of PUSH_STATE_FAILED
     :return: size in bytes of the serialized config (0 if serialization
         or an early step failed)
     """
     config_str = json.dumps(job_input, sort_keys=True)
     self.push_config_state = PushConfigState.PUSH_STATE_IN_PROGRESS
     start_time = None
     config_size = 0
     forced_cfg_push = self.physical_router.forced_cfg_push
     try:
         config_size = len(config_str)
         # sort_keys above makes the hash stable across dict orderings
         current_config_hash = md5(config_str).hexdigest()
         # push only on first config, on config change, or when forced
         if self.last_config_hash is None or (
                 current_config_hash != self.last_config_hash
                 or forced_cfg_push):
             self._logger.info(
                 "Config push for %s(%s) using job template %s, "
                 "forced_push %s" %
                 (self.physical_router.name, self.physical_router.uuid,
                  str(job_template), forced_cfg_push))
             self._logger.debug(
                 "Abstract config: %s" %
                 json.dumps(job_input, indent=4, sort_keys=True))
             device_manager = DeviceManager.get_instance()
             job_handler = JobHandler(
                 job_template, job_input, [self.physical_router.uuid],
                 device_manager.get_api_server_config(), self._logger,
                 device_manager._amqp_client,
                 self.physical_router.transaction_id,
                 self.physical_router.transaction_descr,
                 device_manager._args)
             self.commit_stats['total_commits_sent_since_up'] += 1
             start_time = time.time()
             # blocks until the job completes; raises on push failure
             job_handler.push(**device_manager.get_job_status_config())
             end_time = time.time()
             self.commit_stats['commit_status_message'] = 'success'
             self.commit_stats['last_commit_time'] = \
                 datetime.datetime.fromtimestamp(
                 end_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(end_time -
                                                             start_time)
             # remember what we pushed so identical configs are skipped
             self.last_config_hash = current_config_hash
         else:
             self._logger.debug("not pushing since no config change"
                                " detected")
         self.push_config_state = PushConfigState.PUSH_STATE_SUCCESS
     except Exception as e:
         self._logger.error("Router %s: %s" %
                            (self.physical_router.management_ip, repr(e)))
         self._logger.error("Abstract config: %s" %
                            json.dumps(job_input, indent=4, sort_keys=True))
         # NOTE: e.message is Python 2 only -- this module predates py3
         self.commit_stats[
             'commit_status_message'] = 'failed to apply config,\
                                             router response: ' + e.message
         # start_time is only set once the push was actually attempted
         if start_time is not None:
             self.commit_stats['last_commit_time'] = \
                 datetime.datetime.fromtimestamp(
                 start_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(time.time() -
                                                             start_time)
         self.push_config_state = PushConfigState.PUSH_STATE_RETRY if retry\
             else PushConfigState.PUSH_STATE_FAILED
     return config_size
def redo_regular_job(job_id):
    """Re-run the task identified by job_id.

    :param job_id: id of the job to re-run
    :return: None
    """
    # NOTE(review): the JobHandler instance is discarded -- presumably its
    # constructor kicks off the job; confirm, since other call sites use it
    # as a context manager and call run_job() explicitly.
    JobHandler(job_id)
Exemplo n.º 4
0
def do_regular_job(job_id, task_type, arguments, manager_type):
    """Run a job under normal conditions, i.e. when no record with this id
    exists yet: insert a fresh row into the DB, then execute job @job_id.

    :param job_id: id of the job
    :param task_type: type of the task
    :param arguments: argument mapping -- files and their paths
    :param manager_type: which manager invoked us, "local" or "net"
    :return: void
    """

    database = BaseDB(conf_dict)
    started_at = datetime.datetime.now().strftime(DATE_FORMAT)
    # Row layout: id, status, progress, step, args, task, manager,
    # result placeholder, start time, end placeholder.
    record = (
        job_id,
        'processing',
        0,
        'step_1',
        ' '.join(str(arg) for arg in arguments),
        task_type,
        manager_type,
        '',
        started_at,
        '',
    )
    database.insert_into_table(record)
    with JobHandler(job_id, conf_dict) as handler:
        handler.run_job()
    def setUp(self):
        """Patch gevent.sleep, build mocked collaborators and fixture
        inputs, and construct the JobHandler under test."""
        super(TestJobHandler, self).setUp()

        # Patch gevent.sleep so the handler never actually blocks.
        self.sleep_patch = mock.patch('gevent.sleep')
        self.sleep_mock = self.sleep_patch.start()

        # Mocked collaborators.
        self.logger = mock.Mock()
        self.amqp_client = mock.Mock()
        self.message = mock.Mock()

        # Fixture inputs for the handler.
        self.job_template_name = ['test-job']
        self.job_input = dict(key1='value1', key2='value2')
        self.device_list = ['device1']
        self.api_server_config = dict(
            ips=["1.2.3.4"],
            port="8082",
            username="******",
            password="******",
            tenant="default",
            use_ssl=False,
        )
        self.args = AttrDict(dict(
            api_server_ip='127.0.0.1',
            admin_user='******',
            admin_password='******',
            admin_tenant_name='test',
            api_server_port=8082,
            api_server_use_ssl=False,
        ))

        self.job_handler = JobHandler(
            self.job_template_name, self.job_input, self.device_list,
            self.api_server_config, self.logger, self.amqp_client, self.args)
Exemplo n.º 6
0
    def start_job(self):
        """Run the job end to end: emit start log/UVE, execute the handler,
        then write a completion summary.

        On failure the job status is marked FAILURE, a summary log is
        written when possible, and the process exits via sys.exit().
        """
        # BUG FIX: job_template was previously unbound when
        # read_job_template() raised, so the except blocks crashed with
        # UnboundLocalError and masked the original error.
        job_template = None
        try:
            # create job UVE and log
            msg = "Starting execution for job with template id %s" \
                  " and execution id %s" % (self.job_template_id,
                                            self.job_execution_id)
            self._logger.debug(msg)
            self.result_handler = JobResultHandler(self.job_template_id,
                                                   self.job_execution_id,
                                                   self._logger,
                                                   self.job_utils,
                                                   self.job_log_utils)

            # read the job template object
            job_template = self.job_utils.read_job_template()

            timestamp = int(round(time.time() * 1000))
            self.job_log_utils.send_job_execution_uve(job_template.fq_name,
                                                      self.job_execution_id,
                                                      timestamp, 0)
            self.job_log_utils.send_job_log(job_template.fq_name,
                                            self.job_execution_id,
                                            msg,
                                            JobStatus.STARTING.value,
                                            timestamp=timestamp)

            # spawn job greenlets
            job_handler = JobHandler(self._logger, self._vnc_api, job_template,
                                     self.job_execution_id, self.job_data,
                                     self.job_params, self.job_utils,
                                     self.device_json, self.auth_token,
                                     self.job_log_utils, self.sandesh_args)
            if job_template.get_job_template_multi_device_job():
                self.handle_multi_device_job(job_handler, self.result_handler)
            else:
                self.handle_single_job(job_handler, self.result_handler)

            # create job completion log and update job UVE
            self.result_handler.create_job_summary_log(job_template.fq_name)

            # in case of failures, exit the job manager process with failure
            if self.result_handler.job_result_status == JobStatus.FAILURE:
                sys.exit(self.result_handler.job_summary_message)
        except JobException as e:
            self._logger.error("Job Exception received: %s" % e.msg)
            # BUG FIX: traceback.print_stack() returns None (writes to
            # stderr), so the old code logged "None"; format_stack()
            # returns the stack as a list of strings.
            self._logger.error("%s" % "".join(traceback.format_stack()))
            self.result_handler.update_job_status(JobStatus.FAILURE, e.msg)
            # skip the summary when the template was never read
            if job_template is not None:
                self.result_handler.create_job_summary_log(
                    job_template.fq_name)
            sys.exit(e.msg)
        except Exception as e:
            self._logger.error("Error while executing job %s " % repr(e))
            self._logger.error("%s" % "".join(traceback.format_stack()))
            self.result_handler.update_job_status(JobStatus.FAILURE, e.message)
            if job_template is not None:
                self.result_handler.create_job_summary_log(
                    job_template.fq_name)
            sys.exit(e.message)
        finally:
            # need to wait for the last job log and uve update to complete
            # via sandesh
            time.sleep(5)
Exemplo n.º 7
0
def redo_regular_job(job_id):
    """Re-run the task identified by job_id.

    :param job_id: id of the job to re-run
    :return: None
    """
    with JobHandler(job_id, conf_dict) as handler:
        handler.run_job()
Exemplo n.º 8
0
class TestJobHandler(MockServerMixin):
    """Tests for JobHandler's submit/process/upload pipeline, run against
    the mock server supplied by MockServerMixin."""

    # Shared handler plus fixture URLs: the first resolves to an image,
    # the second is expected to fail image processing.
    job_handler = JobHandler('96e2154893e4610', 1, 1)
    urls = [
        'https://www.goodurl.com/image.png',
        'https://www.goodurl.com',
        ]

    def test_submit(self):
        # A submitted job must be registered under its id.
        new_job = self.job_handler.submit(self.urls)
        assert self.job_handler.jobs.get(new_job['id'], None)

    def test_process_concurrently(self):
        self.job_handler.reset()
        new_job = self.job_handler._make_job(self.urls)
        job = self.job_handler._process_concurrently(new_job)
        # After processing, job fields should indicate a completion.
        assert job['id'] == new_job['id']
        assert job['created'] == new_job['created']
        assert job['finished'] is not None
        assert job['status'] == 'complete'
        # _process_concurrently manipulates the job in place.
        assert job == new_job

    def test_handle_futures(self):
        self.job_handler.reset()
        new_job = self.job_handler._make_job(self.urls)
        func = self.job_handler._process_image_url
        with futures.ThreadPoolExecutor(3) as executor:
            to_do = conc.submit_to_executor(executor, func, self.urls)
        # After completion, there should be no more pending urls
        # The bad url should fail and the image url should succeed.
        job = self.job_handler._handle_futures(new_job, to_do)
        assert job['uploaded']['pending'] == []
        assert job['uploaded']['complete'] == ['https://i.imgur.com/0yco6KM.gif']
        assert job['uploaded']['fail'] == ['https://www.goodurl.com']
        assert self.job_handler.images == ['https://i.imgur.com/0yco6KM.gif']
        # _handle_futures manipulates the job in place.
        assert job == new_job

    def test_process_image_url(self):
        # A good url should result in a imgur link
        link = self.job_handler._process_image_url(self.urls[0])
        assert link == 'https://i.imgur.com/0yco6KM.gif'
        # A bad url should result in an exception
        with pytest.raises(IOError):
            link = self.job_handler._process_image_url(self.urls[1])

    def test_upload_to_imgur(self):
        valid_base64 = 'dGhpcyBpcyBiYXNlIDY0'
        invalid_base64 = '$%^&'
        # Given a valid base64 string, the request should be made.
        resp = self.job_handler._upload_to_imgur(valid_base64)
        assert resp.status_code == 200
        # Otherwise raise an exception before making the request to imgur.
        with pytest.raises(ValueError):
            resp = self.job_handler._upload_to_imgur(invalid_base64)
Exemplo n.º 9
0
 def device_send(self, job_template, job_input, is_delete, retry):
     """Push the config for this physical router through a JobHandler.

     Skips the push when the serialized config hash matches the last
     pushed hash. Commit statistics and push state are updated on both
     success and failure.

     :param job_template: job template executed to apply the config
     :param job_input: dict with the playbook/device configuration
     :param is_delete: when True, no device uuid list is passed to the
         JobHandler (None instead of [uuid])
     :param retry: if True, a failed push ends in PUSH_STATE_RETRY
         instead of PUSH_STATE_FAILED
     :return: size in bytes of the serialized config (0 on early failure)
     """
     config_str = json.dumps(job_input, sort_keys=True)
     self.push_config_state = PushConfigState.PUSH_STATE_IN_PROGRESS
     start_time = None
     config_size = 0
     try:
         config_size = len(config_str)
         # sort_keys above makes the hash stable across dict orderings
         current_config_hash = md5(config_str).hexdigest()
         if self.last_config_hash is None or\
                 current_config_hash != self.last_config_hash:
             self._logger.info("config push for %s(%s) using job template %s" %
                       (self.physical_router.name, self.physical_router.uuid,
                        str(job_template)))
             self._logger.debug("playbook send message: %s" %
                                json.dumps(job_input, indent=4,
                                           sort_keys=True))
             device_manager = DeviceManager.get_instance()
             job_handler = JobHandler(job_template, job_input,
                                      None if is_delete else
                                      [self.physical_router.uuid],
                                      device_manager.get_analytics_config(),
                                      device_manager.get_vnc(), self._logger)
             self.commit_stats['total_commits_sent_since_up'] += 1
             start_time = time.time()
             # blocks until the job completes; raises on push failure
             job_handler.push()
             end_time = time.time()
             self.commit_stats['commit_status_message'] = 'success'
             self.commit_stats['last_commit_time'] = \
                     datetime.datetime.fromtimestamp(
                     end_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(
                     end_time - start_time)
             # remember what we pushed so identical configs are skipped
             self.last_config_hash = current_config_hash
         else:
             self._logger.debug("not pushing since no config change"
                                " detected")
         self.push_config_state = PushConfigState.PUSH_STATE_SUCCESS
     except Exception as e:
         # NOTE: e.message is Python 2 only -- this module predates py3
         self._logger.error("Router %s: %s" %
                            (self.physical_router.management_ip, e.message))
         self.commit_stats[
                 'commit_status_message'] = 'failed to apply config,\
                                             router response: ' + e.message
         # start_time is only set once the push was actually attempted
         if start_time is not None:
             self.commit_stats['last_commit_time'] = \
                     datetime.datetime.fromtimestamp(
                         start_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(
                     time.time() - start_time)
         self.push_config_state = PushConfigState.PUSH_STATE_RETRY if retry\
             else PushConfigState.PUSH_STATE_FAILED
     return config_size
Exemplo n.º 10
0
def main(client_id, num_threads, conc_reqs):
    """Build the Flask REST app, wire every Resource class defined in this
    module to a shared JobHandler, and serve it.

    :param client_id: client id handed to the JobHandler
    :param num_threads: worker thread count for the JobHandler
    :param conc_reqs: concurrent-request limit for the JobHandler
    """
    app = Flask(__name__)
    api = Api(app)

    # This collects the defined resources automatically as new ones are defined.
    # BUG FIX: compare module names with ==, not "is" -- identity of equal
    # strings is an interning accident, not a language guarantee.
    resources = ((obj, obj.URLs) for _, obj in inspect.getmembers(
        sys.modules[__name__], inspect.isclass) if obj.__module__ == __name__)

    job_handler = JobHandler(client_id, num_threads, conc_reqs)
    for resource, urls in resources:
        api.add_resource(resource, *urls, resource_class_args=(job_handler, ))

    # NOTE(review): binds on all interfaces -- confirm that is intended.
    app.run(debug=False, host='0.0.0.0')
Exemplo n.º 11
0
def main():
    """Wire the DB-backed job generator to the result handler and run both
    greenlets until they exit; greenlet errors are logged as warnings."""
    global job_generator, connection

    config = parse_config()
    connection = connect_db(config)
    event_handler = ResultHandler(connection)
    job_generator = JobHandler(connection, event_handler)

    try:
        result_greenlet = gevent.spawn(event_handler.result_queue_handler)
        job_greenlet = gevent.spawn(job_generator.job_updater)
        gevent.joinall([job_greenlet, result_greenlet])
    except gevent.GreenletExit as err:
        logger.warning(err)
    except Exception as err:
        logger.warning(err)
Exemplo n.º 12
0
    def setUp(self):
        """Patch the HTTP POST and gevent.sleep entry points, then build a
        JobHandler with mocked VNC api and logger."""
        super(TestJobHandler, self).setUp()

        # Stub out network and sleep so tests stay fast and offline.
        self.post_patch = mock.patch(
            'opserver_util.OpServerUtils.post_url_http')
        self.sleep_patch = mock.patch('gevent.sleep')
        self.post_mock = self.post_patch.start()
        self.sleep_mock = self.sleep_patch.start()

        # Mocked collaborators.
        self.vnc_api = mock.Mock()
        self.logger = mock.Mock()

        # Fixture inputs for the handler.
        self.job_type = ['test-job']
        self.job_input = dict(key1='value1', key2='value2')
        self.device_list = ['device1']
        self.analytic_config = dict(ip='1.2.3.4', port='8082')

        self.job_handler = JobHandler(
            self.job_type, self.job_input, self.device_list,
            self.analytic_config, self.vnc_api, self.logger)
 def device_send(self, job_template, job_input, retry):
     """Unconditionally push the config through a JobHandler (no change
     detection in this variant) and record commit statistics.

     :param job_template: job template executed to apply the config
     :param job_input: dict with the playbook/device configuration
     :param retry: if True, a failed push ends in PUSH_STATE_RETRY
         instead of PUSH_STATE_FAILED
     :return: size in bytes of the serialized config (0 on early failure)
     """
     config_str = json.dumps(job_input)
     self.push_config_state = PushConfigState.PUSH_STATE_INIT
     start_time = None
     config_size = 0
     try:
         self._logger.debug("playbook send message: %s" % config_str)
         config_size = len(config_str)
         device_manager = DeviceManager.get_instance()
         job_handler = JobHandler(job_template, job_input,
                                  [self.physical_router.uuid],
                                  device_manager.get_analytics_config(),
                                  device_manager.get_vnc(), self._logger)
         self.commit_stats['total_commits_sent_since_up'] += 1
         start_time = time.time()
         # blocks until the job completes; raises on push failure
         job_handler.push()
         end_time = time.time()
         self.commit_stats['commit_status_message'] = 'success'
         self.commit_stats['last_commit_time'] = \
                 datetime.datetime.fromtimestamp(
                 end_time).strftime('%Y-%m-%d %H:%M:%S')
         self.commit_stats['last_commit_duration'] = str(
                 end_time - start_time)
         self.push_config_state = PushConfigState.PUSH_STATE_SUCCESS
     except Exception as e:
         # NOTE: e.message is Python 2 only -- this module predates py3
         self._logger.error("Router %s: %s" %
                            (self.physical_router.management_ip, e.message))
         self.commit_stats[
                 'commit_status_message'] = 'failed to apply config,\
                                             router response: ' + e.message
         # start_time is only set once the push was actually attempted
         if start_time is not None:
             self.commit_stats['last_commit_time'] = \
                     datetime.datetime.fromtimestamp(
                         start_time).strftime('%Y-%m-%d %H:%M:%S')
             self.commit_stats['last_commit_duration'] = str(
                     time.time() - start_time)
         self.push_config_state = PushConfigState.PUSH_STATE_RETRY if retry\
             else PushConfigState.PUSH_STATE_FAILED
     return config_size
Exemplo n.º 14
0
    def start_job(self):
        """Execute the job: emit start UVE/log, run the handler(s), then
        emit a completion UVE and summary log.

        Failures are routed to mark_job_failure(); generic errors are
        logged with a stack trace first.
        """
        try:
            # create job UVE and log
            # NOTE(review): the %s placeholders are never interpolated here;
            # sibling implementations fill in template/execution ids.
            msg = "Starting execution for job with template id %s" \
                  " and execution id %s"
            self._logger.debug(msg)
            timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
            self.job_utils.send_job_execution_uve(timestamp, 0)
            self.job_utils.send_job_log(msg, JobStatus.STARTING,
                                        timestamp=timestamp)
            # read the job template object
            job_template = self.job_utils.read_job_template()

            # spawn job greenlets
            job_handler = JobHandler(self._logger, self._vnc_api,
                                     job_template, self.job_execution_id,
                                     self.job_data, self.job_params,
                                     self.job_utils)
            result_handler = JobResultHandler(job_template,
                                              self.job_execution_id,
                                              self._logger, self.job_utils)
            if job_template.get_job_template_multi_device_job():
                self.handle_multi_device_job(job_handler, result_handler)
            else:
                self.handle_single_job(job_handler, result_handler)

            # update job uve (100% complete) and write the summary log
            timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
            self.job_utils.send_job_execution_uve(timestamp, 100)
            result_handler.create_job_summary_log(timestamp)
        except JobException as e:
            self.mark_job_failure(e.message, self.job_execution_id)
        except Exception as e:
            self._logger.error("Error while executing job %s " % repr(e))
            # BUG FIX: traceback.print_stack() returns None (writes to
            # stderr), so the old code logged None; format_stack() returns
            # the stack as a list of strings.
            self._logger.error("".join(traceback.format_stack()))
            self.mark_job_failure(e.message)
Exemplo n.º 15
0
 def __init__(self, mf_params, rf_params, partition, n_nodes, n_cores, node_features, memory, time):
     # Initialize the parent with mf_params, keep model/cluster settings on
     # self, and build the cluster JobHandler.
     #
     # :param mf_params: keyword args forwarded to the parent constructor
     # :param rf_params: params stored as self.params -- presumably the
     #     random-forest settings; confirm against callers
     # :param partition: cluster partition, passed to JobHandler
     # :param n_nodes: node count, kept on self
     # :param n_cores: cores per job, passed to JobHandler
     # :param node_features: feature spec, kept on self
     # :param memory: memory request, passed to JobHandler
     # :param time: walltime, passed to JobHandler
     #     NOTE(review): shadows the builtin ``time`` module inside this
     #     method.
     super().__init__(**mf_params)
     self.params = rf_params
     self.n_nodes = n_nodes
     self.node_features = node_features
     self.handler = JobHandler(partition, n_cores, memory, time)
Exemplo n.º 16
0
    - bench_positions: categorizes bench players by category, then takes
    this number from each position to add to the final roster
    - path: filename of output file
    - output_format: takes arguments csv, pkl, or pickle. outputs dataframe
    in corresponding format
    '''

    # initial processing parameters
    base_params = {'min_year': 2000, 'max_year': 2013,
                   'history_steps': 7, 'min_player_games': 2,
                   'num_players': 9, 'path': 'output_14yrs.pkl',
                   'bench_positions': {'Guard': 1, 'Wing': 1, 'Big': 1},
                   'output_format': 'pickle'
                   }

    # perform additional jobs with these parameters changed
    additional_search_params = []

    # create a list of jobs with specified parameters
    jobhandler = JobHandler(base_params, additional_search_params)

    # perform all processing jobs
    jobhandler.process_jobs()