def delete_all_installations_for_host(hostname, status=None):
    """Delete all install jobs for *hostname* matching *status*.

    For FAILED jobs, dependent install-job records are removed as well.
    Aborts with 401 if the user lacks delete permission, 404 if the host
    is unknown; otherwise returns a JSON status payload.
    """
    if not can_delete_install(current_user):
        abort(401)

    db_session = DBSession()
    host = get_host(db_session, hostname)
    if host is None:
        abort(404)

    try:
        install_jobs = db_session.query(InstallJob).filter(
            InstallJob.host_id == host.id, InstallJob.status == status).all()
        if not install_jobs:
            return jsonify(status="No record fits the delete criteria.")

        for install_job in install_jobs:
            db_session.delete(install_job)
            if status == JobStatus.FAILED:
                delete_install_job_dependencies(db_session, install_job.id)

        db_session.commit()
        return jsonify({'status': 'OK'})
    except Exception:
        # Fix: narrowed from a bare except and roll back the partial delete
        # so the session is left clean.
        db_session.rollback()
        logger.exception('delete_install_job() hit exception')
        return jsonify({'status': 'Failed: check system logs for details'})
def refresh_all(cls):
    """Retrieve the CCO catalog plus per-release SMU XML data and persist them.

    Returns True on success, False when the refresh fails (the transaction
    is rolled back); returns None when no catalog data was retrieved.
    """
    db_session = DBSession()
    catalog = SMUInfoLoader.get_catalog_from_cco()
    if len(catalog) == 0:
        return  # nothing retrieved; same implicit None as before

    system_option = SystemOption.get(db_session)
    try:
        # Replace the whole table: clear it, then re-add every entry.
        db_session.query(CCOCatalog).delete()
        for platform in catalog:
            for release in catalog[platform]:
                db_session.add(CCOCatalog(platform=platform, release=release))
                # Retrieve the corresponding SMU XML file data.
                SMUInfoLoader(platform, release, refresh=True)

        system_option.cco_lookup_time = datetime.datetime.utcnow()
        db_session.commit()
        return True
    except Exception:
        logger.exception('refresh_all hit exception')
        db_session.rollback()
        return False
def create_reservation(self, username, data, container_name=None):
    """Create a container reservation for *username*.

    ``data`` is the submitted form (an ImmutableMultiDict) carrying at
    least 'images', and optionally 'memoryRange' / 'cpuRange'; defaults
    are applied for the latter two when missing.

    Returns:
        0  on success,
        -1 when no compute node is available or the DB insert fails,
        -2 when the container could not be started.
    """
    user = self.database.get_user_by_name(username)
    image = self.database.get_image_by_name(data['images'])
    ram = data.get('memoryRange', DEFAULT_RAM)
    cpu = data.get('cpuRange', DEFAULT_CPU)

    # Pick a compute node to host the container; bail out if none available.
    node = self.get_compute_node()
    if node is None:
        return -1

    try:
        status, docker_port, con_name, con_id = self.run_container(
            image.image_name, node.ip_addr, ram, cpu, container_name)
    except Exception as e:
        logger.exception("Failed to run container of image={0} on node {1}".format(image.image_name, node.ip_addr))
        return -2

    if status is True:
        # Expose the container via an SSH port-forward rule, then record it.
        nat_port, management_ip = self.database.add_ssh_port_forward_rule(node.ip_addr, docker_port)
        status = self.database.add_container(container_id=con_id,
                                             container_name=con_name,
                                             nat_port=nat_port,
                                             management_ip=management_ip,
                                             docker_port=docker_port,
                                             ram=ram,
                                             cpu=cpu,
                                             username=DEFAULT_CONTAINER_USENAME,
                                             password=DEFAULT_CONTAINER_PASSWORD,
                                             node=node,
                                             image=image,
                                             user=user)
        if status is True:
            return 0
    return -1
def sendmail(server, server_port, sender, recipient, message,
             use_authentication, username, password, secure_connection):
    """Send an HTML notification email through the configured SMTP server.

    ``recipient`` may be a comma-separated list of addresses. SSL/TLS and
    login are applied according to ``use_authentication`` and
    ``secure_connection``. Errors are logged, never raised.
    """
    try:
        msg = MIMEMultipart('alternative')
        msg['Subject'] = 'Notification from CSM Server'
        msg['From'] = sender
        msg['To'] = recipient
        msg.attach(MIMEText(message, 'html'))

        if use_authentication:
            if secure_connection == SMTPSecureConnection.SSL:
                s = smtplib.SMTP_SSL(server, int(server_port))
            elif secure_connection == SMTPSecureConnection.TLS:
                s = smtplib.SMTP(server, int(server_port))
                s.starttls()
            # NOTE(review): if secure_connection is neither SSL nor TLS,
            # 's' is unbound and the NameError is caught below — confirm intent.
            s.login(username, password)
        else:
            # NOTE(review): the unauthenticated path ignores secure_connection.
            if server_port is None or len(server_port) == 0:
                s = smtplib.SMTP(server)
            else:
                s = smtplib.SMTP(server, int(server_port))

        s.sendmail(sender, recipient.split(","), msg.as_string())
        s.close()
    except Exception:
        # Fix: narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        logger.exception('sendmail hit exception')
def get_file_and_directory_dict(self, sub_directory=None):
    """List files and directories on the SFTP server directory.

    Returns (result_list, is_reachable) where each entry of result_list is
    a dict with 'filename' and 'is_directory' keys. Returns an empty list
    immediately when pysftp support is unavailable.
    """
    result_list = []
    is_reachable = False

    if not SFTP_SUPPORTED:
        return result_list, is_reachable

    try:
        with pysftp.Connection(self.server.server_url,
                               username=self.server.username,
                               password=self.server.password) as sftp:
            remote_directory = concatenate_dirs(self.server.server_directory, sub_directory)
            if len(remote_directory) > 0:
                sftp.chdir(remote_directory)

            file_info_list = sftp.listdir()
            for file_info in file_info_list:
                file = {}
                # A leading 'd' in the lstat mode string marks a directory.
                lstatout = str(sftp.lstat(file_info)).split()[0]
                if 'd' in lstatout:
                    if sub_directory is None or len(sub_directory) == 0:
                        file['filename'] = file_info
                    else:
                        file['filename'] = sub_directory + '/' + file_info
                    file['is_directory'] = True
                else:
                    file['filename'] = file_info
                    file['is_directory'] = False

                result_list.append(file)

        is_reachable = True
    except Exception as e:
        # NOTE(review): e.message is Python 2 only.
        logger.exception('SFTPServer hit exception - ' + e.message)

    return result_list, is_reachable
def run(self):
    """Arm the daily inventory scheduler at the configured hour and run it."""
    db_session = DBSession()
    try:
        system_option = SystemOption.get(db_session)
        inventory_hour = system_option.inventory_hour
        db_session.close()

        # Build a scheduler object that will look at absolute times.
        scheduler = sched.scheduler(time.time, time.sleep)
        current_hour = datetime.datetime.now().hour

        # Put task for today at the designated hour.
        daily_time = datetime.time(inventory_hour)

        # If the scheduled time already passed, schedule it for tomorrow.
        if current_hour > inventory_hour:
            first_time = datetime.datetime.combine(
                datetime.datetime.now() + datetime.timedelta(days=1), daily_time)
        else:
            first_time = datetime.datetime.combine(datetime.datetime.now(), daily_time)

        scheduler.enterabs(time.mktime(first_time.timetuple()), 1,
                           self.scheduling, (scheduler, daily_time,))
        scheduler.run()
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('InventoryManagerScheduler hit exception')
    finally:
        # Fix: always release the session (close() is idempotent).
        db_session.close()
def purge_install_job_history(self, db_session, entry_per_host):
    """Trim InstallJobHistory to at most *entry_per_host* records per host.

    Records are walked per host, newest first; anything beyond the quota is
    deleted together with its on-disk session log directory.
    """
    try:
        skip_count = 0
        host_id = -1
        install_jobs = db_session.query(InstallJobHistory) \
            .order_by(InstallJobHistory.host_id,
                      InstallJobHistory.created_time.desc())

        for install_job in install_jobs:
            if install_job.host_id != host_id:
                # Entering a new host group: reset the per-host keep counter.
                host_id = install_job.host_id
                skip_count = 0

            if skip_count >= entry_per_host:
                # Best-effort removal of the session log directory.
                try:
                    if install_job.session_log is not None:
                        shutil.rmtree(get_log_directory() + install_job.session_log)
                except Exception:
                    # Fix: narrowed from a bare except.
                    logger.exception(
                        'purge_install_job_history() hit exception - install job = %s',
                        install_job.id)
                db_session.delete(install_job)

            skip_count += 1

        db_session.commit()
    except Exception:
        db_session.rollback()
        logger.exception('purge_install_job_history() hit exception')
def scheduling(self, scheduler, daily_time):
    """Run the daily inventory pass and re-arm the scheduler for tomorrow."""
    # Re-arm first so a failure below cannot break future scheduling.
    t = datetime.datetime.combine(
        datetime.datetime.now() + datetime.timedelta(days=1), daily_time)
    scheduler.enterabs(time.mktime(t.timetuple()), 1,
                       self.scheduling, (scheduler, daily_time,))

    db_session = DBSession()
    try:
        system_option = SystemOption.get(db_session)

        # If software inventory is enabled, flag every inventory job for submission.
        if system_option.enable_inventory:
            inventory_jobs = db_session.query(InventoryJob).all()
            if inventory_jobs:
                for inventory_job in inventory_jobs:
                    inventory_job.pending_submit = True
                db_session.commit()

        # Check if there is any housekeeping work to do.
        self.perform_housekeeping_tasks(db_session, system_option)
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('InventoryManagerScheduler hit exception')
    finally:
        db_session.close()
def scheduling(self, scheduler, daily_time):
    """Run the daily inventory pass and re-arm the scheduler for tomorrow."""
    # Re-arm first so that an error below cannot break future scheduling.
    t = datetime.datetime.combine(
        datetime.datetime.now() + datetime.timedelta(days=1), daily_time)
    scheduler.enterabs(time.mktime(t.timetuple()), 1,
                       self.scheduling, (scheduler, daily_time,))

    db_session = DBSession()
    try:
        system_option = SystemOption.get(db_session)

        # If software inventory is enabled, flag every inventory job for submission.
        if system_option.enable_inventory:
            inventory_jobs = db_session.query(InventoryJob).all()
            if inventory_jobs:
                for inventory_job in inventory_jobs:
                    inventory_job.pending_submit = True
                db_session.commit()

        # Check if there is any housekeeping work to do.
        self.perform_housekeeping_tasks(db_session, system_option)
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('InventoryManagerScheduler hit exception')
    finally:
        db_session.close()
def get_file_and_directory_dict(self, sub_directory=None):
    """List files and directories on the FTP server directory.

    Returns (result_list, is_reachable) where each entry of result_list is
    a dict with 'filename' and 'is_directory' keys.
    """
    result_list = []
    is_reachable = False
    ftp = None
    try:
        ftp = ftplib.FTP(self.server.server_url,
                         user=self.server.username,
                         passwd=self.server.password)

        remote_directory = concatenate_dirs(self.server.server_directory, sub_directory)
        if len(remote_directory) > 0:
            ftp.cwd(remote_directory)

        dirs, nondirs = self.listdir(ftp)

        for file_tuple in nondirs:
            result_list.append({'filename': file_tuple[0], 'is_directory': False})

        for file_tuple in dirs:
            if sub_directory is None or len(sub_directory) == 0:
                result_list.append({'filename': file_tuple[0], 'is_directory': True})
            else:
                result_list.append({'filename': os.path.join(sub_directory, file_tuple[0]),
                                    'is_directory': True})

        is_reachable = True
    except Exception as e:
        logger.exception('FTPServer hit exception - ' + e.message)
    finally:
        # Fix: the original leaked the FTP connection; close it best-effort.
        if ftp is not None:
            try:
                ftp.quit()
            except Exception:
                pass

    return result_list, is_reachable
def get_file_and_directory_dict(self, sub_directory=None):
    """List files and directories on the SFTP server directory.

    Loads pysftp lazily via import_module. Returns (result_list,
    is_reachable); is_reachable is False when the import or the connection
    fails.
    """
    result_list = []
    is_reachable = True
    try:
        sftp_module = import_module('pysftp')
        if sftp_module is not None:
            with sftp_module.Connection(self.server.server_url,
                                        username=self.server.username,
                                        password=self.server.password) as sftp:
                remote_directory = concatenate_dirs(self.server.server_directory, sub_directory)
                if len(remote_directory) > 0:
                    sftp.chdir(remote_directory)

                file_info_list = sftp.listdir()
                for file_info in file_info_list:
                    file = {}
                    # A leading 'd' in the lstat mode string marks a directory.
                    lstatout = str(sftp.lstat(file_info)).split()[0]
                    if 'd' in lstatout:
                        if sub_directory is None or len(sub_directory) == 0:
                            file['filename'] = file_info
                        else:
                            file['filename'] = sub_directory + '/' + file_info
                        file['is_directory'] = True
                    else:
                        file['filename'] = file_info
                        file['is_directory'] = False

                    result_list.append(file)
    except Exception:
        # Fix: narrowed from a bare except (still covers ImportError).
        logger.exception('SFTPServer hit exception')
        is_reachable = False

    return result_list, is_reachable
def run(self):
    """Arm the daily inventory scheduler at the configured hour and run it."""
    db_session = DBSession()
    try:
        system_option = SystemOption.get(db_session)
        inventory_hour = system_option.inventory_hour
        db_session.close()

        # Build a scheduler object that will look at absolute times.
        scheduler = sched.scheduler(time.time, time.sleep)
        current_hour = datetime.datetime.now().hour

        # Put task for today at the designated hour.
        daily_time = datetime.time(inventory_hour)

        # If the scheduled time already passed, schedule it for tomorrow.
        if current_hour > inventory_hour:
            first_time = datetime.datetime.combine(
                datetime.datetime.now() + datetime.timedelta(days=1), daily_time)
        else:
            first_time = datetime.datetime.combine(datetime.datetime.now(), daily_time)

        scheduler.enterabs(time.mktime(first_time.timetuple()), 1,
                           self.scheduling, (scheduler, daily_time,))
        scheduler.run()
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('InventoryManagerScheduler hit exception')
    finally:
        # Fix: always release the session (close() is idempotent).
        db_session.close()
def region_create():
    """Create a region from the submitted RegionForm.

    On a valid POST: rejects duplicate region names (re-renders the edit
    page with duplicate_error), otherwise creates/updates the region with
    the selected server repositories and redirects home. On GET or an
    invalid form, renders the edit page.
    """
    if not can_create(current_user):
        abort(401)

    form = RegionForm(request.form)
    if request.method == 'POST' and form.validate():
        db_session = DBSession()
        region = get_region(db_session, form.region_name.data)
        if region is not None:
            return render_template('region/edit.html', form=form, duplicate_error=True)

        # Compose a list of server hostnames from the selected server ids.
        server_names = [
            get_server_by_id(db_session, id).hostname
            for id in request.form.getlist('selected-servers')
        ]

        try:
            create_or_update_region(db_session=db_session,
                                    region_name=form.region_name.data,
                                    server_repositories=",".join(server_names),
                                    created_by=current_user.username)
        except Exception as e:
            db_session.rollback()
            # NOTE(review): e.message is Python 2 only.
            logger.exception("region_create() encountered an exception: " + e.message)

        # Redirect home on both success and handled failure.
        return redirect(url_for('home'))

    return render_template('region/edit.html', form=form)
def refresh_all(cls):
    """Retrieve all the catalog data and SMU XML file data and update the DB.

    Returns True on success, False when the refresh fails (the transaction
    is rolled back); implicitly returns None when no catalog data was
    retrieved.
    """
    db_session = DBSession()
    catalog = SMUInfoLoader.get_catalog_from_cco()
    if len(catalog) > 0:
        system_option = SystemOption.get(db_session)
        try:
            # Remove all rows first, then repopulate from the fresh catalog.
            db_session.query(CCOCatalog).delete()

            for platform in catalog:
                releases = catalog[platform]
                for release in releases:
                    cco_catalog = CCOCatalog(platform=platform, release=release)
                    db_session.add(cco_catalog)
                    # Now, retrieve from the SMU XML file.
                    SMUInfoLoader(platform, release)

            system_option.cco_lookup_time = datetime.datetime.utcnow()
            db_session.commit()
            return True
        except Exception:
            logger.exception('refresh_all() hit exception')
            db_session.rollback()
            return False
def get_smu_info_from_cco(self, platform, release):
    """Load SMU metadata from CCO and sync it into the database.

    Re-uses the stored record when its created_time matches the freshly
    loaded one (only bumping retrieval_time); otherwise replaces the
    stored SMUMeta/SMUInfo rows with the new data.

    NOTE(review): the DBSession created here is never closed.
    """
    save_to_db = True
    db_session = DBSession()
    platform_release = platform + '_' + release

    try:
        self.smu_meta = SMUMeta(platform_release=platform_release)
        # Load data from the SMU XML file.
        self.load()

        # This can happen if the given platform and release is not valid.
        # The load() method calls get_smu_info_from_db and failed.
        if not self.is_valid:
            logger.error('get_smu_info_from_cco() hit exception, platform_release=' + platform_release)
            return

        db_smu_meta = db_session.query(SMUMeta).filter(SMUMeta.platform_release == platform_release).first()
        if db_smu_meta:
            if db_smu_meta.created_time == self.smu_meta.created_time:
                # Stored copy is current; keep it and only refresh the timestamp.
                save_to_db = False
            else:
                # Delete the existing smu_meta and smu_info for this platform and release.
                db_session.delete(db_smu_meta)
                db_session.commit()

        if save_to_db:
            db_session.add(self.smu_meta)
        else:
            db_smu_meta.retrieval_time = datetime.datetime.utcnow()

        db_session.commit()
    except Exception:
        logger.exception('get_smu_info_from_cco() hit exception, platform_release=' + platform_release)
def api_create_install_jobs():
    """Create a chain of install jobs for a host from the posted form data.

    Each selected install action becomes a job; jobs are chained so that
    each depends on the previously created one (the implicit ordering in
    the selector). Returns a JSON status payload.
    """
    db_session = DBSession()
    hostname = request.form['hostname']
    install_action = request.form.getlist('install_action[]')
    scheduled_time = request.form['scheduled_time_UTC']
    software_packages = request.form['software_packages'].split()
    server_id = request.form['server']
    server_directory = request.form['server_directory']
    pending_downloads = request.form['pending_downloads'].split()
    custom_command_profile_ids = [str(i) for i in request.form['custom_command_profile'].split()]

    # NOTE(review): host may be None here; host.id below would then raise
    # AttributeError, which is reported via the generic failure path.
    host = get_host(db_session, hostname)

    try:
        # The dependency on each install action is already indicated in the implicit
        # ordering in the selector. If the user selected Pre-Upgrade and Install Add,
        # Install Add (successor) will have Pre-Upgrade (predecessor) as the dependency.
        dependency = 0
        for one_install_action in install_action:
            new_install_job = create_or_update_install_job(db_session=db_session,
                                                           host_id=host.id,
                                                           install_action=one_install_action,
                                                           scheduled_time=scheduled_time,
                                                           software_packages=software_packages,
                                                           server_id=server_id,
                                                           server_directory=server_directory,
                                                           pending_downloads=pending_downloads,
                                                           custom_command_profile_ids=custom_command_profile_ids,
                                                           dependency=dependency)
            # The next job depends on the one just created.
            dependency = new_install_job.id

        return jsonify({'status': 'OK'})
    except Exception as e:
        logger.exception('api_create_install_job hit exception')
        # NOTE(review): e.message is Python 2 only.
        return jsonify({'status': 'Failed Reason: ' + e.message})
def purge_install_job_history(self, db_session, entry_per_host):
    """Trim InstallJobHistory to at most *entry_per_host* records per host.

    Walks records grouped by host, newest first; anything beyond the quota
    is deleted together with its on-disk session log directory.
    """
    try:
        skip_count = 0
        host_id = -1
        install_jobs = db_session.query(InstallJobHistory) \
            .order_by(InstallJobHistory.host_id,
                      InstallJobHistory.created_time.desc())

        for install_job in install_jobs:
            if install_job.host_id != host_id:
                # New host group: reset the per-host keep counter.
                host_id = install_job.host_id
                skip_count = 0

            if skip_count >= entry_per_host:
                # Best-effort removal of the session log directory.
                try:
                    if install_job.session_log is not None:
                        shutil.rmtree(get_log_directory() + install_job.session_log)
                except Exception:
                    # Fix: narrowed from a bare except.
                    logger.exception('purge_install_job_history() hit exception - install job = %s',
                                     install_job.id)
                db_session.delete(install_job)

            skip_count += 1

        db_session.commit()
    except Exception:
        db_session.rollback()
        logger.exception('purge_install_job_history() hit exception')
def handle_create_tar_jobs(self, db_session):
    """Submit a work unit for every CreateTarJob not yet in a terminal state."""
    try:
        # Fix: dropped the no-op .filter() call.
        create_tar_jobs = db_session.query(CreateTarJob).all()
        for create_tar_job in create_tar_jobs:
            if create_tar_job.status not in (JobStatus.COMPLETED, JobStatus.FAILED):
                self.submit_job(CreateTarWorkUnit(create_tar_job.id))
    except Exception:
        logger.exception('Unable to dispatch create tar job')
def handle_email_jobs(self, db_session):
    """Dispatch a work unit for every email job that has not run yet."""
    try:
        # Jobs whose status is still NULL have not been processed.
        pending = db_session.query(EmailJob).filter(EmailJob.status == None).all()
        for email_job in pending:
            self.submit_job(EmailWorkUnit(email_job.id))
    except Exception:
        logger.exception('Unable to dispatch email job')
def purge_old_conformance_reports(db_session):
    """Bound the current user's conformance reports by dropping the earliest one."""
    reports = get_conformance_report_by_user(db_session, current_user.username)
    if len(reports) <= 10:
        return
    try:
        # Delete the earliest conformance report (presumably the list is
        # ordered newest-first, so it is the last element).
        db_session.delete(reports[-1])
        db_session.commit()
    except Exception:
        logger.exception('purge_old_conformance_reports hit exception')
def fill_custom_command_profiles(db_session, choices):
    """Repopulate *choices* in place with (id, profile_name) pairs."""
    del choices[:]
    try:
        profiles = get_custom_command_profile_list(db_session)
        for profile in profiles:
            choices.append((profile.id, profile.profile_name))
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('fill_custom_command_profiles() hit exception')
def fill_default_region(choices, region):
    """Repopulate *choices* in place with the given region's (id, name) pair.

    *choices* is cleared first; nothing is appended when *region* is None.
    """
    # Remove all the existing entries.
    del choices[:]
    try:
        if region is not None:
            choices.append((region.id, region.name))
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('fill_default_region() hits exception')
def is_connection_valid(hostname, urls):
    """Return True when a test connection to *hostname* succeeds.

    Failures are logged; the context's success flag is returned either way.
    """
    ctx = TestConnectionContext(hostname, urls)
    try:
        get_connection_handler().execute(ctx)
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('is_connection_valid hit exception')
    return ctx.success
def execute(self, ctx):
    """Run start() then post_processing() on *ctx*, swallowing failures.

    Exceptions are deliberately not re-raised; they are logged only when
    the context is a ConnectionContext (i.e. carries a db_session that a
    logger can be bound to).
    """
    try:
        self.start(ctx)
        self.post_processing(ctx)
    except Exception:
        # If there is no db_session, it is not important to log the exception.
        if isinstance(ctx, ConnectionContext):
            logger = get_db_session_logger(ctx.db_session)
            logger.exception('BaseHandler.execute() hit exception - hostname = %s', ctx.hostname)
def process(self):
    """Execute one inventory job: run the platform handler and archive the result."""
    db_session = DBSession()
    host_id = None
    inventory_job = None
    ctx = None
    try:
        inventory_job = db_session.query(InventoryJob).filter(InventoryJob.id == self.job_id).first()
        if inventory_job is None:
            logger.error('Unable to retrieve inventory job: %s' % self.job_id)
            return

        host_id = inventory_job.host_id
        host = db_session.query(Host).filter(Host.id == host_id).first()
        if host is None:
            # NOTE(review): no early return here; the attribute access below
            # fails and the job is archived as FAILED via the except block.
            logger.error('Unable to retrieve host: %s' % host_id)

        handler_class = get_inventory_handler_class(host.platform)
        if handler_class is None:
            # NOTE(review): likewise no early return; handler_class() below fails.
            logger.error('Unable to get handler for %s, inventory job %s', host.platform, self.job_id)

        inventory_job.set_status(JobStatus.PROCESSING)
        inventory_job.session_log = create_log_directory(host.connection_param[0].host_or_ip, inventory_job.id)
        db_session.commit()

        # Delegates the get_software logic to the handler.
        ctx = InventoryContext(host, db_session, inventory_job)
        handler = handler_class()
        handler.execute(ctx)

        if ctx.success:
            archive_inventory_job(db_session, inventory_job, JobStatus.COMPLETED)
        else:
            # Removes the host object as host.packages may have been modified.
            db_session.expunge(host)
            archive_inventory_job(db_session, inventory_job, JobStatus.FAILED)

        # Reset the pending retrieval flag.
        inventory_job.pending_submit = False
        db_session.commit()
    except Exception:
        try:
            logger.exception('InventoryManager hit exception - inventory job = %s', self.job_id)
            # Fix: the original passed the sys.exc_info *function object* as the
            # trace; pass the formatted traceback instead (matches the install
            # manager's archive call).
            archive_inventory_job(db_session, inventory_job, JobStatus.FAILED,
                                  trace=traceback.format_exc())
            # Reset the pending retrieval flag.
            inventory_job.pending_submit = False
            db_session.commit()
        except Exception:
            logger.exception('InventoryManager hit exception - inventory job = %s', self.job_id)
    finally:
        with lock:
            if self.job_id is not None and self.job_id in in_progress_jobs:
                del in_progress_jobs[self.job_id]
        db_session.close()
def handle_email_jobs(self, db_session):
    """Submit a work unit for each email notification job awaiting processing."""
    try:
        # A NULL status marks a job that has not been picked up yet.
        unprocessed = db_session.query(EmailJob).filter(EmailJob.status == None).all()
        for job in unprocessed:
            self.submit_job(EmailWorkUnit(job.id))
    except Exception:
        logger.exception('Unable to dispatch email job')
def handle_convert_config_jobs(self, db_session):
    """Submit a work unit for every ConvertConfigJob not yet in a terminal state."""
    try:
        # Fix: dropped the no-op .filter() call.
        convert_config_jobs = db_session.query(ConvertConfigJob).all()
        for convert_config_job in convert_config_jobs:
            if convert_config_job.status not in (JobStatus.COMPLETED, JobStatus.FAILED):
                self.submit_job(ConvertConfigWorkUnit(convert_config_job.id))
    except Exception:
        logger.exception('Unable to dispatch convert config job')
def purge_old_conformance_reports(db_session):
    """Bound the current user's conformance reports by deleting the earliest.

    Only triggers once more than 10 reports exist; deletes a single report
    per call.
    """
    conformance_reports = get_conformance_report_by_user(
        db_session, current_user.username)
    if len(conformance_reports) > 10:
        try:
            # Delete the earliest conformance report (presumably the list is
            # ordered newest-first, so it is the last element — verify).
            db_session.delete(conformance_reports[-1])
            db_session.commit()
        except Exception:
            logger.exception('purge_old_conformance_reports hit exception')
def upload_file(self, source_file_path, dest_filename, sub_directory=None, callback=None):
    """Upload *source_file_path* to the server via SCP as *dest_filename*.

    Errors are logged, never raised.

    NOTE(review): sub_directory and callback are currently unused; kept for
    interface compatibility with the other server implementations.
    """
    ssh = SSHClient()
    try:
        ssh.load_system_host_keys()
        ssh.connect(self.server.server_url,
                    username=self.server.username,
                    password=self.server.password)
        with SCPClient(ssh.get_transport(), socket_timeout=15.0) as scp:
            scp.put(source_file_path,
                    os.path.join(self.server.server_directory, dest_filename))
    except Exception as e:
        logger.exception('SCPServer hit exception - %s' % e.message)
    finally:
        # Fix: the original leaked the SSH connection.
        ssh.close()
def purge_tar_job(self, db_session):
    """Delete CreateTarJobs that have reached a terminal state."""
    try:
        for create_tar_job in db_session.query(CreateTarJob).all():
            if create_tar_job.status in (JobStatus.COMPLETED, JobStatus.FAILED):
                db_session.delete(create_tar_job)
        db_session.commit()
    except Exception:
        # Fix: narrowed from a bare except.
        db_session.rollback()
        logger.exception('purge_tar_job() hit exception')
def purge_config_job(self, db_session):
    """Delete ConvertConfigJobs that have reached a terminal state."""
    try:
        for convert_config_job in db_session.query(ConvertConfigJob).all():
            if convert_config_job.status in (JobStatus.COMPLETED, JobStatus.FAILED):
                db_session.delete(convert_config_job)
        db_session.commit()
    except Exception:
        # Fix: narrowed from a bare except.
        db_session.rollback()
        logger.exception('purge_config_job() hit exception')
def fill_software_profiles(db_session, choices):
    """Repopulate *choices* with a blank entry followed by (id, name) pairs."""
    # Remove all the existing entries.
    del choices[:]
    choices.append((-1, ''))
    try:
        software_profiles = get_software_profile_list(db_session)
        if software_profiles is not None:
            for software_profile in software_profiles:
                choices.append((software_profile.id, software_profile.name))
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('fill_software_profiles() hit exception')
def fill_regions(db_session, choices):
    """Repopulate *choices* with a blank entry followed by (id, name) pairs."""
    # Remove all the existing entries.
    del choices[:]
    choices.append((-1, ''))
    try:
        regions = get_region_list(db_session)
        if regions is not None:
            for region in regions:
                choices.append((region.id, region.name))
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('fill_regions() hit exception')
def software_profile_delete(software_profile_name):
    """Delete the named software profile; returns a JSON status payload."""
    if not can_delete(current_user):
        abort(401)

    session = DBSession()
    try:
        delete_software_profile(session, software_profile_name)
    except Exception as e:
        logger.exception('software_profile_delete hit exception.')
        return jsonify({'status': e.message})
    return jsonify({'status': 'OK'})
def is_connection_valid(hostname, urls):
    """Return True when a test connection to *hostname* succeeds.

    Unlike the sibling variant, a failure here is logged and re-raised to
    the caller.
    """
    ctx = TestConnectionContext(hostname, urls)
    try:
        handler_class = get_connection_handler_class(ctx)
        if handler_class is None:
            # NOTE(review): no return here — the call below then raises
            # TypeError ('NoneType' not callable), which is logged and re-raised.
            logger.error('Unable to get connection handler')

        handler_class().execute(ctx)
    except Exception as e:
        logger.exception('is_connection_valid() hit exception')
        raise
    return ctx.success
def custom_command_profile_delete(profile_name):
    """Delete the named custom command profile; returns a JSON status payload."""
    if not can_delete(current_user):
        abort(401)

    session = DBSession()
    try:
        delete_custom_command_profile(session, profile_name)
    except Exception as e:
        logger.exception('custom_command_profile_delete hit exception.')
        return jsonify({'status': e.message})
    return jsonify({'status': 'OK'})
def fill_jump_hosts(db_session, choices):
    """Repopulate *choices* with a blank entry followed by (id, hostname) pairs."""
    # Remove all the existing entries.
    del choices[:]
    choices.append((-1, ''))
    try:
        hosts = get_jump_host_list(db_session)
        if hosts is not None:
            for host in hosts:
                choices.append((host.id, host.hostname))
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('fill_jump_hosts() hit exception')
def dispatch(self):
    """Submit every inventory job flagged pending_submit."""
    db_session = DBSession()
    try:
        inventory_jobs = db_session.query(InventoryJob).filter(
            InventoryJob.pending_submit == True).all()
        for inventory_job in inventory_jobs:
            self.submit_job(inventory_job.id)
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('Unable to dispatch inventory job')
    finally:
        db_session.close()
def process(self):
    """Execute one install job via the platform handler and archive the result."""
    db_session = DBSession()
    ctx = None
    # Fix: pre-bind so the except block cannot hit a NameError if the very
    # first query raises.
    install_job = None
    try:
        install_job = db_session.query(InstallJob).filter(InstallJob.id == self.job_id).first()
        if install_job is None:
            # This is normal because of race condition. It means the job is
            # already deleted (completed).
            return

        if not can_install(db_session):
            # This will halt this host that has already been queued.
            return

        host = db_session.query(Host).filter(Host.id == self.host_id).first()
        if host is None:
            logger.error('Unable to retrieve host %s', self.host_id)
            return

        handler_class = get_install_handler_class(host.platform)
        if handler_class is None:
            # NOTE(review): no early return here; handler_class() below fails
            # and the job is archived as FAILED via the except block.
            logger.error('Unable to get handler for %s, install job %s', host.platform, self.job_id)

        install_job.start_time = datetime.datetime.utcnow()
        install_job.set_status(JobStatus.PROCESSING)
        install_job.session_log = create_log_directory(host.connection_param[0].host_or_ip, install_job.id)
        db_session.commit()

        ctx = InstallContext(host, db_session, install_job)
        ctx.operation_id = get_last_operation_id(db_session, install_job)

        handler = handler_class()
        handler.execute(ctx)

        if ctx.success:
            # Update the software inventory before archiving.
            self.get_software(db_session, ctx)
            archive_install_job(db_session, ctx, install_job, JobStatus.COMPLETED)
        else:
            archive_install_job(db_session, ctx, install_job, JobStatus.FAILED)

        db_session.commit()
    except Exception:
        try:
            logger.exception('InstallManager hit exception - install job = %s', self.job_id)
            archive_install_job(db_session, ctx, install_job, JobStatus.FAILED,
                                trace=traceback.format_exc())
            db_session.commit()
        except Exception:
            logger.exception('InstallManager hit exception - install job = %s', self.job_id)
    finally:
        # Must remove the host from the in-progress list.
        remove_host_from_in_progress(self.host_id)
        db_session.close()
def handle_convert_config_jobs(self, db_session):
    """Submit a work unit for every ConvertConfigJob not yet in a terminal state."""
    try:
        # Fix: dropped the no-op .filter() call.
        convert_config_jobs = db_session.query(ConvertConfigJob).all()
        for convert_config_job in convert_config_jobs:
            if convert_config_job.status not in (JobStatus.COMPLETED, JobStatus.FAILED):
                self.submit_job(ConvertConfigWorkUnit(convert_config_job.id))
    except Exception:
        logger.exception('Unable to dispatch convert config job')
def host_delete(hostname):
    """Delete *hostname*; aborts 401 if not permitted, 404 if the delete fails."""
    if not can_delete(current_user):
        abort(401)

    db_session = DBSession()
    try:
        delete_host(db_session, hostname)
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('delete_host hit exception')
        abort(404)

    return jsonify({'status': 'OK'})
def dispatch(self):
    """Queue a work unit for every inventory job requesting an update."""
    db_session = DBSession()
    try:
        pending = db_session.query(InventoryJob).filter(
            InventoryJob.request_update == True).all()
        for job in pending:
            self.submit_job(InventoryWorkUnit(job.host_id, job.id))
    except Exception:
        logger.exception('Unable to dispatch inventory job')
    finally:
        db_session.close()
def dispatch(self):
    """Submit every inventory job flagged pending_submit."""
    db_session = DBSession()
    try:
        inventory_jobs = db_session.query(InventoryJob).filter(
            InventoryJob.pending_submit == True).all()
        for inventory_job in inventory_jobs:
            self.submit_job(inventory_job.id)
    except Exception:
        # Fix: narrowed from a bare except.
        logger.exception('Unable to dispatch inventory job')
    finally:
        db_session.close()