def get_smu_info_from_cco(self, platform, release):
    save_to_db = True
    db_session = DBSession()
    platform_release = platform + '_' + release
    try:
        self.smu_meta = SMUMeta(platform_release=platform_release)
        # Load data from the SMU XML file
        self.load()

        # This can happen if the given platform and release is not valid.
        # The load() method called get_smu_info_from_db and it failed.
        if not self.is_valid:
            logger.error('get_smu_info_from_cco() failed, platform_release=' + platform_release)
            return

        db_smu_meta = db_session.query(SMUMeta).filter(SMUMeta.platform_release == platform_release).first()
        if db_smu_meta:
            if db_smu_meta.created_time == self.smu_meta.created_time:
                save_to_db = False
            else:
                # Delete the existing smu_meta and smu_info for this platform and release
                db_session.delete(db_smu_meta)
                db_session.commit()

        if save_to_db:
            db_session.add(self.smu_meta)
        else:
            db_smu_meta.retrieval_time = datetime.datetime.utcnow()

        db_session.commit()
    except Exception:
        logger.exception('get_smu_info_from_cco() hit exception, platform_release=' + platform_release)

def start(self, ctx):
    pm = CSMPluginManager(ctx)
    try:
        pm.dispatch("run")
    except condoor.GeneralError as e:
        logger = get_db_session_logger(ctx.db_session)
        logger.error(str(e))
        raise

def export_inventory_dashboard():
    """Export the inventory dashboard to CSV, HTML or Excel format."""
    db_session = DBSession()
    export_dashboard_form = ExportInventoryDashboardForm(request.form)

    export_data = dict()
    export_data['export_format'] = export_dashboard_form.export_format.data
    try:
        export_data['region_id'] = int(export_dashboard_form.hidden_region_id.data) \
            if export_dashboard_form.hidden_region_id.data else 0
    except ValueError:
        export_data['region_id'] = 0

    if export_data['region_id'] == 0:
        export_data['region_name'] = "ALL"
    else:
        export_data['region_name'] = db_session.query(Region.name).filter(
            Region.id == export_data['region_id']).first()[0]

    export_data['chassis_summary_iter'] = get_chassis_summary_query(db_session, export_data['region_id'])
    export_data['model_name_summary_iter'] = \
        get_model_name_summary_query_results(db_session, export_data['region_id']).__iter__()
    export_data['inventory_without_serial_number_iter'] = \
        get_inventory_without_serial_number_query(db_session, export_data['region_id'])
    export_data['inventory_with_duplicate_serial_number_iter'] = \
        get_inventory_with_duplicate_serial_number_query(db_session, export_data['region_id'])
    export_data['user'] = current_user

    writer = None
    if export_data.get('export_format') == ExportInformationFormat.MICROSOFT_EXCEL:
        writer = ExportInventoryDashboardExcelWriter(**export_data)
    elif export_data.get('export_format') == ExportInformationFormat.HTML:
        writer = ExportInventoryDashboardHTMLWriter(**export_data)
    elif export_data.get('export_format') == ExportInformationFormat.CSV:
        writer = ExportInventoryDashboardCSVWriter(**export_data)

    if writer:
        file_path = writer.write_report()
        if export_dashboard_form.send_email.data:
            email_message = "<html><head></head><body>Please find in the attachment the inventory dashboard " \
                            "summary in region: " + export_data['region_name'] + '</body></html>'
            create_email_job_with_attachment_files(db_session, email_message, file_path,
                                                   export_dashboard_form.user_email.data)
        return send_file(file_path, as_attachment=True)

    logger.error('inventory: invalid export format "%s" chosen.' % export_data.get('export_format'))
    return

def download(self, output_file_path, callback=None):
    access_token = self.get_access_token(self.username, self.password)
    UDI = "PID: " + self.PID + " VID: V01 SN: FOX1316G5R5"
    response = self.send_meta_data_request(access_token, UDI)
    if response is not None:
        self.debug_print('response', response.text)
        json_text = response.json()

        metadata_trans_ID = self.get_json_value(json_text, BSD_METADATA_TRANS_ID)
        image_GUID = self.get_json_value(json_text, BSD_IMAGE_GUID)
        image_size = self.get_json_value(json_text, BSD_IMAGE_SIZE)
        exception_message = self.get_json_value(json_text, BSD_EXCEPTION_MESSAGE)

        if exception_message is None:
            if metadata_trans_ID is not None and image_GUID is not None:
                response = self.send_download_request(access_token, UDI, self.MDF_ID,
                                                      metadata_trans_ID, image_GUID)
                if response is not None:
                    self.debug_print('response', response.text)
                    json_text = response.json()

                    download_url = self.get_json_value(json_text, BSD_DOWNLOAD_URL)
                    download_session_ID = self.get_json_value(json_text, BSD_DOWNLOAD_SESSION_ID)

                    # When download_url is null, it may be that the user needs to
                    # acknowledge the EULA or K9 agreement.
                    if download_url is None:
                        eula = self.get_json_value(json_text, BSD_EULA_FORM)
                        k9 = self.get_json_value(json_text, BSD_K9_FORM)
                        if eula is not None:
                            response = self.send_EULA_request(access_token, download_session_ID)
                            self.debug_print('EULA response', response.text)
                        elif k9 is not None:
                            response = self.send_K9_request(access_token, download_session_ID)
                            self.debug_print('K9 response', response.text)

                        response = self.send_download_request(access_token, UDI, self.MDF_ID,
                                                              metadata_trans_ID, image_GUID)
                        if response is not None:
                            self.debug_print('After accepting EULA or K9', response.text)
                            json_text = response.json()

                            download_url = self.get_json_value(json_text, BSD_DOWNLOAD_URL)
                            download_session_ID = self.get_json_value(json_text, BSD_DOWNLOAD_SESSION_ID)

                    self.debug_print('download_url', download_url)
                    self.debug_print('download_session', download_session_ID)

                    if download_url is not None and download_session_ID is not None:
                        self.send_get_image(access_token, download_url, output_file_path,
                                            self.image_name, image_size, callback)
                    else:
                        message = 'User "' + self.username + '" may not have software download privilege on cisco.com.'
                        raise Exception(message)
        else:
            logger.error('bsd_service hit exception %s', exception_message)
            raise Exception(exception_message)

def hello_world():
    logger.error({"account": 123,
                  "logger_name": "appname",
                  "ip": "172.20.19.18"},
                 extra=extra)
    return json.dumps({'mes': 'ola'}), 200

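# The snippet above references a module-level `extra` dict that is not shown.
# A minimal sketch of what it could be (the field names here are hypothetical,
# not from the source): logging's `extra` kwarg attaches extra attributes to
# the LogRecord so a formatter can render them alongside the message.
import logging

logger = logging.getLogger("appname")
extra = {"app_version": "1.0"}  # assumed fields

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(levelname)s %(app_version)s %(message)s"))
logger.addHandler(handler)
logger.error("boom", extra=extra)  # prints: ERROR 1.0 boom
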
def get_software(self, db_session, ctx):
    handler_class = get_inventory_handler_class(ctx.host.platform)
    if handler_class is None:
        logger.error('SoftwareManager: Unable to get handler for %s', ctx.host.platform)
        return

    handler = handler_class()
    if handler.get_software(ctx.host, ctx.inactive_cli, ctx.active_cli, ctx.committed_cli):
        # Update the time stamp
        ctx.host.inventory_job[0].set_status(JobStatus.COMPLETED)

def get_progress():
    db_session = DBSession()
    job_id = request.args.get('job_id')
    tar_job = db_session.query(CreateTarJob).filter(CreateTarJob.id == job_id).first()
    if tar_job is None:
        logger.error('Unable to retrieve Create Tar Job: %s' % job_id)
        return jsonify(status='Unable to retrieve job')
    return jsonify(status='OK', progress=tar_job.status)

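# A hedged client-side sketch (not from the source) showing how the progress
# endpoint above might be polled. The '/api/get_progress' path and base_url
# are assumptions; only the job_id parameter and the JSON shape
# {'status': ..., 'progress': ...} come from the handler itself.
import time
import requests

def poll_tar_job(base_url, job_id, attempts=30, interval=2):
    data = None
    for _ in range(attempts):
        resp = requests.get(base_url + '/api/get_progress',  # assumed route
                            params={'job_id': job_id})
        data = resp.json()
        if data.get('status') != 'OK':
            break  # job could not be retrieved
        print('progress:', data.get('progress'))
        time.sleep(interval)
    return data
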
def start(self, ctx):
    conn = condoor.Connection(ctx.hostname, ctx.host_urls,
                              log_dir=ctx.log_directory, log_level=logging.DEBUG)
    try:
        conn.connect()
        ctx.success = True
    except (condoor.ConnectionError, condoor.GeneralError) as e:
        logger.error(str(e))
        raise
    finally:
        conn.disconnect()

def get_config_conversion_progress():
    job_id = request.args.get('job_id', 0, type=int)
    db_session = DBSession()
    convert_config_job = db_session.query(ConvertConfigJob).filter(ConvertConfigJob.id == job_id).first()
    if convert_config_job is None:
        logger.error('Unable to retrieve Convert Config Job: %s' % job_id)
        return jsonify(status='Unable to retrieve job')
    return jsonify(status='OK', progress=convert_config_job.status)

def is_connection_valid(hostname, urls):
    ctx = TestConnectionContext(hostname, urls)
    try:
        handler_class = get_connection_handler_class(ctx)
        if handler_class is None:
            logger.error('Unable to get connection handler')
            return False
        handler_class().execute(ctx)
    except Exception:
        logger.exception('is_connection_valid() hit exception')
        raise
    return ctx.success

def process(self):
    db_session = DBSession()
    ctx = None
    try:
        install_job = db_session.query(InstallJob).filter(InstallJob.id == self.job_id).first()
        if install_job is None:
            # This is normal because of a race condition: the job has already
            # been deleted (completed).
            return

        if not can_install(db_session):
            # This will halt this host that has already been queued
            return

        host = db_session.query(Host).filter(Host.id == self.host_id).first()
        if host is None:
            logger.error('Unable to retrieve host %s', self.host_id)
            return

        handler_class = get_install_handler_class(host.platform)
        if handler_class is None:
            logger.error('Unable to get handler for %s, install job %s', host.platform, self.job_id)
            return

        install_job.start_time = datetime.datetime.utcnow()
        install_job.set_status(JobStatus.PROCESSING)
        install_job.session_log = create_log_directory(host.connection_param[0].host_or_ip, install_job.id)
        db_session.commit()

        ctx = InstallContext(host, db_session, install_job)
        ctx.operation_id = get_last_operation_id(db_session, install_job)

        handler = handler_class()
        handler.execute(ctx)

        if ctx.success:
            # Update the software
            self.get_software(db_session, ctx)
            archive_install_job(db_session, ctx, install_job, JobStatus.COMPLETED)
        else:
            archive_install_job(db_session, ctx, install_job, JobStatus.FAILED)

        db_session.commit()
    except Exception:
        try:
            logger.exception('InstallManager hit exception - install job = %s', self.job_id)
            archive_install_job(db_session, ctx, install_job, JobStatus.FAILED,
                                trace=traceback.format_exc())
            db_session.commit()
        except Exception:
            logger.exception('InstallManager hit exception - install job = %s', self.job_id)
    finally:
        # Must remove the host from the in-progress list
        remove_host_from_in_progress(self.host_id)
        db_session.close()

def is_connection_valid(platform, urls):
    ctx = ConnectionContext(urls)
    try:
        handler_class = get_connection_handler_class(platform)
        if handler_class is None:
            logger.error('Unable to get connection handler for %s', platform)
            return False
        handler = handler_class()
        handler.execute(ctx)
    except Exception:
        logger.exception('is_connection_valid hit exception')
    return ctx.success

def discover_platform_info(ctx):
    """Discover platform when added to CSM."""
    try:
        log_dir = os.path.join(get_log_directory(),
                               create_log_directory(ctx.host.connection_param[0].host_or_ip))
    except Exception:
        log_dir = None

    conn = condoor.Connection(name=ctx.hostname, urls=ctx.host_urls,
                              log_level=logging.CRITICAL, log_dir=log_dir)
    try:
        conn.connect(force_discovery=True)
        ctx.host.family = conn.family
        ctx.host.platform = conn.platform
        ctx.host.software_platform = get_software_platform(family=conn.family, os_type=conn.os_type)
        ctx.host.software_version = get_software_version(conn.os_version)
        ctx.host.os_type = conn.os_type
        ctx.db_session.commit()
    except condoor.ConnectionError as e:
        logger.error(str(e))
    finally:
        conn.disconnect()

def upload_files_to_server_repository(sourcefile, server, selected_server_directory, destfile):
    """
    Upload files from their locations in the host linux system to the
    FTP/TFTP/SFTP server repository.

    Arguments:
    :param sourcefile: one string file path that points to a file on the system where
                       CSM is hosted. The paths are all relative to csm/csmserver/.
                       For example, if the source file is in csm_data/migration/filename,
                       sourcefile = "../../csm_data/migration/filename"
    :param server: the associated server repository object stored in the CSM database
    :param selected_server_directory: the designated directory in the server repository
    :param destfile: one string filename that the source file should be named after being
                     copied to the designated directory in the server repository,
                     i.e., "thenewfilename"
    :return: 'OK' if no error occurred, otherwise an error message string.
    """
    server_type = server.server_type
    if server_type == ServerType.TFTP_SERVER:
        tftp_server = TFTPServer(server)
        try:
            tftp_server.upload_file(sourcefile, destfile, sub_directory=selected_server_directory)
        except Exception as inst:
            print(inst)
            logger.error('Unable to upload file to selected server directory in repository.')
            return 'Unable to upload file'
    elif server_type == ServerType.FTP_SERVER:
        ftp_server = FTPServer(server)
        try:
            ftp_server.upload_file(sourcefile, destfile, sub_directory=selected_server_directory)
        except Exception as inst:
            print(inst)
            logger.error('Unable to upload file to selected server directory in repository.')
            return 'Unable to upload file'
    elif server_type == ServerType.SFTP_SERVER:
        sftp_server = SFTPServer(server)
        try:
            sftp_server.upload_file(sourcefile, destfile, sub_directory=selected_server_directory)
        except Exception as inst:
            print(inst)
            logger.error('Unable to upload file to selected server directory in repository.')
            return 'Unable to upload file'
    else:
        logger.error('Only FTP, SFTP and TFTP server repositories are supported for this action.')
        # Return a plain string so callers comparing against "OK" behave consistently
        # (the original returned a jsonify() response from this helper).
        return 'Only FTP, SFTP and TFTP server repositories are supported for this action'

    return 'OK'

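# A hedged usage sketch (not from the source) for the helper above. The
# db_session, server id, sub-directory and file names are placeholders for
# whatever the caller already has on hand; only the 'OK' contract comes from
# upload_files_to_server_repository() itself.
def copy_file_to_repository(db_session, server_id):
    server = get_server_by_id(db_session, server_id)  # repository object from the CSM database
    status = upload_files_to_server_repository('../../csm_data/migration/router.iox',
                                               server,
                                               'converted_configs',  # hypothetical directory
                                               'router.iox')
    if status != 'OK':
        logger.error('Upload failed: %s', status)  # status carries the error text
    return status == 'OK'
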
def delete_reservation(self, con_id):
    """
    1. Delete the IP table rule on the management node.
    2. SSH into the compute node and kill the docker container.
    3. Delete the entry from the containers table.
    :param con_id: Container id in the database
    :return: 0 on success
    """
    container = self.database.get_container(con_id.strip())
    status = self.database.delete_ssh_port_forward_rule(container)
    if status is False:
        logger.error("Failed to delete IP table rule")
        return -1

    status = self.kill_container(container)
    if status is False:
        logger.error("Failed to kill container {0}".format(container))
        return -2

    status = self.database.remove_container(container)
    if status is True:
        return 0
    return -1

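# A hedged usage sketch (not from the source): translating delete_reservation()'s
# integer return codes into messages. The `manager` object and con_id are
# placeholders; the codes (0, -1, -2) come from the method above.
def release_container(manager, con_id):
    code = manager.delete_reservation(con_id)
    messages = {0: "reservation deleted",
                -1: "IP-table or database cleanup failed",
                -2: "failed to kill the container"}
    print(messages.get(code, "unknown return code: {0}".format(code)))
    return code == 0
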
def migrate_all_containers(self, node):
    """
    Should be called when a compute node is down.
    :param node: The compute node whose containers are to be shifted.
    :return:
    """
    for con in node.container:
        logger.info("Migrating container {0}".format(con))
        if self.database.add_deleted_container(con) is False:
            continue

        status = self.database.delete_ssh_port_forward_rule(con)
        if status is False:
            logger.error("Failed to delete IP table rule")
            return -1

        node = self.get_compute_node()
        if node is None:
            logger.error("No compute node available. Cannot migrate containers")
            return -1

        image_name, ram, cpu, con_name = con.image.image_name, con.ram, con.cpu, con.container_name
        try:
            status, docker_port, con_name, con_id = self.run_container(image_name, node.ip_addr,
                                                                       ram, cpu, con_name)
        except Exception:
            logger.exception("Failed to launch a new container of image {0} on node {1}"
                             .format(image_name, node.ip_addr))
            continue

        if status is True:
            nat_port = con.nat_port
            image = self.database.get_image(con.image.image_id)
            user = self.database.get_user(con.user.id)
            nat_port, management_ip = self.database.add_ssh_port_forward_rule(node.ip_addr, docker_port,
                                                                              nat_port=nat_port)
            status = self.database.remove_container(con)
            status = self.database.add_container(container_id=con_id, container_name=con_name,
                                                 nat_port=nat_port, management_ip=management_ip,
                                                 docker_port=docker_port, ram=ram, cpu=cpu,
                                                 username=DEFAULT_CONTAINER_USENAME,
                                                 password=DEFAULT_CONTAINER_PASSWORD,
                                                 node=node, image=image, user=user)
            if status is not True:
                logger.error("Failed to add to database")
            else:
                logger.info("Successfully migrated one container to {0}".format(node.ip_addr))

def process(self):
    db_session = DBSession()
    host_id = None
    inventory_job = None
    ctx = None
    try:
        inventory_job = db_session.query(InventoryJob).filter(InventoryJob.id == self.job_id).first()
        if inventory_job is None:
            logger.error('Unable to retrieve inventory job: %s' % self.job_id)
            return

        host_id = inventory_job.host_id
        host = db_session.query(Host).filter(Host.id == host_id).first()
        if host is None:
            logger.error('Unable to retrieve host: %s' % host_id)
            return

        handler_class = get_inventory_handler_class(host.platform)
        if handler_class is None:
            logger.error('Unable to get handler for %s, inventory job %s', host.platform, self.job_id)
            return

        inventory_job.set_status(JobStatus.PROCESSING)
        inventory_job.session_log = create_log_directory(host.connection_param[0].host_or_ip, inventory_job.id)
        db_session.commit()

        # Delegates the get_software logic to the handler
        ctx = InventoryContext(host, db_session, inventory_job)
        handler = handler_class()
        handler.execute(ctx)

        if ctx.success:
            archive_inventory_job(db_session, inventory_job, JobStatus.COMPLETED)
        else:
            # Removes the host object as host.packages may have been modified.
            db_session.expunge(host)
            archive_inventory_job(db_session, inventory_job, JobStatus.FAILED)

        # Reset the pending retrieval flag
        inventory_job.pending_submit = False
        db_session.commit()
    except Exception:
        try:
            logger.exception('InventoryManager hit exception - inventory job = %s', self.job_id)
            archive_inventory_job(db_session, inventory_job, JobStatus.FAILED,
                                  trace=traceback.format_exc())
            # Reset the pending retrieval flag
            inventory_job.pending_submit = False
            db_session.commit()
        except Exception:
            logger.exception('InventoryManager hit exception - inventory job = %s', self.job_id)
    finally:
        with lock:
            if self.job_id is not None and self.job_id in in_progress_jobs:
                del in_progress_jobs[self.job_id]
        db_session.close()

def send_install_status_email(install_job):
    db_session = DBSession()
    username = install_job.created_by

    system_option = SystemOption.get(db_session)
    if not system_option.enable_email_notify:
        return

    smtp_server = get_smtp_server(db_session)
    if smtp_server is None:
        logger.error('mailer: SMTP Server has not been specified')
        return

    user = get_user(db_session, username)
    if user is None:
        logger.error('mailer: Unable to locate user "%s"' % username)
        return

    host = get_host(db_session, install_job.host_id)
    if host is None:
        logger.error('mailer: Unable to locate host id "%s"' % str(install_job.host_id))
        return

    message = '<html><head></head><body>'
    if install_job.status == JobStatus.COMPLETED:
        message += 'The scheduled installation for host "' + host.hostname + '" has COMPLETED<br><br>'
    elif install_job.status == JobStatus.FAILED:
        message += 'The scheduled installation for host "' + host.hostname + '" has FAILED<br><br>'

    message += 'Scheduled Time: ' + get_datetime_string(install_job.scheduled_time) + ' (UTC)<br>'
    message += 'Start Time: ' + get_datetime_string(install_job.start_time) + ' (UTC)<br>'
    message += 'Install Action: ' + install_job.install_action + '<br><br>'

    session_log_url = system_option.base_url + '/' + get_session_log_link(host, install_job)
    message += 'For more information, click the link below<br><br>'
    message += session_log_url + '<br><br>'

    if install_job.packages is not None and len(install_job.packages) > 0:
        message += 'Following are the software packages: <br><br>' + install_job.packages.replace(',', '<br>')

    message += '</body></html>'

    sendmail(server=smtp_server.server, server_port=smtp_server.server_port,
             sender=smtp_server.sender, recipient=user.email, message=message,
             use_authentication=smtp_server.use_authentication,
             username=smtp_server.username, password=smtp_server.password,
             secure_connection=smtp_server.secure_connection)

def send_install_status_email(install_job):
    db_session = DBSession()
    username = install_job.created_by

    system_option = SystemOption.get(db_session)
    if not system_option.enable_email_notify:
        return

    smtp_server = get_smtp_server(db_session)
    if smtp_server is None:
        logger.error('mailer: SMTP Server has not been specified')
        return

    user = get_user(db_session, username)
    if user is None:
        logger.error('mailer: Unable to locate user "%s"' % username)
        return

    host = get_host(db_session, install_job.host_id)
    if host is None:
        logger.error('mailer: Unable to locate host id "%s"' % str(install_job.host_id))
        return

    message = '<html><head></head><body>'
    if install_job.status == JobStatus.COMPLETED:
        message += 'The scheduled installation for host "' + host.hostname + '" has COMPLETED<br><br>'
    elif install_job.status == JobStatus.FAILED:
        message += 'The scheduled installation for host "' + host.hostname + '" has FAILED<br><br>'

    message += 'Scheduled Time: ' + get_datetime_string(install_job.scheduled_time) + ' (UTC)<br>'
    message += 'Start Time: ' + get_datetime_string(install_job.start_time) + ' (UTC)<br>'
    message += 'Install Action: ' + install_job.install_action + '<br><br>'

    if install_job.packages is not None and len(install_job.packages) > 0:
        message += 'Following are the software packages: <br>' + install_job.packages.replace('\n', '<br>')

    message += '</body></html>'

    sendmail(server=smtp_server.server, server_port=smtp_server.server_port,
             sender=smtp_server.sender, recipient=user.email, message=message,
             use_authentication=smtp_server.use_authentication,
             username=smtp_server.username, password=smtp_server.password,
             secure_connection=smtp_server.secure_connection)

def upload_config_to_server_repository():
    server_id = request.args.get('server_id', -1, type=int)
    server_directory = request.args.get('server_directory', '', type=str)
    filename = request.args.get('filename', '', type=str)

    if server_id == -1:
        logger.error('No server repository selected.')
        return jsonify(status='No server repository selected.')

    db_session = DBSession()
    server = get_server_by_id(db_session, server_id)
    if not server:
        logger.error('Selected server repository not found in database.')
        return jsonify(status='Selected server repository not found in database.')

    if not server_directory:
        server_directory = None

    if not filename:
        logger.error('No filename selected.')
        return jsonify(status='No filename selected.')

    config_conversion_path = get_config_conversion_path()
    stripped_filename = get_stripped_filename(filename)
    output_iox = stripped_filename + ".iox"
    output_cal = stripped_filename + ".cal"

    status = upload_files_to_server_repository(os.path.join(config_conversion_path, output_iox),
                                               server, server_directory, output_iox)
    if status == "OK":
        status = upload_files_to_server_repository(os.path.join(config_conversion_path, output_cal),
                                                   server, server_directory, output_cal)
    return jsonify(status=status)

def refresh(self):
    self.manager.set_free_space()
    self.manager.set_accesses()
    status = NodeStatus.NA
    try:
        if False not in self.manager.get_accesses().values():
            free = self.manager.get_free_space()
            if self.free_limit < free.get('INCOMING') and MIN_FREE_SPACE < free.get('BACKUP'):
                status = status + NodeStatus.READY
            else:
                if self.free_limit >= free.get('INCOMING'):
                    status = status + NodeStatus.INCOMING_FULL
                if self.free_limit >= free.get('BACKUP'):
                    status = status + NodeStatus.BACKUP_FULL
    except Exception as err:
        import traceback
        logger.error(__name__)
        logger.error(err)
        logger.error(traceback.format_exc())
    finally:
        self.manager.set_status(status)

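# A hedged sketch (not from the source) of the additive status scheme refresh()
# appears to assume: composing NodeStatus values with "+" only stays
# unambiguous if NA is 0 and the other statuses are non-overlapping bit flags.
# The concrete values below are assumptions for illustration.
class NodeStatus(object):
    NA = 0
    READY = 1
    INCOMING_FULL = 2
    BACKUP_FULL = 4

status = NodeStatus.NA + NodeStatus.INCOMING_FULL + NodeStatus.BACKUP_FULL
assert status & NodeStatus.INCOMING_FULL and status & NodeStatus.BACKUP_FULL
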
def run_container(image_name, compute_node_ip, ram, cpu, container_name=None):
    """
    Start a container related to the image_name on compute_node_ip:
    SSH into compute_node_ip and execute the proper docker run command.
    :param image_name: (string) e.g. Nginx
    :param compute_node_ip: (string) e.g. 10.0.0.1
    :param ram: Max ram for this container (in MB)
    :return: Status(Boolean), Docker port(Integer), container_name(str), container_id(str)
    """
    image_name = "{0}/{1}".format(DOCKER_REGISTRY_URI, image_name)
    base_cmd = "ssh -o StrictHostKeyChecking=no root@{0} ".format(compute_node_ip)
    pull_cmd = "docker pull {0}".format(image_name)
    try:
        # We don't want the output to be printed on the master's screen, hence stdout=sp.PIPE
        output = sp.call("{0}{1}".format(base_cmd, pull_cmd), shell=True, stdout=sp.PIPE)
        if output != 0:
            logger.error("Failed: Pull image {0}".format(image_name))
    except Exception as e:
        logger.exception("Failed: command: {0}{1}\n{2}".format(base_cmd, pull_cmd, e))
        # It's ok to get an exception. Maybe the registry is down. Just try if
        # the image is in local storage.

    # Once the image has been pulled, run it.
    # We plan to restart the containers of a failed node on a live node, so we
    # don't want the --restart always option; otherwise we would end up with
    # duplicate containers.
    TEMPORARY = "temporary"
    if container_name:
        # If a container name has been given, check if a shared dir already exists.
        master_dir_name = os.path.join(MASTER_SHARED_DIR, container_name)
        host_mount_dir = os.path.join(SERVER_SHARED_DIR, container_name)
        container_name = "--name {0}".format(container_name)
    else:
        container_name = ""
        host_mount_dir = os.path.join(SERVER_SHARED_DIR, TEMPORARY)
        master_dir_name = os.path.join(MASTER_SHARED_DIR, TEMPORARY)

    if not os.path.exists(master_dir_name):
        os.mkdir(master_dir_name)
        os.chmod(master_dir_name, 0o777)

    shared_volume = "-v {0}:{1}".format(host_mount_dir, CON_SHARED_DIR)
    run_cmd = "docker run -P --detach --cpus {0} --memory {1}m {2} {3} {4}" \
        .format(cpu, ram, container_name, shared_volume, image_name)
    try:
        print("{0}{1}".format(base_cmd, run_cmd))
        cont_id = sp.check_output("{0}{1}".format(base_cmd, run_cmd), shell=True).decode('utf-8')
        assert cont_id
    except Exception as e:
        logger.exception("Failed: {0}{1}\n{2}".format(base_cmd, run_cmd, e))
        return False, None, None, None

    inspect_cmd = "docker inspect {0}".format(cont_id)
    try:
        output = sp.check_output("{0}{1}".format(base_cmd, inspect_cmd), shell=True).decode('utf-8')
        data = json.loads(output)[0]
        cont_name = data['Name'][1:].strip()  # The output is "/some_name"
        docker_port = int(data['NetworkSettings']['Ports']['22/tcp'][0]['HostPort'])
        new_dir_name = os.path.join(MASTER_SHARED_DIR, cont_name)
        os.rename(master_dir_name, new_dir_name)
        logger.info("Successfully launched container. Name={0}, Mounted dir={1}".format(cont_name, new_dir_name))
    except Exception as e:
        logger.exception("Failed: {0}{1}\n{2}".format(base_cmd, inspect_cmd, e))
        return False, None, None, None

    return True, docker_port, cont_name, cont_id.strip()

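# A hedged usage sketch (not from the source) for run_container() above; the
# node IP, image name and resource limits are placeholders.
ok, docker_port, name, con_id = run_container("nginx", "10.0.0.1", ram=512, cpu=1)
if ok:
    print("container {0} ({1}) exposes SSH on host port {2}".format(name, con_id, docker_port))
else:
    print("launch failed; the failing ssh/docker command was logged")
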
def get_server_managed_hosts(region_id, chassis, filter_failed):
    dt_params = DataTableParams(request)
    rows = []
    db_session = DBSession()

    clauses = []
    if len(dt_params.search_value):
        criteria = '%' + dt_params.search_value + '%'
        clauses.append(Host.hostname.like(criteria))
        clauses.append(Region.name.like(criteria))
        clauses.append(Host.location.like(criteria))
        clauses.append(ConnectionParam.host_or_ip.like(criteria))
        clauses.append(Host.platform.like(criteria))
        clauses.append(Host.software_platform.like(criteria))
        clauses.append(Host.software_version.like(criteria))

    query = db_session.query(Host)\
        .join(Region, Host.region_id == Region.id)\
        .join(ConnectionParam, Host.id == ConnectionParam.host_id)

    and_clauses = []
    if region_id != 0:
        and_clauses.append(Host.region_id == region_id)
    if chassis is not None:
        and_clauses.append(Host.platform == chassis)
    if filter_failed != 0:
        query = query.join(InventoryJob, Host.id == InventoryJob.host_id)
        and_clauses.append(InventoryJob.status == JobStatus.FAILED)

    if and_clauses:
        query = query.filter(and_(*and_clauses))
        total_count = query.count()
    else:
        total_count = db_session.query(Host).count()

    query = query.filter(or_(*clauses))
    filtered_count = query.count()

    if dt_params.columns_on_display is None:
        columns = [getattr(Host.hostname, dt_params.sort_order)(),
                   getattr(Region.name, dt_params.sort_order)(),
                   getattr(Host.location, dt_params.sort_order)(),
                   getattr(ConnectionParam.host_or_ip, dt_params.sort_order)(),
                   getattr(Host.platform, dt_params.sort_order)(),
                   getattr(Host.software_platform, dt_params.sort_order)(),
                   getattr(Host.software_version, dt_params.sort_order)()]
    else:
        columns = []
        check_and_add_column(columns, 'hostname', Host.hostname, dt_params)
        check_and_add_column(columns, 'region', Region.name, dt_params)
        check_and_add_column(columns, 'location', Host.location, dt_params)
        check_and_add_column(columns, 'host_or_ip', ConnectionParam.host_or_ip, dt_params)
        check_and_add_column(columns, 'chassis', Host.platform, dt_params)
        check_and_add_column(columns, 'platform', Host.software_platform, dt_params)
        check_and_add_column(columns, 'software', Host.software_version, dt_params)

    hosts = query.order_by(columns[dt_params.column_order])\
        .slice(dt_params.start_length, dt_params.start_length + dt_params.display_length).all()

    if hosts is not None:
        for host in hosts:
            row = dict()
            row['hostname'] = host.hostname
            row['region'] = '' if host.region is None else host.region.name
            row['location'] = host.location
            if len(host.connection_param) > 0:
                row['host_or_ip'] = host.connection_param[0].host_or_ip
                row['chassis'] = host.platform
                row['platform'] = UNKNOWN if host.software_platform is None else host.software_platform
                row['software'] = UNKNOWN if host.software_version is None else host.software_version

                inventory_job = host.inventory_job[0]
                if inventory_job is not None:
                    row['last_successful_retrieval'] = get_last_successful_inventory_elapsed_time(host)
                    row['inventory_retrieval_status'] = inventory_job.status
                else:
                    row['last_successful_retrieval'] = ''
                    row['inventory_retrieval_status'] = ''

                rows.append(row)
            else:
                logger.error('Host %s has no connection information.', host.hostname)

    response = dict()
    response['draw'] = dt_params.draw
    response['recordsTotal'] = total_count
    response['recordsFiltered'] = filtered_count
    response['data'] = rows
    return jsonify(**response)

def process(self):
    db_session = DBSession()
    download_job = None
    try:
        download_job = db_session.query(DownloadJob).filter(DownloadJob.id == self.job_id).first()
        if download_job is None:
            logger.error('Unable to retrieve download job: %s' % self.job_id)
            return

        self.download_job = download_job
        output_file_path = get_repository_directory() + download_job.cco_filename

        # Only download if the image (tar file) is not in the downloads directory
        # or is not a good one.
        if not is_tar_file_valid(output_file_path):
            user_id = download_job.user_id
            user = db_session.query(User).filter(User.id == user_id).first()
            if user is None:
                logger.error('Unable to retrieve user: %s' % user_id)

            preferences = db_session.query(Preferences).filter(Preferences.user_id == user_id).first()
            if preferences is None:
                logger.error('Unable to retrieve user preferences: %s' % user_id)
                return

            download_job.set_status(JobStatus.PROCESSING)
            db_session.commit()

            bsd = BSDServiceHandler(username=preferences.cco_username,
                                    password=preferences.cco_password,
                                    image_name=download_job.cco_filename,
                                    PID=download_job.pid, MDF_ID=download_job.mdf_id,
                                    software_type_ID=download_job.software_type_id)
            download_job.set_status('Preparing to download from cisco.com.')
            db_session.commit()

            bsd.download(output_file_path, callback=self.progress_listener)

            # Untar the file to the output directory
            tarfile_file_list = untar(output_file_path, get_repository_directory())
        else:
            tarfile_file_list = get_tarfile_file_list(output_file_path)

        # Now transfer to the server repository
        download_job.set_status('Transferring file to server repository.')
        db_session.commit()

        server = db_session.query(Server).filter(Server.id == download_job.server_id).first()
        if server is not None:
            server_impl = get_server_impl(server)
            for filename in tarfile_file_list:
                server_impl.upload_file(get_repository_directory() + filename, filename,
                                        sub_directory=download_job.server_directory)

        archive_download_job(db_session, download_job, JobStatus.COMPLETED)
        db_session.commit()
    except Exception:
        try:
            logger.exception('DownloadManager hit exception - download job = %s', self.job_id)
            archive_download_job(db_session, download_job, JobStatus.FAILED, traceback.format_exc())
            db_session.commit()
        except Exception:
            logger.exception('DownloadManager hit exception - download job = %s', self.job_id)
    finally:
        with lock:
            if download_job is not None and download_job.cco_filename in in_progress_downloads:
                del in_progress_downloads[download_job.cco_filename]
        db_session.close()

def get_managed_host_details(region_id):
    dt_params = DataTableParams(request)
    rows = []
    db_session = DBSession()

    clauses = []
    if len(dt_params.search_value):
        criteria = '%' + dt_params.search_value + '%'
        clauses.append(Host.hostname.like(criteria))
        clauses.append(Region.name.like(criteria))
        clauses.append(Host.location.like(criteria))
        clauses.append(Host.roles.like(criteria))
        clauses.append(Host.platform.like(criteria))
        clauses.append(Host.software_platform.like(criteria))
        clauses.append(Host.software_version.like(criteria))
        clauses.append(ConnectionParam.connection_type.like(criteria))
        clauses.append(ConnectionParam.host_or_ip.like(criteria))
        clauses.append(ConnectionParam.port_number.like(criteria))
        clauses.append(ConnectionParam.username.like(criteria))
        clauses.append(JumpHost.hostname.like(criteria))

    query = db_session.query(Host)\
        .join(Region, Host.region_id == Region.id)\
        .join(ConnectionParam, Host.id == ConnectionParam.host_id)\
        .outerjoin(JumpHost, ConnectionParam.jump_host_id == JumpHost.id)

    if region_id == 0:
        query = query.filter(or_(*clauses))
        total_count = db_session.query(Host).count()
    else:
        query = query.filter(and_(Host.region_id == region_id), or_(*clauses))
        total_count = db_session.query(Host).filter(Host.region_id == region_id).count()

    filtered_count = query.count()

    columns = [getattr(Host.hostname, dt_params.sort_order)(),
               getattr(Region.name, dt_params.sort_order)(),
               getattr(Host.location, dt_params.sort_order)(),
               getattr(Host.roles, dt_params.sort_order)(),
               getattr(Host.platform, dt_params.sort_order)(),
               getattr(Host.software_platform, dt_params.sort_order)(),
               getattr(Host.software_version, dt_params.sort_order)(),
               getattr(ConnectionParam.connection_type, dt_params.sort_order)(),
               getattr(ConnectionParam.host_or_ip, dt_params.sort_order)(),
               getattr(ConnectionParam.port_number, dt_params.sort_order)(),
               getattr(ConnectionParam.username, dt_params.sort_order)(),
               getattr(JumpHost.hostname, dt_params.sort_order)()]

    hosts = query.order_by(columns[dt_params.column_order])\
        .slice(dt_params.start_length, dt_params.start_length + dt_params.display_length).all()

    if hosts is not None:
        for host in hosts:
            row = dict()
            row['hostname'] = host.hostname
            row['region'] = '' if host.region is None else host.region.name
            row['location'] = host.location
            row['roles'] = host.roles
            row['chassis'] = host.platform
            row['platform'] = UNKNOWN if host.software_platform is None else host.software_platform
            row['software'] = UNKNOWN if host.software_version is None else host.software_version

            if len(host.connection_param) > 0:
                connection_param = host.connection_param[0]
                row['connection'] = connection_param.connection_type
                row['host_or_ip'] = connection_param.host_or_ip
                row['port_number'] = connection_param.port_number

                if not is_empty(connection_param.jump_host):
                    row['jump_host'] = connection_param.jump_host.hostname
                else:
                    row['jump_host'] = ''

                row['username'] = connection_param.username
                rows.append(row)
            else:
                logger.error('Host %s has no connection information.', host.hostname)

    response = dict()
    response['draw'] = dt_params.draw
    response['recordsTotal'] = total_count
    response['recordsFiltered'] = filtered_count
    response['data'] = rows
    return jsonify(**response)

def export_inventory_information():
    """Export the inventory search result to CSV, HTML or Excel format."""
    db_session = DBSession()
    export_results_form = ExportInventoryInformationForm(request.form)

    export_data = dict()
    export_data['export_format'] = export_results_form.export_format.data
    export_data['serial_number'] = export_results_form.hidden_serial_number.data \
        if export_results_form.hidden_serial_number.data != "" else None
    export_data['region_ids'] = export_results_form.hidden_region_ids.data.split(',') \
        if export_results_form.hidden_region_ids.data else []
    export_data['chassis_types'] = export_results_form.hidden_chassis_types.data.split(',') \
        if export_results_form.hidden_chassis_types.data else []
    export_data['software_versions'] = export_results_form.hidden_software_versions.data.split(',') \
        if export_results_form.hidden_software_versions.data else []
    export_data['model_names'] = export_results_form.hidden_model_names.data.split(',') \
        if export_results_form.hidden_model_names.data else []
    export_data['partial_model_names'] = export_results_form.hidden_partial_model_names.data.split(',') \
        if export_results_form.hidden_partial_model_names.data else []
    export_data['vid'] = export_results_form.hidden_vid.data \
        if export_results_form.hidden_vid.data != "" else None

    if export_data['region_ids']:
        region_names = db_session.query(Region.name).filter(
            Region.id.in_(map(int, export_data['region_ids']))).order_by(Region.name.asc()).all()
        export_data['region_names'] = [query_tuple[0] for query_tuple in region_names]
    else:
        export_data['region_names'] = []

    export_data['available_inventory_iter'] = query_available_inventory(db_session,
                                                                        export_data.get('serial_number'),
                                                                        export_data.get('model_names'),
                                                                        export_data.get('partial_model_names'),
                                                                        export_data.get('vid'))
    export_data['in_use_inventory_iter'] = query_in_use_inventory(db_session, export_data)
    export_data['user'] = current_user

    writer = None
    if export_data.get('export_format') == ExportInformationFormat.HTML:
        writer = ExportInventoryInfoHTMLWriter(**export_data)
    elif export_data.get('export_format') == ExportInformationFormat.MICROSOFT_EXCEL:
        writer = ExportInventoryInfoExcelWriter(**export_data)
    elif export_data.get('export_format') == ExportInformationFormat.CSV:
        writer = ExportInventoryInfoCSVWriter(**export_data)

    if writer:
        file_path = writer.write_report()
        if export_results_form.send_email.data:
            email_message = "<html><head></head><body>Please find in the attachment the inventory search " \
                            "results matching the following search criteria: "
            search_criteria_in_html = get_search_filter_in_html(export_data)
            if search_criteria_in_html:
                email_message += search_criteria_in_html + '</body></html>'
            else:
                email_message += ' None</body></html>'
            create_email_job_with_attachment_files(db_session, email_message, file_path,
                                                   export_results_form.user_email.data)
        return send_file(file_path, as_attachment=True)

    logger.error('inventory: invalid export format "%s" chosen.' % export_data.get('export_format'))
    return
