def run_command(args):
    """Create a new site on the server from the parsed CLI arguments.

    Honors --continue-if-exists: a name conflict is then logged and ignored
    instead of terminating the command.
    """
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    site_item = TSC.SiteItem(
        name=args.site_name,
        content_url=args.url,
        admin_mode=args.admin_mode,
        user_quota=args.user_quota,
        storage_quota=args.storage_quota,
    )
    try:
        logger.info(_("createsite.status").format(args.site_name))
        server.sites.create(site_item)
        logger.info(_("common.output.succeeded"))
        return
    except TSC.ServerResponseError as e:
        if Errors.is_resource_conflict(e):
            conflict_message = _("createsite.errors.site_name_already_exists").format(args.site_name)
            if args.continue_if_exists:
                # Caller asked us to tolerate an existing site of the same name.
                logger.info(conflict_message)
                return
            Errors.exit_with_error(logger, conflict_message)
        # Any other server-side failure is fatal.
        Errors.exit_with_error(logger, _("publish.errors.unexpected_server_response"), e)
def run_command(args):
    """Delete a workbook or datasource by name.

    The name is resolved first as a workbook, then (only if that fails)
    as a datasource; exits with an error if neither matches or the
    deletion itself fails.
    """
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    logger.info(_("delete.status").format(args.name, ""))
    # BUG FIX: item_type and item_to_delete were referenced before assignment
    # (NameError) when both lookups failed; initialize them so the
    # "not found" branch below can run.
    error = None
    item_to_delete = None
    item_type = None
    try:
        item_to_delete = DeleteCommand.get_workbook_item(logger, server, args.name)
        item_type = "workbook"
    except TSC.ServerResponseError as workbook_error:
        error = workbook_error
    if item_type is None:
        # Name did not resolve to a workbook; fall back to a datasource lookup.
        try:
            item_to_delete = DeleteCommand.get_data_source_item(logger, server, args.name)
            item_type = "datasource"
        except TSC.ServerResponseError as ds_error:
            error = ds_error
    if not item_type:
        logger.debug(error)
        Errors.exit_with_error(logger, _("delete.errors.requires_workbook_datasource"))
    try:
        if item_type == "workbook":
            server.workbooks.delete(item_to_delete.id)
        else:
            server.datasources.delete(item_to_delete.id)
        logger.info(_("common.output.succeeded"))
    except TSC.ServerResponseError as e:
        Errors.exit_with_error(logger, "Error deleting from server", e)
def run_command(args):
    """Publish the Tableau sample content into the named project."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    # Resolve the optional parent project path first (None means top level).
    parent_path = None
    if args.parent_project_path is not None:
        parent_path = Server.get_project_by_name_and_parent_path(
            logger, server, None, args.parent_project_path)
    try:
        target_project = PublishSamplesCommand.get_project_by_name_and_parent_path(
            logger, server, args.project_name, parent_path)
    except Exception as e:
        Errors.exit_with_error(
            logger,
            _("tabcmd.report.error.publish_samples.expected_project"),
            exception=e)
    try:
        # samples=True asks the server to drop the sample content into the project.
        server.projects.update(target_project, samples=True)
    except Exception as e:
        Errors.exit_with_error(
            logger, _("tabcmd.result.failure.publish_samples"), exception=e)
def run_command(args):
    # A view can be returned in PDF, PNG, or CSV (summary data only) format.
    # A Tableau workbook is returned as a TWB if it connects to a datasource/live connection,
    # or a TWBX if it uses an extract.
    """Download a workbook (twb/twbx) or a view (pdf/png/csv) from the server."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    if " " in args.url:
        Errors.exit_with_error(logger, _("export.errors.white_space_workbook_view"))
    file_type = GetUrl.get_file_type_from_filename(logger, args.filename, args.url)
    content_type = GetUrl.evaluate_content_type(logger, args.url)
    if content_type == "workbook":
        if file_type in ("twbx", "twb"):
            GetUrl.generate_twb(logger, server, args, file_type)
        else:
            Errors.exit_with_error(
                logger,
                message=_("publish.errors.mutually_exclusive_option").format("twb", "twbx"))
    else:  # content type = view
        # Dispatch on the requested file extension.
        view_generators = {
            "pdf": GetUrl.generate_pdf,
            "png": GetUrl.generate_png,
            "csv": GetUrl.generate_csv,
        }
        generate = view_generators.get(file_type)
        if generate is None:
            Errors.exit_with_error(logger, message=_("tabcmd.get.extension.not_found"))
        else:
            generate(logger, server, args)
def __init__(self): self.username = None # we don't store the password self.user_id = None self.auth_token = None self.token_name = None self.token = None self.password_file = None self.site_name = None # The site name, e.g 'alpodev' self.site_id = None # The site id, e.g 'abcd-1234-1234-1244-1234' self.server_url = None self.last_command = None # for when we have to renew the session then re-try self.last_login_using = None self.no_prompt = False self.certificate = None self.no_certcheck = False self.no_proxy = True self.proxy = None self.timeout = None self.logging_level = "info" self.logger = log(__class__.__name__, self.logging_level) self._read_from_json() self.tableau_server = None # this one is an object that doesn't get persisted in the file
def run_command(args):
    """Delete a project, optionally located under a parent project path."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    if args.parent_project_path:
        logger.debug("parent path: {}".format(args.parent_project_path))
    try:
        logger.debug(_("deleteproject.status").format(args.parent_project_path, args.project_name))
        project = Server.get_project_by_name_and_parent_path(
            logger, server, args.project_name, args.parent_project_path)
    except TSC.ServerResponseError as e:
        Errors.exit_with_error(logger, _("publish.errors.unexpected_server_response"), e)
    project_id = project.id
    try:
        logger.info(_("deleteproject.status").format(args.project_name))
        server.projects.delete(project_id)
        logger.info(_("common.output.succeeded"))
    except TSC.ServerResponseError as e:
        # BUG FIX: the failure-message key was passed as a raw string; wrap it
        # in _() so it is localized like every other message in this module.
        Errors.exit_with_error(logger, _("tabcmd.result.failure.delete.project"), e)
def run_command(args):
    """Add the users listed in the given file to the named group."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    logger.info(_("tabcmd.add.users.to_site").format(args.users.name, args.name))
    # Delegate the per-user loop (and its error handling) to the shared helper.
    action_label = "added"
    group_action = server.groups.add_user
    UserCommand.act_on_users(logger, server, action_label, group_action, args)
def run_command(args):
    """Look up the named schedule and (eventually) run it.

    Currently only resolves the schedule; actually triggering it is not
    implemented and always exits with an error.
    """
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    logger.info(_("export.status").format(args.schedule))
    # BUG FIX: the original indexed [0] before the emptiness check, so a
    # missing schedule raised IndexError instead of reaching the
    # "resource not found" error path. Check first, then index.
    matches = DatasourcesAndWorkbooks.get_items_by_name(
        logger, server.schedules, args.schedule)
    if not matches:
        Errors.exit_with_error(logger, _("publish.errors.server_resource_not_found"))
    schedule = matches[0]
    logger.info(_("runschedule.status"))
    Errors.exit_with_error(logger, "Not yet implemented")
def run_command(args):
    """Delete the named group from the server."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    try:
        logger.info(_("tabcmd.find.group").format(args.name))
        group_id = Server.find_group_id(logger, server, args.name)
        logger.info(_("deletegroup.status").format(group_id))
        server.groups.delete(group_id)
        logger.info(_("tabcmd.result.succeeded"))
    except TSC.ServerResponseError as e:
        # BUG FIX: the failure-message key was passed as a raw string; wrap it
        # in _() so it is localized like every other message in this module.
        Errors.exit_with_error(logger, _("tabcmd.result.failed.delete.group"), e)
def run_command(args):
    """Delete the named site, refusing to delete the site currently logged in to."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    # BUG FIX: sites.get_by_name returns a SiteItem, not an id. The original
    # compared the item itself to session.site_id (always unequal) and passed
    # the item where sites.delete expects an id. Use the .id attribute.
    site_item = server.sites.get_by_name(args.site_name)
    if site_item.id == session.site_id:
        Errors.exit_with_error(logger, "Cannot delete the site you are logged in to")
    try:
        server.sites.delete(site_item.id)
        logger.info("Successfully deleted the site")
    except TSC.ServerResponseError as e:
        Errors.exit_with_error(logger, "Error deleting site", e)
def run_command(args):
    """Queue a server job that re-encrypts all extracts on the given site."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    site_item = Server.get_site_for_command_or_throw(logger, server, args)
    try:
        logger.info(_("reencryptextracts.status").format(site_item.name))
        job = server.sites.encrypt_extracts(site_item.id)
    except TSC.ServerResponseError as e:
        # BUG FIX: the exception was passed positionally as the *message*
        # argument; pass it via exception= so it is reported as the error
        # detail (consistent with the createextracts command).
        Errors.exit_with_error(logger, exception=e)
    logger.info(_("common.output.job_queued_success"))
    logger.debug("Extract re-encryption queued with JobID: {}".format(job.id))
def run_command(args):
    """Create a new group, optionally tolerating an existing group of the same name."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    try:
        logger.info(_("creategroup.status").format(args.name))
        new_group = TSC.GroupItem(args.name)
        server.groups.create(new_group)
        logger.info(_("tabcmd.result.succeeded"))
    except TSC.ServerResponseError as e:
        if args.continue_if_exists and Errors.is_resource_conflict(e):
            logger.info(_("tabcmd.result.already_exists.group").format(args.name))
            return
        # BUG FIX: localize the message key with _() and include the server
        # exception so the failure cause is reported (matches the other
        # commands in this module, which all pass the exception through).
        Errors.exit_with_error(logger, _("tabcmd.result.failed.create_group"), e)
def run_command(args):
    """Print name and content-url for every site on the server.

    With --get-extract-encryption-mode, also prints each site's extract
    encryption setting.
    """
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    try:
        all_sites, pagination = server.sites.get()
        logger.info(_("listsites.status").format(session.username))
        for site_item in all_sites:
            print("NAME:", site_item.name)
            print("SITEID:", site_item.content_url)
            if args.get_extract_encryption_mode:
                print("EXTRACTENCRYPTION:", site_item.extract_encryption_mode)
            # Blank line between sites for readability.
            print("")
    except TSC.ServerResponseError as e:
        Errors.exit_with_error(logger, e)
def run_command(args):
    """Bulk-create users on the current site from a CSV file.

    Validates the file first, then adds each user, counting successes and
    collecting per-line errors for a final summary report.
    """
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    number_of_users_listed = 0
    number_of_users_added = 0
    number_of_errors = 0
    creation_site = "current site"
    UserCommand.validate_file_for_import(
        args.filename, logger, detailed=True, strict=args.require_all_valid)
    logger.info(_("tabcmd.add.users.to_x").format(args.filename.name, creation_site))
    user_obj_list = UserCommand.get_users_from_file(args.filename, logger)
    logger.info(_("session.monitorjob.percent_complete").format(0))
    error_list = []
    for user_obj in user_obj_list:
        try:
            number_of_users_listed += 1
            # FIX: the return value of users.add was bound to an unused local
            # (`result`); drop the dead assignment.
            server.users.add(user_obj)
            logger.info(_("tabcmd.result.success.create_user").format(user_obj.name))
            number_of_users_added += 1
        except TSC.ServerResponseError as e:
            # Keep going on per-user failures; report them in the summary.
            number_of_errors += 1
            error_list.append(e)
            logger.debug(e)
    logger.info(_("session.monitorjob.percent_complete").format(100))
    logger.info(_("importcsvsummary.line.processed").format(number_of_users_listed))
    logger.info(_("importcsvsummary.line.skipped").format(number_of_errors))
    logger.info(_("importcsvsummary.users.added.count").format(number_of_users_added))
    if number_of_errors > 0:
        logger.info(_("importcsvsummary.error.details").format(error_list))
def run_command(args):
    """Create a project, optionally nested under a parent project path.

    Returns the created ProjectItem on success. With --continue-if-exists a
    name conflict is logged and ignored instead of aborting the command.
    """
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    parent_id = None
    readable_name = args.project_name
    if args.parent_project_path:
        try:
            logger.info(_("tabcmd.find.parent_project").format(args.parent_project_path))
            parent = Server.get_project_by_name_and_parent_path(
                logger, server, None, args.parent_project_path)
        except TSC.ServerResponseError as exc:
            Errors.exit_with_error(logger, _("publish.errors.server_resource_not_found"), exc)
        readable_name = "{0}/{1}".format(args.parent_project_path, args.project_name)
        parent_id = parent.id
        logger.debug("parent project = `{0}`, id = {1}".format(
            args.parent_project_path, parent_id))
    logger.info(_("createproject.status").format(readable_name))
    new_project = TSC.ProjectItem(args.project_name, args.description, None, parent_id)
    try:
        created = server.projects.create(new_project)
        logger.info(_("common.output.succeeded"))
        return created
    except TSC.ServerResponseError as e:
        if Errors.is_resource_conflict(e):
            already_exists = _("tabcmd.result.already_exists").format(args.project_name)
            if args.continue_if_exists:
                logger.info(already_exists)
                return
            Errors.exit_with_error(logger, already_exists)
        Errors.exit_with_error(logger, _("publish.errors.unexpected_server_response"), e)
def run_command(args):
    """Queue extract deletion for a datasource or a workbook."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    # BUG FIX: `job` was unbound (NameError at job.id below) when neither
    # --datasource nor --workbook was supplied; fail fast with a clear error.
    job = None
    try:
        if args.datasource:
            logger.info(_("deleteextracts.for.datasource").format(args.datasource))
            data_source_item = Server.get_data_source_item(logger, server, args.datasource)
            job = server.datasources.delete_extract(data_source_item)
        elif args.workbook:
            logger.info(_("deleteextracts.for.workbook_name").format(args.workbook))
            workbook_item = Server.get_workbook_item(logger, server, args.workbook)
            job = server.workbooks.delete_extract(workbook_item)
        else:
            Errors.exit_with_error(logger, _("deleteextracts.errors.error"))
    except TSC.ServerResponseError as e:
        Errors.exit_with_error(logger, _("deleteextracts.errors.error"), e)
    logger.info(_("common.output.job_queued_success"))
    logger.debug("Extract deletion queued with JobID: {}".format(job.id))
class ProjectsTest(unittest.TestCase):
    """Unit tests for project-path parsing and project lookup."""

    logger = log("Projects_Tests", "debug")

    @staticmethod
    def test_parent_path_to_list():
        # None means "no path"; "" is one empty segment; "/" splits segments.
        cases = {
            None: [],
            "": [""],
            "parent": ["parent"],
            "parent/child": ["parent", "child"],
        }
        for path, segments in cases.items():
            assert Server._parse_project_path_to_list(path) == segments

    @mock.patch("tableauserverclient.Server")
    def test_get_project(self, mock_server):
        mock_server.projects.get = getter
        Server.get_project_by_name_and_parent_path(
            mock.MagicMock(), mock_server, "random_name", "")
        getter.assert_called()
def run_command(args):
    """Remove each user named in the given CSV file, reporting a summary."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    logger.info(_("deleteusers.status").format(args.filename.name))
    UserCommand.validate_file_for_import(args.filename, logger, strict=args.require_all_valid)
    deleted_count = 0
    failure_count = 0
    users = UserCommand.get_users_from_file(args.filename, logger)
    logger.debug("Users: {}".format(len(users)))
    failures = []
    for user in users:
        # Progress line: how many users have been deleted so far.
        logger.info(_("importcsvsummary.line.processed").format(deleted_count))
        try:
            user_id = UserCommand.find_user_id(logger, server, user.name)
            server.users.remove(user_id)
            logger.debug(_("tabcmd.result.success.delete_user").format(user.name, user_id))
            deleted_count += 1
        except Exception as e:
            # Keep going on per-user failures; report them in the summary.
            Errors.check_common_error_codes_and_explain(logger, e)
            failure_count += 1
            failures.append(e)
    logger.info(_("importcsvsummary.line.processed").format(deleted_count))
    logger.info(_("importcsvsummary.errors.count").format(failure_count))
    if failure_count > 0:
        logger.info(_("importcsvsummary.error.details").format(failures))
def run_command(args):
    """Export a workbook (--fullpdf) or a view (--pdf/--png/--csv) to a local file."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    view_content_url, wb_content_url = ExportCommand.parse_export_url_to_workbook_and_view(
        logger, args.url)
    logger.debug([view_content_url, wb_content_url])
    if not view_content_url and not wb_content_url:
        Errors.exit_with_error(
            logger, _("export.errors.requires_workbook_view_param").format(ExportCommand))
    # BUG FIX: output/default_filename were unbound (NameError) when none of
    # the format flags was set; initialize them and fail fast below.
    output = None
    default_filename = None
    try:
        if args.fullpdf:  # it's a workbook
            workbook_item = ExportCommand.get_wb_by_content_url(logger, server, wb_content_url)
            output = ExportCommand.download_wb_pdf(server, workbook_item)
            default_filename = "{}.pdf".format(workbook_item.name)
        elif args.pdf or args.png or args.csv:  # it's a view
            view_item = ExportCommand.get_view_by_content_url(logger, server, view_content_url)
            if args.pdf:
                output = ExportCommand.download_view_pdf(server, view_item)
                default_filename = "{}.pdf".format(view_item.name)
            elif args.csv:
                output = ExportCommand.download_csv(server, view_item)
                default_filename = "{}.csv".format(view_item.name)
            elif args.png:
                output = ExportCommand.download_png(server, view_item)
                default_filename = "{}.png".format(view_item.name)
    except Exception as e:
        Errors.exit_with_error(
            logger, _("publish.errors.unexpected_server_response").format(""), e)
    if output is None:
        # No format flag was given — nothing was downloaded.
        Errors.exit_with_error(
            logger, _("export.errors.requires_workbook_view_param").format(ExportCommand))
    try:
        save_name = args.filename or default_filename
        ExportCommand.save_to_file(logger, output, save_name)
    except Exception as e:
        Errors.exit_with_error(logger, "Error saving to file", e)
def run_command(args):
    """Update a site's content-url, quotas, and/or state from CLI arguments."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    site_item = Server.get_site_for_command_or_throw(logger, server, args)
    # Copy only the fields the caller actually supplied onto the site item.
    if args.url:
        site_item.content_url = args.url
    if args.user_quota:
        site_item.user_quota = args.user_quota
    if args.storage_quota:
        site_item.storage_quota = args.storage_quota
    if args.status:
        site_item.state = args.status
    logger.info(_("editsite.status").format(site_item.name))
    try:
        server.sites.update(site_item)
    except TSC.ServerResponseError as e:
        Errors.exit_with_error(logger, _("publish.errors.unexpected_server_response"), e)
    logger.info(_("common.output.succeeded"))
def run_command(args):
    """Queue extract creation for a datasource or a workbook."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    # FIX: removed the unused `creation_call` local. Also guard against `job`
    # being unbound (NameError at job.id) when neither --datasource nor
    # --workbook is supplied.
    job = None
    try:
        logger.debug(
            "Extract params: encrypt={}, include_all={}, datasources={}".format(
                args.encrypt, args.include_all, args.embedded_datasources))
        if args.datasource:
            data_source_item = Server.get_data_source_item(logger, server, args.datasource)
            logger.info(_("createextracts.for.datasource").format(args.datasource))
            job = server.datasources.create_extract(data_source_item, encrypt=args.encrypt)
        elif args.workbook:
            workbook_item = Server.get_workbook_item(logger, server, args.workbook)
            logger.info(_("createextracts.for.workbook_name").format(args.workbook))
            job = server.workbooks.create_extract(
                workbook_item,
                encrypt=args.encrypt,
                includeAll=args.include_all,
                datasources=args.embedded_datasources,
            )
    except TSC.ServerResponseError as e:
        Errors.exit_with_error(logger, exception=e)
    if job is None:
        Errors.exit_with_error(logger, "Please specify either a datasource or workbook")
    logger.info(_("common.output.job_queued_success"))
    logger.debug("Extract creation queued with JobID: {}".format(job.id))
def run_command(args):
    """Publish a workbook (twb/twbx) or datasource (tds/tdsx/hyper) file to a project."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    if args.project_name:
        try:
            project_id = Server.get_project_by_name_and_parent_path(
                logger, server, args.project_name, args.parent_project_path
            )
        except Exception as exc:
            Errors.exit_with_error(logger, _("publish.errors.server_resource_not_found"), exc)
    else:
        # No project given: publish into the default project.
        project_id = ""
        args.project_name = "default"
        args.parent_project_path = ""
    publish_mode = PublishCommand.get_publish_mode(args)
    source = PublishCommand.get_filename_extension_if_tableau_type(logger, args.filename)
    logger.info(_("publish.status").format(args.filename))
    if source in ["twbx", "twb"]:
        new_workbook = TSC.WorkbookItem(project_id, name=args.name, show_tabs=args.tabbed)
        try:
            new_workbook = server.workbooks.publish(new_workbook, args.filename, publish_mode)
        except IOError as ioe:
            Errors.exit_with_error(logger, ioe)
        logger.info(_("publish.success") + "\n{}".format(new_workbook.webpage_url))
    elif source in ["tds", "tdsx", "hyper"]:
        new_datasource = TSC.DatasourceItem(project_id, name=args.name)
        try:
            new_datasource = server.datasources.publish(new_datasource, args.filename, publish_mode)
        except IOError as ioe:
            # BUG FIX: this handler referenced the undefined name `exc`,
            # raising NameError instead of reporting the IO error.
            Errors.exit_with_error(logger, ioe)
        logger.info(_("publish.success") + "\n{}".format(new_datasource.webpage_url))
def run_command(args):
    """Log in: establish (or renew) a server session from the given arguments."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    # create_session handles credential resolution and persistence.
    login_session = Session()
    login_session.create_session(args)
def run_command(args):
    """Queue an extract refresh for a datasource or workbook.

    With --synchronous, polls every second until the background refresh job
    starts and then finishes, exiting with a timeout error if args.timeout
    elapses first.
    """
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    session = Session()
    server = session.create_session(args)
    refresh_action = "refresh"
    if args.addcalculations or args.removecalculations:
        logger.warning(
            "Data Acceleration tasks are deprecated and this parameter has no effect."
            "It will be removed in a future update.")
    # are these two mandatory? mutually exclusive?
    # docs: the REST method always runs a full refresh even if the refresh type is set to incremental.
    if args.incremental:
        # docs: run the incremental refresh
        # FIX: logger.warn is deprecated; use logger.warning.
        logger.warning(
            "Incremental refresh is not yet available through the new tabcmd")
    # if args.synchronous: # docs: run a full refresh and poll until it completes
    # else: run a full refresh but don't poll for completion
    container = None
    if args.project_name:
        try:
            container = Server.get_project_by_name_and_parent_path(
                logger, server, args.project_name, args.parent_project_path)
        except Exception as ex:
            logger.warning(
                "Could not find project {}/{}. \nContinuing without.".format(
                    args.parent_project_path, args.project_name))
    job = None
    try:
        # TODO: use the container in the search
        if args.datasource:
            logger.debug(_("export.status").format(args.datasource))
            datasource_id = Server.get_data_source_id(
                logger, server, args.datasource, container)
            logger.info(_("refreshextracts.status_refreshed").format(
                _("content_type.datasource"), args.datasource))
            job: TSC.JobItem = server.datasources.refresh(datasource_id)
        elif args.workbook:
            logger.debug(_("export.status").format(args.workbook))
            workbook_id = Server.get_workbook_id(logger, server, args.workbook, container)
            logger.info(_("refreshextracts.status_refreshed").format(
                _("content_type.workbook"), args.workbook))
            job: TSC.JobItem = server.workbooks.refresh(workbook_id)
        elif args.url:
            logger.error("URL not yet implemented")
    except TSC.ServerResponseError as e:
        Errors.exit_with_error(logger, _("refreshextracts.errors.error"), e)
    # BUG FIX: `job` was unbound (NameError below) when no refresh was queued
    # (e.g. only --url was given); bail out cleanly instead.
    if job is None:
        Errors.exit_with_error(logger, _("refreshextracts.errors.error"))
    logger.info(_("common.output.job_queued_success"))
    if args.synchronous:
        # maintains a live connection to the server while the refresh operation is underway,
        # polling every second until the background job is done.
        # <job id="JOB_ID" mode="MODE" type="RefreshExtract" />
        # NOTE(review): the job object is never re-fetched from the server inside
        # the poll loops — confirm TSC updates it, otherwise these conditions
        # should re-query via server.jobs (e.g. get_by_id(job.id)).
        logger.info("Waiting for refresh job to begin ....")

        def _job_started():
            # BUG FIX: the original lambda was `logger.info(".") and <cond>`.
            # logger.info returns None, so the whole expression was always
            # falsy and polling could never succeed. Log, then return the test.
            logger.info(".")
            return job.started_at is not None

        try:
            polling2.poll(_job_started, step=1, timeout=args.timeout)
        except polling2.TimeoutException:
            Errors.exit_with_error(logger, _("messages.timeout_error.summary"))
        logger.info("Job started at {}".format(job.started_at))

        def _job_finished():
            # Same fix as above: log progress, then test the finish code.
            logger.info("{}".format(job.progress))
            return job.finish_code != -1

        try:
            polling2.poll(_job_finished, step=1, timeout=args.timeout)
            logger.info("Job completed at {}".format(job.completed_at))
        except polling2.TimeoutException:
            Errors.exit_with_error(logger, _("messages.timeout_error.summary"))
    else:
        logger.info(_("common.output.job_queued_success"))
        logger.debug("Extract refresh started with JobID: {0}".format(job.id))
def test_get_project(self):
    """E2E: look up the Default project at the root of the project tree."""
    logger = log(__class__.__name__, "info")
    server = E2EServerTests.test_log_in()
    project_name = "Default"
    Server.get_project_by_name_and_parent_path(logger, server, project_name, None)
class UserDataTest(unittest.TestCase):
    """Tests for username validation, site-role evaluation, and CSV user import."""

    logger = log("UserDataTest", "debug")
    # Each row: [license, admin-level, can-publish, expected site role].
    role_inputs = [
        ["creator", "system", "yes", "SiteAdministrator"],
        ["None", "system", "no", "SiteAdministrator"],
        ["explorer", "SysTEm", "no", "SiteAdministrator"],
        ["creator", "site", "yes", "SiteAdministratorCreator"],
        ["explorer", "site", "yes", "SiteAdministratorExplorer"],
        ["creator", "SITE", "no", "SiteAdministratorCreator"],
        ["creator", "none", "yes", "Creator"],
        ["explorer", "none", "yes", "ExplorerCanPublish"],
        ["viewer", "None", "no", "Viewer"],
        ["explorer", "no", "yes", "ExplorerCanPublish"],
        ["EXPLORER", "noNO", "yes", "ExplorerCanPublish"],
        ["explorer", "no", "no", "Explorer"],
        ["unlicensed", "none", "no", "Unlicensed"],
        ["Chef", "none", "yes", "Unlicensed"],
        ["yes", "yes", "yes", "Unlicensed"],
    ]
    # CSV lines for import validation; includes an empty line and short lines.
    valid_import_content = [
        "username, pword, fname, creator, site, yes, email",
        "username, pword, fname, explorer, none, no, email",
        "",
        "u",
        "p",
    ]
    valid_username_content = ["*****@*****.**"]
    # Mix of valid and invalid usernames (indices 5+ are expected to fail).
    usernames = [
        "valid",
        "*****@*****.**",
        "domain/valid",
        "domain/[email protected]",
        "va!@#$%^&*()lid",
        "in@v@lid",
        "in valid",
        "",
    ]

    def test_validate_usernames(self):
        # Indices 0-4 are accepted; 5 and 6 must raise.
        UserCommand._validate_username_or_throw(UserDataTest.usernames[0])
        UserCommand._validate_username_or_throw(UserDataTest.usernames[1])
        UserCommand._validate_username_or_throw(UserDataTest.usernames[2])
        UserCommand._validate_username_or_throw(UserDataTest.usernames[3])
        UserCommand._validate_username_or_throw(UserDataTest.usernames[4])
        with self.assertRaises(AttributeError):
            UserCommand._validate_username_or_throw(UserDataTest.usernames[5])
        with self.assertRaises(AttributeError):
            UserCommand._validate_username_or_throw(UserDataTest.usernames[6])

    def test_evaluate_role(self):
        for line in UserDataTest.role_inputs:
            actual = UserCommand.evaluate_site_role(line[0], line[1], line[2])
            assert actual == line[3], line + [actual]

    def test_get_user_detail_empty_line(self):
        test_line = ""
        test_user = UserCommand._parse_line(test_line)
        assert test_user is None

    def test_get_user_detail_standard(self):
        test_line = "username, pword, fname, license, admin, pub, email"
        test_user: TSC.UserItem = UserCommand._parse_line(test_line)
        assert test_user.name == "username", test_user.name
        assert test_user.fullname == "fname", test_user.fullname
        assert test_user.site_role == "Unlicensed", test_user.site_role
        assert test_user.email == "email", test_user.email

    def test_get_user_details_only_username(self):
        test_line = "username"
        test_user: TSC.UserItem = UserCommand._parse_line(test_line)

    def test_populate_user_details_only_some(self):
        values = ["username", "", "", "creator", "admin"]
        data = Userdata()
        data.populate(values)

    def test_populate_user_details_all(self):
        values = UserDataTest.valid_import_content[0]
        data = Userdata()
        data.populate([values])

    def test_validate_user_detail_standard(self):
        test_line = "username, pword, fname, creator, site, 1, email"
        UserCommand._validate_user_or_throw(test_line, UserDataTest.logger)

    # for file handling
    def _mock_file_content(self, content: List[str]) -> io.TextIOWrapper:
        # the empty string represents EOF
        # the tests run through the file twice, first to validate then to fetch
        mock = MagicMock(io.TextIOWrapper)
        content.append("")  # EOF
        mock.readline.side_effect = content
        mock.name = "file-mock"
        return mock

    def test_get_users_from_file_missing_elements(self):
        bad_content = [
            "username, pword, , yes, email",
            "username",
            "username, pword",
            "username, pword, , , yes, email",
        ]
        test_data = self._mock_file_content(bad_content)
        UserCommand.get_users_from_file(test_data)

    def test_validate_import_file(self):
        test_data = self._mock_file_content(UserDataTest.valid_import_content)
        num_lines = UserCommand.validate_file_for_import(test_data, UserDataTest.logger, detailed=True)
        assert num_lines == 2, "Expected two lines to be parsed, got {}".format(
            num_lines)

    def test_validate_usernames_file(self):
        test_data = self._mock_file_content(UserDataTest.usernames)
        n = UserCommand.validate_file_for_import(test_data, UserDataTest.logger)
        assert n == 5, "Exactly 5 of the lines were valid, counted {}".format(
            n)

    def test_validate_usernames_file_strict(self):
        test_data = self._mock_file_content(UserDataTest.usernames)
        with self.assertRaises(SystemExit):
            UserCommand.validate_file_for_import(test_data, UserDataTest.logger, strict=True)

    def test_get_usernames_from_file(self):
        test_data = self._mock_file_content(UserDataTest.usernames)
        user_list = UserCommand.get_users_from_file(test_data)
        assert user_list[0].name == "valid", user_list
def run_command(args):
    """Log out: end the current server session and clear persisted session data."""
    logger = log(__class__.__name__, args.logging_level)
    logger.debug(_("tabcmd.launching"))
    current_session = Session()
    current_session.end_session_and_clear_data()