def _run_task(self):
    """Prepare metadata in ``path`` for unmanaged EE deployment.

    Validates that ``path`` exists and ``revert_path`` does not, snapshots
    ``path`` into ``revert_path``, then strips the configured XML elements
    from the metadata files under ``path``.

    Raises:
        TaskOptionsError: if ``path`` is missing/not a directory, or if
            ``revert_path`` already exists.
    """
    # Check that path exists
    if not os.path.isdir(self.options["path"]):
        raise TaskOptionsError(
            "The path {} does not exist or is not a directory".format(
                self.options["path"]))

    # Check that revert_path does not exist
    if os.path.exists(self.options["revert_path"]):
        raise TaskOptionsError(
            "The revert_path {} already exists. Delete it and try again".
            format(self.options["revert_path"]))

    # Copy path to revert_path so the edits below can be undone
    copy_tree(self.options["path"], self.options["revert_path"])

    # Edit metadata in path
    self.logger.info(
        "Preparing metadata in {0} for unmanaged EE deployment".format(
            self.options["path"]))
    for element in self.elements:
        # Each configured element is "fname_match:element_name"
        fname_match, element_name = element.split(":")
        # Fix: os.path.join() with a single argument is a no-op wrapper;
        # pass the path through directly.
        remove_xml_element_directory(element_name,
                                     self.options["path"],
                                     fname_match)

    self.logger.info(
        "Metadata in {} is now prepared for unmanaged EE deployment".
        format(self.options["path"]))
def _run_task(self):
    """Strip the managed token from Apex classes and triggers under ``path``.

    A backup copy of ``path`` is first written to ``revert_path`` so the
    edits can be undone.
    """
    path = self.options['path']
    revert_path = self.options['revert_path']

    # The source directory must exist...
    if not os.path.isdir(path):
        raise TaskOptionsError(
            'The path {} does not exist or is not a directory'.format(
                path, ))

    # ...and the backup location must not, so a prior backup is never clobbered.
    if os.path.exists(revert_path):
        raise TaskOptionsError(
            'The revert_path {} already exists. Delete it and try again'.
            format(revert_path, ))

    # Snapshot the tree before editing it.
    copy_tree(path, revert_path)

    self.logger.info(
        'Removing the string {0} from {1}/classes and {1}/triggers'.format(
            self.managed_token, path, ))

    # Blank out the managed token in both Apex source directories.
    for subdir, pattern in (('classes', '*.cls'), ('triggers', '*.trigger')):
        findReplace(self.managed_token, '', os.path.join(path, subdir),
                    pattern, self.logger)

    self.logger.info(
        '{} has been stripped from all classes and triggers in {}'.format(
            self.managed_token, path, ))
def _init_options(self, kwargs):
    """Validate and normalize options for the DeleteData task.

    Raises:
        TaskOptionsError: for an empty object list, criteria combined with
            multiple objects, an unknown ``api`` value, or ``hardDelete``
            combined with the REST API.
    """
    super(DeleteData, self)._init_options(kwargs)

    # Split and trim objects string into a list if not already a list
    self.options["objects"] = process_list_arg(self.options["objects"])
    if not len(self.options["objects"]) or not self.options["objects"][0]:
        raise TaskOptionsError("At least one object must be specified.")

    self.options["where"] = self.options.get("where", None)
    # A WHERE clause is only meaningful for a single target object.
    if len(self.options["objects"]) > 1 and self.options["where"]:
        raise TaskOptionsError(
            "Criteria cannot be specified if more than one object is specified."
        )

    self.options["hardDelete"] = process_bool_arg(self.options.get("hardDelete"))
    self.options["ignore_row_errors"] = process_bool_arg(
        self.options.get("ignore_row_errors")
    )
    self.options["inject_namespaces"] = process_bool_arg(
        self.options.get("inject_namespaces", True)
    )

    # Map the user-facing api option onto the DataApi enum.
    try:
        self.options["api"] = {
            "bulk": DataApi.BULK,
            "rest": DataApi.REST,
            "smart": DataApi.SMART,
        }[self.options.get("api", "smart").lower()]
    except KeyError as e:
        # Fix: chain the cause so the original lookup failure is preserved
        # in the traceback (PEP 3134).
        raise TaskOptionsError(
            f"{self.options['api']} is not a valid value for API (valid: bulk, rest, smart)"
        ) from e

    # The REST API cannot hard-delete records.
    if self.options["hardDelete"] and self.options["api"] is DataApi.REST:
        raise TaskOptionsError("The hardDelete option requires Bulk API.")
def _run_task(self):
    """Assemble anonymous Apex from a file and/or an inline snippet, then run it."""
    source_path = self.options.get("path")
    if source_path:
        apex = self._load_apex_from_path(source_path)
    else:
        self.logger.info("Executing anonymous Apex")
        apex = ""

    # Any inline apex option is appended after the file contents.
    snippet = self.options.get("apex")
    if snippet:
        apex = apex + "\n" + snippet

    apex = self._prepare_apex(apex)
    response = self.tooling._call_salesforce(
        method="GET",
        url="{}executeAnonymous".format(self.tooling.base_url),
        params={"anonymousBody": apex},
    )
    self._check_result(response)
    self.logger.info("Anonymous Apex Success")

def _load_apex_from_path(self, source_path):
    """Read Apex source from a file that must live inside the project repo."""
    if not in_directory(source_path, self.project_config.repo_root):
        raise TaskOptionsError(
            "Please specify a path inside your project repository. "
            "You specified: {}".format(source_path)
        )
    self.logger.info("Executing anonymous Apex from {}".format(source_path))
    try:
        with open(source_path, "r", encoding="utf-8") as handle:
            return handle.read()
    except IOError:
        raise TaskOptionsError(
            "Could not find or read file: {}".format(source_path)
        )
def _init_options(self, kwargs):
    """Normalize LoadData options; exactly one of database_url/sql_path is kept."""
    super(LoadData, self)._init_options(kwargs)

    self.options["ignore_row_errors"] = process_bool_arg(
        self.options.get("ignore_row_errors", False))

    # database_url takes precedence when both options are supplied.
    if self.options.get("database_url"):
        self.options["sql_path"] = None
    elif self.options.get("sql_path"):
        self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
        self.options["database_url"] = None
    else:
        raise TaskOptionsError(
            "You must set either the database_url or sql_path option.")

    self.reset_oids = self.options.get("reset_oids", True)

    # Normalize bulk_mode to title case ("Serial"/"Parallel") when provided;
    # a falsy value passes through unchanged.
    raw_mode = self.options.get("bulk_mode")
    self.bulk_mode = raw_mode.title() if raw_mode else raw_mode
    if self.bulk_mode and self.bulk_mode not in ("Serial", "Parallel"):
        raise TaskOptionsError(
            "bulk_mode must be either Serial or Parallel")

    self.options["inject_namespaces"] = process_bool_arg(
        self.options.get("inject_namespaces", True))
    self.options["drop_missing_schema"] = process_bool_arg(
        self.options.get("drop_missing_schema", False))
def _init_options(self, kwargs):
    """Resolve RunApexTests options, falling back to project config defaults."""
    super(RunApexTests, self)._init_options(kwargs)

    opts = self.options  # alias: same dict object as self.options
    opts["test_name_match"] = opts.get(
        "test_name_match", self.project_config.project__test__name_match
    )
    opts["test_name_exclude"] = opts.get(
        "test_name_exclude", self.project_config.project__test__name_exclude
    )
    if opts["test_name_exclude"] is None:
        opts["test_name_exclude"] = ""
    opts["namespace"] = opts.get(
        "namespace", self.project_config.project__package__namespace
    )
    opts["junit_output"] = opts.get("junit_output", "test_results.xml")
    opts["json_output"] = opts.get("json_output", "test_results.json")
    opts["managed"] = process_bool_arg(opts.get("managed", False))

    # Pre-compile retry_failures patterns so a bad regex fails fast here
    # rather than mid-run.
    opts["retry_failures"] = process_list_arg(opts.get("retry_failures", []))
    compiled = []
    for pattern in opts["retry_failures"]:
        try:
            compiled.append(re.compile(pattern))
        except re.error as e:
            raise TaskOptionsError(
                "An invalid regular expression ({}) was provided ({})".format(
                    pattern, e
                )
            )
    opts["retry_failures"] = compiled

    opts["retry_always"] = process_bool_arg(opts.get("retry_always", False))
    self.verbose = process_bool_arg(opts.get("verbose", False))
    self.counts = {}

    # Optional org-wide coverage gate; accepts values like "75" or "75%".
    if "required_org_code_coverage_percent" in opts:
        raw_level = str(opts["required_org_code_coverage_percent"]).rstrip("%")
        try:
            self.code_coverage_level = int(raw_level)
        except ValueError:
            raise TaskOptionsError(
                f"Invalid code coverage level {opts['required_org_code_coverage_percent']}"
            )
    else:
        self.code_coverage_level = None
def _init_options(self, kwargs):
    """Validate Deploy test-run options and resolve the injection namespace."""
    super(Deploy, self)._init_options(kwargs)

    self.check_only = process_bool_arg(
        self.options.get("check_only", False))

    valid_levels = (
        "NoTestRun",
        "RunLocalTests",
        "RunAllTestsInOrg",
        "RunSpecifiedTests",
    )
    self.test_level = self.options.get("test_level")
    if self.test_level and self.test_level not in valid_levels:
        raise TaskOptionsError(
            f"Specified test run level {self.test_level} is not valid.")

    self.specified_tests = process_list_arg(
        self.options.get("specified_tests", []))

    # specified_tests and RunSpecifiedTests must appear together or not at all.
    if bool(self.specified_tests) != (self.test_level == "RunSpecifiedTests"):
        raise TaskOptionsError(
            "The specified_tests option and test_level RunSpecifiedTests must be used together."
        )

    # Fall back to the project package namespace for injection.
    self.options["namespace_inject"] = (
        self.options.get("namespace_inject")
        or self.project_config.project__package__namespace)
def _init_options(self, kwargs):
    """Validate the fields option and build a per-object (field, help_text) map."""
    # Satisfy the parent's api_names requirement; the real names are derived below.
    self.task_config.options["api_names"] = "dummy"
    super()._init_options(kwargs)

    self.options["overwrite"] = process_bool_arg(
        self.options.get("overwrite", False))

    fields = self.options.get("fields")
    if not isinstance(fields, list) or not fields:
        raise TaskOptionsError(
            "Please populate the fields field with a list of dictionaries containing at minimum one entry with an 'api_name' and 'help_text' keys"
        )
    if any("api_name" not in entry for entry in fields):
        raise TaskOptionsError(
            "The 'api_name' key is required on all entry values.")
    if any("help_text" not in entry for entry in fields):
        raise TaskOptionsError(
            "The 'help_text' key is required on all entry values to declare what help text value to insert."
        )

    # Group (field, help_text) pairs by their namespaced object name.
    self.api_name_list = defaultdict(list)
    for entry in process_list_arg(fields):
        try:
            obj, field = entry["api_name"].split(".")
            self.api_name_list[self._inject_namespace(obj)].append(
                (self._inject_namespace(field), entry["help_text"]))
        except ValueError:
            raise TaskOptionsError(
                f"api_name {entry} is not a valid Object.Field reference")

    self.api_names = set(self.api_name_list.keys())
def _init_options(self, kwargs):
    """Normalize UpdateDependencies options and validate enumerated values."""
    super(UpdateDependencies, self)._init_options(kwargs)

    # Boolean options together with their defaults.
    for name, default in (
        ("purge_on_delete", True),
        ("namespaced_org", False),
        ("include_beta", False),
        ("allow_newer", True),
        ("allow_uninstalls", False),
    ):
        self.options[name] = process_bool_arg(self.options.get(name, default))

    # Fall back to the project's declared dependencies.
    self.options["dependencies"] = (
        self.options.get("dependencies")
        or self.project_config.project__dependencies)

    self.options["security_type"] = self.options.get(
        "security_type", "FULL")
    if self.options["security_type"] not in ("FULL", "NONE", "PUSH"):
        raise TaskOptionsError(
            f"Unsupported value for security_type: {self.options['security_type']}"
        )

    # Each ignore_dependencies entry must identify a github repo or a namespace.
    if "ignore_dependencies" in self.options:
        for dep in self.options["ignore_dependencies"]:
            if "github" not in dep and "namespace" not in dep:
                raise TaskOptionsError(
                    "An invalid dependency was specified for ignore_dependencies."
                )
def _init_options(self, kwargs):
    """Validate data-generation options: mapping file, counts, task class, target DB.

    Raises:
        TaskOptionsError: for a missing mapping file, non-positive batch
            size, missing data_generation_task, or a non-empty target
            database without ``replace_database``.
    """
    super()._init_options(kwargs)

    mapping_file = self.options.get("mapping", None)
    if mapping_file:
        self.mapping_file = os.path.abspath(mapping_file)
        if not os.path.exists(self.mapping_file):
            raise TaskOptionsError(f"{self.mapping_file} cannot be found.")
    else:
        self.mapping_file = None

    # Fix: database_url was previously fetched and assigned twice; read it once.
    self.database_url = self.options.get("database_url")

    self.num_records = int(self.options["num_records"])
    # batch_size defaults to the full record count (a single batch).
    self.batch_size = int(self.options.get("batch_size", self.num_records))
    if self.batch_size <= 0:
        raise TaskOptionsError("Batch size should be greater than zero")

    class_path = self.options.get("data_generation_task")
    if class_path:
        self.data_generation_task = import_global(class_path)
    else:
        raise TaskOptionsError("No data generation task specified")

    self.debug_dir = self.options.get("debug_dir", None)

    # Refuse to write into a non-empty database unless replace_database is set.
    if self.database_url:
        engine, metadata = self._setup_engine(self.database_url)
        tables = metadata.tables
        if len(list(tables)) and not self.options.get("replace_database"):
            raise TaskOptionsError(
                f"Database {self.database_url} has tables "
                f"({list(tables)}) "
                "but `replace_database` was not specified")
def _init_options(self, kwargs):
    """Normalize UpdateDependencies options; disable include_beta in persistent orgs.

    Raises:
        TaskOptionsError: for an unsupported security_type or an invalid
            ignore_dependencies entry.
    """
    super(UpdateDependencies, self)._init_options(kwargs)

    self.options["purge_on_delete"] = process_bool_arg(
        self.options.get("purge_on_delete", True))
    self.options["include_beta"] = process_bool_arg(
        self.options.get("include_beta", False))
    self.options["dependencies"] = (
        self.options.get("dependencies")
        or self.project_config.project__dependencies)
    self.options["allow_newer"] = process_bool_arg(
        self.options.get("allow_newer", True))
    self.options["allow_uninstalls"] = process_bool_arg(
        self.options.get("allow_uninstalls", False))

    self.options["security_type"] = self.options.get(
        "security_type", "FULL")
    if self.options["security_type"] not in ("FULL", "NONE", "PUSH"):
        raise TaskOptionsError(
            f"Unsupported value for security_type: {self.options['security_type']}"
        )

    # Each ignore_dependencies entry must reference a github repo or a namespace.
    if "ignore_dependencies" in self.options:
        if any("github" not in dep and "namespace" not in dep
               for dep in self.options["ignore_dependencies"]):
            raise TaskOptionsError(
                "An invalid dependency was specified for ignore_dependencies."
            )

    # Beta package versions may only be installed into scratch orgs.
    if (self.org_config and self.options["include_beta"]
            and not self.org_config.scratch):
        # Fix: warning text previously read "this not a scratch org".
        self.logger.warning(
            "The `include_beta` option is enabled but this is not a scratch org.\n"
            "Setting `include_beta` to False to avoid installing beta package versions in a persistent org."
        )
        self.options["include_beta"] = False
def _init_options(self, kwargs):
    """Initialize CreateConnectedApp options, defaulting email from the github service."""
    self.client_id = None
    self.client_secret = None
    kwargs["command"] = "force:mdapi:deploy --wait {}".format(
        self.deploy_wait)
    super(CreateConnectedApp, self)._init_options(kwargs)

    # label must be usable as an API name: word characters only.
    if re.match(r"^\w+$", self.options["label"]) is None:
        raise TaskOptionsError(
            "label value must contain only alphanumeric or underscore characters"
        )

    # Default email to the github service's email if configured
    if "email" not in self.options:
        try:
            github_service = self.project_config.keychain.get_service("github")
        except ServiceNotConfigured:
            raise TaskOptionsError(
                "Could not determine a default for option 'email'. Either configure the github service using 'cci service connect github' or provide a value for the 'email' option"
            )
        self.options["email"] = github_service.email

    # Coerce both flags to booleans; missing or falsy values become False.
    for flag in ("connect", "overwrite"):
        self.options[flag] = process_bool_arg(self.options.get(flag) or False)
def _process_renames(self, renamed_paths):
    """Build and return a dict mapping each ``local`` path to its ``target`` path.

    ``renamed_paths`` must be a list of dicts, each having exactly the
    keys ``local`` and ``target`` with truthy values for both; an empty
    or missing list yields ``{}``.

    Raises:
        TaskOptionsError: when any entry is malformed.
    """
    if not renamed_paths:
        return {}

    ERROR_MSG = (
        "Renamed paths must be a list of dicts with `local:` and `target:` keys."
    )

    # Every entry must be a dict with exactly the expected keys.
    for pair in renamed_paths:
        if not isinstance(pair, dict) or pair.keys() != {"local", "target"}:
            raise TaskOptionsError(ERROR_MSG)

    local_to_target_paths = {}
    for rename in renamed_paths:
        local_path = rename.get("local")
        target_path = rename.get("target")
        # Both values must be non-empty.
        if not (local_path and target_path):
            raise TaskOptionsError(ERROR_MSG)
        local_to_target_paths[local_path] = target_path
    return local_to_target_paths
def _get_or_create_package(self, package_config: PackageConfig):
    """Find or create the Package2

    Checks the Dev Hub for an existing, non-deprecated 2GP package with matching name, type, and namespace.
    """
    message = f"Checking for existing {package_config.package_type} Package named {package_config.package_name}"
    # Build the Tooling API query; namespace narrows the match when present,
    # otherwise only packages with no namespace qualify.
    query = (
        f"SELECT Id, ContainerOptions FROM Package2 WHERE IsDeprecated = FALSE "
        f"AND ContainerOptions='{package_config.package_type}' "
        f"AND IsOrgDependent={package_config.org_dependent} "
        f"AND Name='{package_config.package_name}'")
    if package_config.namespace:
        query += f" AND NamespacePrefix='{package_config.namespace}'"
        message += f" with namespace {package_config.namespace}"
    else:
        query += " AND NamespacePrefix=null"
    self.logger.info(message)
    try:
        res = self.tooling.query(query)
    except SalesforceMalformedRequest as err:
        # An unknown Package2 object indicates the org is not a Dev Hub
        # with 2GP enabled; any other malformed-request error is re-raised.
        if "Object type 'Package2' is not supported" in err.content[0][
                "message"]:
            raise TaskOptionsError(
                "This org does not have a Dev Hub with 2nd-generation packaging enabled. "
                "Make sure you are using the correct org and/or check the Dev Hub settings in Setup."
            )
        raise  # pragma: no cover
    # More than one match is ambiguous; refuse to guess.
    if res["size"] > 1:
        raise TaskOptionsError(
            f"Found {res['size']} packages with the same name, namespace, and package_type"
        )
    if res["size"] == 1:
        existing_package = res["records"][0]
        # A same-name package of a different type cannot be reused.
        if existing_package[
                "ContainerOptions"] != package_config.package_type:
            raise PackageUploadFailure(
                f"Duplicate Package: {existing_package['ContainerOptions']} package with id "
                f"{ existing_package['Id']} has the same name ({package_config.package_name}) "
                "for this namespace but has a different package type")
        package_id = existing_package["Id"]
        self.logger.info(f"Found {package_id}")
        return package_id
    # No match: create a fresh Package2 via the Tooling API.
    self.logger.info("No existing package found, creating the package")
    Package2 = self._get_tooling_object("Package2")
    package = Package2.create({
        "ContainerOptions": package_config.package_type,
        "IsOrgDependent": package_config.org_dependent,
        "Name": package_config.package_name,
        "Description": package_config.description,
        "NamespacePrefix": package_config.namespace,
    })
    # NOTE(review): create() responses use a lowercase "id" key here, unlike
    # the query records' "Id" — presumably the simple_salesforce convention;
    # confirm against the tooling client.
    return package["id"]
def _init_options(self, kwargs):
    """Resolve sql_path into an in-memory sqlite database_url."""
    super(LoadData, self)._init_options(kwargs)

    sql_path = self.options.get("sql_path")
    if sql_path:
        # sql_path drives database_url; the two options are mutually exclusive.
        if self.options.get("database_url"):
            raise TaskOptionsError(
                "The database_url option is set dynamically with the sql_path option. Please unset the database_url option."
            )
        sql_path = os_friendly_path(sql_path)
        self.options["sql_path"] = sql_path
        if not os.path.isfile(sql_path):
            raise TaskOptionsError("File {} does not exist".format(sql_path))
        self.logger.info("Using in-memory sqlite database")
        self.options["database_url"] = "sqlite://"
def _init_options(self, kwargs):
    """Validate the objects/where/hardDelete options for DeleteData."""
    super(DeleteData, self)._init_options(kwargs)

    # Split and trim objects string into a list if not already a list
    objects = process_list_arg(self.options["objects"])
    self.options["objects"] = objects
    if not objects or not objects[0]:
        raise TaskOptionsError("At least one object must be specified.")

    where = self.options.get("where", None)
    self.options["where"] = where
    # A WHERE clause only makes sense for a single target object.
    if where and len(objects) > 1:
        raise TaskOptionsError(
            "Criteria cannot be specified if more than one object is specified."
        )

    self.options["hardDelete"] = process_bool_arg(self.options.get("hardDelete"))
def _init_options(self, kwargs):
    """Validate EnsureRecordTypes options.

    Raises:
        TaskOptionsError: for an invalid developer name or a custom-object
            sobject value.
    """
    super(EnsureRecordTypes, self)._init_options(kwargs)
    self.options["generate_business_process"] = False

    # Validate developer name
    if not re.match(r"^\w+$", self.options["record_type_developer_name"]):
        raise TaskOptionsError(
            "Record Type Developer Name value must contain only alphanumeric or underscore characters"
        )

    # Fix: the comment previously said "standard objects", but the check
    # (and the error message) reject CUSTOM objects — names ending in "__c".
    # We don't currently support custom objects.
    if self.options["sobject"].endswith("__c"):
        raise TaskOptionsError(
            "EnsureRecordTypes does not support custom objects")
def _get_test_classes(self):
    """Query the org for Apex test classes matching the configured name patterns."""
    if self.options['managed']:
        namespace = self.options.get('namespace')
        if not namespace:
            raise TaskOptionsError(
                'Running tests in managed mode but no namespace available.'
            )
        namespace = "'{}'".format(namespace)
    else:
        namespace = 'null'

    # Split by commas to allow multiple class name matching options
    included_tests = [
        "Name LIKE '{}'".format(pattern)
        for pattern in self.options['test_name_match'].split(',')
        if pattern
    ]

    # Add any excludes to the where clause
    excluded_tests = [
        "(NOT Name LIKE '{}')".format(pattern)
        for pattern in self.options.get('test_name_exclude', '').split(',')
        if pattern
    ]

    # Get all test classes for namespace
    query = ('SELECT Id, Name FROM ApexClass ' +
             'WHERE NamespacePrefix = {}'.format(namespace))
    if included_tests:
        query += ' AND ({})'.format(' OR '.join(included_tests))
    if excluded_tests:
        query += ' AND {}'.format(' AND '.join(excluded_tests))

    # Run the query
    self.logger.info('Running query: {}'.format(query))
    result = self.tooling.query_all(query)
    self.logger.info('Found {} test classes'.format(result['totalSize']))
    return result
def _run_task(self):
    """Resolve static dependencies, then uninstall/install them in order."""
    if not self.options["dependencies"]:
        self.logger.info("Project has no dependencies, doing nothing")
        return

    # Beta versions may only be installed into scratch orgs.
    if self.options["include_beta"] and not isinstance(
            self.org_config, ScratchOrgConfig):
        raise TaskOptionsError(
            "Target org must be a scratch org when `include_beta` is true."
        )

    self.logger.info("Preparing static dependencies map")
    static_dependencies = self.project_config.get_static_dependencies(
        self.options["dependencies"],
        include_beta=self.options["include_beta"])

    # Reset the work queues before processing.
    self.installed = None
    self.uninstall_queue = []
    self.install_queue = []

    self.logger.info("Dependencies:")
    for line in self.project_config.pretty_dependencies(static_dependencies):
        self.logger.info(line)

    self._process_dependencies(static_dependencies)

    # Uninstalls run in reverse dependency order.
    self.uninstall_queue.reverse()

    self._uninstall_dependencies()
    self._install_dependencies()
def _init_mapping(self):
    """Load the YAML mapping file named by the ``mapping`` option."""
    path = self.options["mapping"]
    if not path:
        raise TaskOptionsError("Mapping file path required")
    self.mapping = parse_from_yaml(path)
def _init_options(self, kwargs):
    """Resolve package install options, including symbolic version names."""
    super(InstallPackageVersion, self)._init_options(kwargs)

    namespace = self.options.setdefault(
        "namespace", self.project_config.project__package__namespace)
    if "name" not in self.options:
        # Prefer the managed package name, then the plain name, then namespace.
        self.options["name"] = (
            self.project_config.project__package__name_managed
            or self.project_config.project__package__name
            or namespace
        )

    # Retry behavior defaults.
    self.options.setdefault("retries", 5)
    self.options.setdefault("retry_interval", 5)
    self.options.setdefault("retry_interval_add", 30)

    # Translate symbolic version names into concrete version numbers.
    version = self.options.get("version")
    if version == "latest":
        self.options["version"] = self.project_config.get_latest_version()
    elif version == "latest_beta":
        self.options["version"] = self.project_config.get_latest_version(beta=True)
    elif version == "previous":
        self.options["version"] = self.project_config.get_previous_version()

    self.options["activateRSS"] = process_bool_arg(self.options.get("activateRSS"))

    self.options["security_type"] = self.options.get("security_type", "FULL")
    if self.options["security_type"] not in ("FULL", "NONE", "PUSH"):
        raise TaskOptionsError(
            f"Unsupported value for security_type: {self.options['security_type']}"
        )
def _validate_options(self):
    """Require at least one of report_folders / dashboard_folders.

    Raises:
        TaskOptionsError: when neither option is present.
    """
    super(RetrieveReportsAndDashboards, self)._validate_options()
    # Idiom fix: use the "not in" operator rather than "not ... in".
    if ("report_folders" not in self.options
            and "dashboard_folders" not in self.options):
        raise TaskOptionsError(
            "You must provide at least one folder name for either report_folders or dashboard_folders"
        )
def _process_devhub_output(self, output):
    """Extract the default Dev Hub username from sfdx force:config JSON output."""
    data = self._process_json_output(output)
    first_result = data["result"][0]
    if "value" not in first_result:
        raise TaskOptionsError(
            "No sfdx config found for defaultdevhubusername. Please use the sfdx force:config:set to set the defaultdevhubusername and run again"
        )
    self.options["username"] = first_result["value"]
def _process_dependencies(self, dependencies):
    """Recursively queue dependencies for install, tracking forced uninstalls."""
    for dependency in dependencies:
        # Recurse into child dependencies first and note whether any of
        # them caused a package to be queued for uninstall.
        dependency_uninstalled = False
        subdependencies = dependency.get("dependencies")
        if subdependencies:
            uninstalls_before = len(self.uninstall_queue)
            self._process_dependencies(subdependencies)
            dependency_uninstalled = (
                uninstalls_before != len(self.uninstall_queue))

        if "namespace" in dependency:
            # Managed package dependency.
            self._process_namespace_dependency(dependency, dependency_uninstalled)
        else:
            # zip_url or repo dependency
            self.install_queue.append(dependency)

    # Pending uninstalls are only permitted when explicitly enabled.
    if self.uninstall_queue and not self.options["allow_uninstalls"]:
        raise TaskOptionsError(
            "Updating dependencies would require uninstalling these packages "
            "but uninstalls are not enabled: {}".format(
                ", ".join(dep["namespace"] for dep in self.uninstall_queue)
            )
        )
def _validate_options(self):
    """Expand glob paths and verify that library-like input files exist.

    Raises:
        TaskOptionsError: naming every missing input file.
    """
    super(RobotLibDoc, self)._validate_options()
    self.options["path"] = process_glob_list_arg(self.options["path"])

    # Attempt to collect all files that don't match existing
    # files. Note: "path" could be a library module path (for example,
    # cumulusci.robotframework.CumulusCI) so we only do this check for
    # files that end in known library suffixes (.py, .robot, .resource).
    bad_files = []
    for path in self.options["path"]:
        # Fix: the basename from splitext was unused; keep only the extension.
        extension = os.path.splitext(path)[1]
        if extension in (".py", ".robot", ".resource") and not os.path.exists(path):
            bad_files.append(path)

    if bad_files:
        if len(bad_files) == 1:
            error_message = "Unable to find the input file '{}'".format(
                bad_files[0])
        else:
            files = ", ".join(
                ["'{}'".format(filename) for filename in bad_files])
            error_message = "Unable to find the following input files: {}".format(
                files)
        raise TaskOptionsError(error_message)
def _set_record_types(self):
    """Apply configured record_type visibility/default settings to the profile tree."""
    record_types = self.options.get('record_types')
    if not record_types:
        return

    for rt in record_types:
        # Replace namespace prefix tokens in the record type name.
        rt_name = rt['record_type'].format(**self.namespace_prefixes)

        # Locate the matching recordTypeVisibilities element.
        xpath = ".//sf:recordTypeVisibilities[sf:recordType='{}']".format(rt_name)
        elem = self.tree.find(xpath, self.namespaces)
        if elem is None:
            raise TaskOptionsError(
                "Record Type {} not found in retrieved Admin.profile".format(rt['record_type'])
            )

        # Update the visible and default flags from the option, with defaults.
        elem.find("sf:visible", self.namespaces).text = str(rt.get("visible", "true"))
        elem.find("sf:default", self.namespaces).text = str(rt.get("default", "false"))

        # personAccountDefault only exists on some profiles; set it when present.
        pa_default = elem.find("sf:personAccountDefault", self.namespaces)
        if pa_default is not None:
            pa_default.text = str(rt.get("person_account_default", "false"))
def _set_record_types(self, tree, api_name): record_types = self.options.get("record_types") or [] # If defaults are specified, # clear any pre-existing defaults if any("default" in rt for rt in record_types): for default in ("default", "personAccountDefault"): for elem in tree.findall("recordTypeVisibilities"): if elem.find(default): elem.find(default).text = "false" # Set recordTypeVisibilities for rt in record_types: # Replace namespace prefix tokens in rt name rt_prefixed = rt["record_type"].format(**self.namespace_prefixes) # Look for the recordTypeVisiblities element elem = tree.find("recordTypeVisibilities", recordType=rt_prefixed) if elem is None: raise TaskOptionsError( f"Record Type {rt['record_type']} not found in retrieved {api_name}.profile" ) # Set visible elem.visible.text = str(rt.get("visible", "true")).lower() # Set default elem.default.text = str(rt.get("default", "false")).lower() # Set person account default if element exists pa_default = elem.find("personAccountDefault") if pa_default is not None: pa_default.text = str(rt.get("person_account_default", "false")).lower()
def _get_test_classes(self):
    """Query the org for Apex test classes matching the configured name patterns."""
    if self.options["managed"]:
        namespace = self.options.get("namespace")
        if not namespace:
            raise TaskOptionsError(
                "Running tests in managed mode but no namespace available."
            )
        namespace = "'{}'".format(namespace)
    else:
        namespace = "null"

    # Split by commas to allow multiple class name matching options
    included_tests = [
        "Name LIKE '{}'".format(pattern)
        for pattern in self.options["test_name_match"].split(",")
        if pattern
    ]

    # Add any excludes to the where clause
    excluded_tests = [
        "(NOT Name LIKE '{}')".format(pattern)
        for pattern in self.options.get("test_name_exclude", "").split(",")
        if pattern
    ]

    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass " + "WHERE NamespacePrefix = {}".format(
        namespace)
    if included_tests:
        query += " AND ({})".format(" OR ".join(included_tests))
    if excluded_tests:
        query += " AND {}".format(" AND ".join(excluded_tests))

    # Run the query
    self.logger.info("Running query: {}".format(query))
    result = self.tooling.query_all(query)
    self.logger.info("Found {} test classes".format(result["totalSize"]))
    return result
def _init_task(self):
    """Resolve publish options, then load plan configs and labels."""
    super(Publish, self)._init_task()

    # NOTE(review): dry_run is not passed through process_bool_arg, so any
    # non-empty string (including "false") is truthy — confirm intended.
    self.dry_run = self.options.get("dry_run")
    self.publish = not self.dry_run and process_bool_arg(
        self.options.get("publish", False)
    )

    self.tag = self.options.get("tag")
    self.commit = self.options.get("commit")
    if not (self.tag or self.commit):
        raise TaskOptionsError("You must specify either the tag or commit option.")

    self.labels_path = self.options.get("labels_path", "metadeploy")
    if not os.path.exists(self.labels_path):  # pragma: no cover
        os.makedirs(self.labels_path)

    # A single named plan restricts publishing to that plan only.
    plan_name = self.options.get("plan")
    if plan_name:
        self.plan_configs = {
            plan_name: getattr(
                self.project_config, "plans__{}".format(plan_name)
            )
        }
    else:
        self.plan_configs = self.project_config.plans

    self._load_labels()
def _validate_options(self):
    """Require at least one Apex source: the ``path`` or ``apex`` option."""
    super(AnonymousApexTask, self)._validate_options()
    has_source = self.options.get("path") or self.options.get("apex")
    if not has_source:
        raise TaskOptionsError(
            "You must specify either the `path` or `apex` option."
        )