Code example #1
0
File: lint.py  Project: sdelements/sdetools
class Command(BaseCommand):
    """Lint command: scans target files and reports the SD Elements tasks
    whose context matches each file."""

    name = 'lint'
    help = 'SDE Lint tool scans project file and displays tasks that match the context of each file.'
    conf_syntax = 'target1 [target2 ...]'
    conf_help = 'target(s) are the directory/file to be scanned.'

    def configure(self):
        """Create the scanner and the SD Elements plugin wrapper for this run."""
        self.scanner = Scanner(self.config)
        self.plugin = PlugInExperience(self.config)

    def process_args(self):
        """Resolve scan targets (CLI args win over configured args) and hand
        them to the scanner.

        Raises UsageError if the scanner rejects the targets.
        """
        # Command-line arguments take precedence; fall back to config.
        targets = self.args or self.config['args']
        failure = self.scanner.set_targets(targets)
        if failure:
            raise UsageError(failure)
        return True

    def handle(self):
        """Fetch the compiled task list from SD Elements and run the scan."""
        tasks = self.plugin.get_compiled_task_list()
        self.scanner.set_content(tasks)
        self.scanner.scan()
Code example #2
0
    def __init__(self, config, alm_plugin):
        """Initialization of the Connector.

        Keyword arguments:
        config -- An SD Elements Plugin configuration object
        alm_plugin -- A plugin to connect to the ALM tool
        """
        self.config = config
        # Tasks excluded from synchronization; populated elsewhere — TODO confirm
        self.ignored_tasks = []
        self.sde_plugin = PlugInExperience(self.config)
        self.alm_plugin = alm_plugin
        # Register ALM-specific options onto the shared config object
        self._add_alm_config_options()
        self.emit = self.config.emit

        # Migration state flags; presumably flipped during sync — TODO confirm
        self.has_migrated = False
        self.migration_required = False
Code example #3
0
    def __init__(self, config, tool_name, supported_input_types=None):
        """Register the common tool-import configuration options.

        Keyword arguments:
        config -- An SD Elements Plugin configuration object
        tool_name -- Human readable tool name, used in option help text
        supported_input_types -- Optional map of input kind to supported
            report types, e.g. {'file': ['xml', 'csv']}
        """
        # None sentinel instead of a mutable `{}` default: a shared default
        # dict would be visible (and mutable) across all instances.
        if supported_input_types is None:
            supported_input_types = {}

        self.findings = []
        self.mapping = {}
        self.report_id = "Not specified"
        self.config = config
        self.emit = self.config.emit
        self.behaviour = 'replace'
        self.weakness_map_identifier = 'ID'  # default XML attribute with weakness identifier
        self.weakness_title = {}
        self.confidence = {}
        self.task_mapping = None
        self.taskstatuses = {}
        self.plugin = PlugInExperience(self.config)
        self.supported_input_types = supported_input_types

        if supported_input_types and 'file' in supported_input_types:
            # 'report_file' is required if the tool only supports files
            report_file_mandatory = len(supported_input_types.keys()) == 1

            self.config.opts.add(
                "report_file",
                # Typo fix: help text previously read "Common separated list"
                "Comma separated list of %s Report Files" % tool_name.capitalize(),
                "x", default=None if report_file_mandatory else ''
            )
            self.config.opts.add(
                "report_type",
                "%s Report Type: %s|auto" % (tool_name.capitalize(), ', '.join(supported_input_types['file'])),
                default="auto"
            )
        self.config.opts.add(
            "mapping_file",
            "Task ID -> Weakness mapping (default=%s)" % self.get_default_mapping_file(),
            "m", ''
        )
        self.config.opts.add(
            "import_behaviour",
            "One of the following: %s" % ', '.join(self.VALID_IMPORT_BEHAVIOUR),
            default="replace"
        )
        self.config.opts.add(
            'task_status_mapping',
            'Update task status based on verification. Provide a mapping of (%s) to a task status slug'
            '(JSON encoded dictionary of strings)' % ', '.join(self.VALID_VERIFICATION_MAP.keys()),
            default=''
        )
        self.config.opts.add("flaws_only", "Only update tasks identified having flaws. (True | False)", "z", "False")
        self.config.opts.add("trial_run", "Trial run only: (True | False)", "t", "False")
Code example #4
0
File: modify_notes.py  Project: sdelements/sdetools
class Command(BaseCommand):
    """Search and replace text in SD Elements task notes (ide and text notes)."""

    help = 'Search and replace text in task notes'
    sde_plugin = None

    def configure(self):
        """Create the SDE plugin wrapper and register the command options."""
        self.sde_plugin = PlugInExperience(self.config)

        self.config.opts.add('search_string', "Search string to find in a task note")
        self.config.opts.add('replace_string', "Replacement string")

    def sde_connect(self):
        """Connect to SD Elements; wrap API failures in a friendly Error.

        Raises:
            Error -- if configure() was never called, or the connection fails.
        """
        if not self.sde_plugin:
            raise Error('Requires initialization')
        try:
            self.sde_plugin.connect()
        except APIError as err:
            raise Error('Unable to connect to SD Elements. Please review URL, id,'
                        ' and password in configuration file. Reason: %s' % (str(err)))

    def _substitute_notes(self, notes, update_note):
        """Apply the search/replace to each note, saving only changed notes.

        notes -- list of note dicts with 'id' and 'text' keys
        update_note -- callable(note_id, new_text) that persists the change
        """
        # NOTE: search_string is interpreted as a regular expression by
        # re.sub, not as a literal string — metacharacters are live.
        for note in notes:
            new_text = re.sub(self.config['search_string'], self.config['replace_string'], note['text'])
            if new_text != note['text']:
                update_note(note['id'], new_text)

    def handle(self):
        """Run the search/replace across every task's ide and text notes."""
        if not self.config['search_string']:
            raise UsageError('Missing value for search_string')
        if not self.config['replace_string']:
            raise UsageError('Missing value for replace_string')

        self.sde_connect()
        tasks = self.sde_plugin.get_task_list()
        for task in tasks:
            task_notes = self.sde_plugin.get_task_notes(AlmConnector._extract_task_id(task['id']))
            # The two note kinds share identical replace logic; only the
            # persistence call differs.
            if 'ide' in task_notes:
                self._substitute_notes(task_notes['ide'], self.sde_plugin.update_task_ide_note)
            if 'text' in task_notes:
                self._substitute_notes(task_notes['text'], self.sde_plugin.update_task_text_note)

        return True
Code example #5
0
File: lint.py  Project: sdelements/sdetools
 def configure(self):
     """Set up the lint scanner and the SD Elements plugin wrapper."""
     self.scanner = Scanner(self.config)
     self.plugin = PlugInExperience(self.config)
Code example #6
0
class BaseIntegrator(object):
    # Subclasses override these attributes
    TOOL_NAME = 'External tool'
    DEFAULT_MAPPING_FILE = None

    AVAILABLE_IMPORTERS = []  # Subclasses must fill this list
    VALID_IMPORT_BEHAVIOUR = ['replace', 'replace-scanner', 'combine']

    # An internal map of possible verification and acceptable status meanings
    VALID_VERIFICATION_MAP = {'pass': ['TODO', 'DONE'], 'partial': ['TODO', 'DONE'], 'fail': ['TODO']}

    def __init__(self, config, tool_name, supported_input_types={}):
        self.findings = []
        self.mapping = {}
        self.report_id = "Not specified"
        self.config = config
        self.emit = self.config.emit
        self.behaviour = 'replace'
        self.weakness_map_identifier = 'ID'  # default XML attribute with weakness identifier
        self.weakness_title = {}
        self.confidence = {}
        self.task_mapping = None
        self.taskstatuses = {}
        self.plugin = PlugInExperience(self.config)
        self.supported_input_types = supported_input_types

        if supported_input_types and 'file' in supported_input_types:
            # 'report_file' is required if the tool only supports files
            report_file_mandatory = len(supported_input_types.keys()) == 1

            self.config.opts.add(
                "report_file",
                "Common separated list of %s Report Files" % tool_name.capitalize(),
                "x", default=None if report_file_mandatory else ''
            )
            self.config.opts.add(
                "report_type",
                "%s Report Type: %s|auto" % (tool_name.capitalize(), ', '.join(supported_input_types['file'])),
                default="auto"
            )
        self.config.opts.add(
            "mapping_file",
            "Task ID -> Weakness mapping (default=%s)" % self.get_default_mapping_file(),
            "m", ''
        )
        self.config.opts.add(
            "import_behaviour",
            "One of the following: %s" % ', '.join(self.VALID_IMPORT_BEHAVIOUR),
            default="replace"
        )
        self.config.opts.add(
            'task_status_mapping',
            'Update task status based on verification. Provide a mapping of (%s) to a task status slug'
            '(JSON encoded dictionary of strings)' % ', '.join(self.VALID_VERIFICATION_MAP.keys()),
            default=''
        )
        self.config.opts.add("flaws_only", "Only update tasks identified having flaws. (True | False)", "z", "False")
        self.config.opts.add("trial_run", "Trial run only: (True | False)", "t", "False")

    def initialize(self):
        """
        This is a post init initialization. It needs to be called as the first
        function after configuration is processed (usually first call inside handler of
        the module)
        """
        self.config.process_boolean_config('flaws_only')
        self.config.process_boolean_config('trial_run')
        self.config.process_json_str_dict('task_status_mapping')

        if self.config['import_behaviour'] in self.VALID_IMPORT_BEHAVIOUR:
            self.behaviour = self.config['import_behaviour']
        else:
            raise UsageError('Invalid import_behaviour %s' % self.config['import_behaviour'])

        if self.config['task_status_mapping']:
            # Get the available system task statuses and their meanings
            self._setup_taskstatuses()

            # Validate the mapping against the available system statuses
            # Sanity check the mapping
            #     - pass, partial may mark tasks with a status having meaning TODO or DONE only
            #     - fail may mark tasks with a status having meaning TODO only.
            for verification, status_name in self.config['task_status_mapping'].iteritems():
                if verification not in self.VALID_VERIFICATION_MAP:
                    raise UsageError('Invalid task_status_mapping verification %s' % verification)

                if status_name not in self.taskstatuses:
                    raise UsageError('Invalid task_status_mapping status "%s" for verification "%s"' %
                                     (status_name, verification))

                if self.taskstatuses[status_name]['meaning'] not in self.VALID_VERIFICATION_MAP[verification]:
                        raise UsageError('Unexpected task_status_mapping status "%s" for verification "%s"' %
                                         (status_name, verification))

        # Validate the report_type config. If report_type is not auto, we will process only
        # the specified report_type, else we process all supported file types.
        if 'file' in self.supported_input_types:
            if self.config['report_type'] in self.supported_input_types['file']:
                self.supported_input_types['file'] = [self.config['report_type']]
            elif self.config['report_type'] != 'auto':
                raise UsageError('Invalid report_type %s' % self.config['report_type'])

            self.process_report_file_config()

    def get_default_mapping_file(self):
        return get_media_file_path(os.path.join('analysis', self.DEFAULT_MAPPING_FILE))

    def _setup_taskstatuses(self):
        statuses = self.plugin.get_taskstatuses()
        for status in statuses:
            self.taskstatuses[status['slug']] = status

    def detect_importer(self, report_file):
        for item in self.AVAILABLE_IMPORTERS:
            if item['importer'].can_parse_file(report_file):
                return item['importer']
        return None

    @staticmethod
    def _get_file_extension(file_path):
        return os.path.splitext(file_path)[1][1:]

    @abstractmethod
    def parse_report_file(self, report_file, report_type):
        """ Returns the raw findings and the report id for a single report file """

        return [], None

    def set_tool_name(self, tool_name):
        self.TOOL_NAME = tool_name

    def parse(self):
        _raw_findings = []
        _report_ids = []

        for report_file in self.config['report_file']:
            if self.config['report_type'] == 'auto':
                if not isinstance(report_file, basestring):
                    raise UsageError("On auto-detect mode, the file name needs to be specified.")
                report_type = self._get_file_extension(report_file)
            else:
                report_type = self.config['report_type']

            raw_findings, report_id = self.parse_report_file(report_file, report_type)

            _raw_findings.extend(raw_findings)

            if report_id:
                _report_ids.append(report_id)

        self.findings = _raw_findings

        if _report_ids:
            self.report_id = ', '.join(_report_ids)
        else:
            self.report_id = "Not specified"
            self.emit.info("Report ID not found in report: Using default.")

    def process_report_file_config(self):
        """
        If report files contains a directory path, find all possible files in that folder
        """
        if not self.config['report_file']:
            raise UsageError("Missing configuration option 'report_file'")

        if not isinstance(self.config['report_file'], basestring):
            # Should be a file object
            self.config['report_file'] = [self.config['report_file']]
        else:
            processed_report_files = []

            for file_path in self.config['report_file'].split(','):
                file_path = file_path.strip()
                file_name, file_ext = os.path.splitext(file_path)
                file_ext = file_ext[1:]

                if file_ext in self.supported_input_types['file']:
                    processed_report_files.extend(glob.glob(file_path))
                elif re.search('[*?]', file_ext):
                    # Run the glob and filter out unsupported file types
                    processed_report_files.extend([f for f in glob.iglob(file_path)
                                                  if self._get_file_extension(f) in self.supported_input_types['file']])
                elif not file_ext:
                    # Glob using our supported file types
                    if os.path.isdir(file_path):
                        _base_path = file_path + '/*'
                    else:
                        _base_path = file_name
                    for file_type in self.supported_input_types['file']:
                        processed_report_files.extend(glob.glob('%s.%s' % (_base_path, file_type)))
                else:
                    raise UsageError('%s does not match any supported file type(s): %s' %
                                     (file_path, self.supported_input_types['file']))
            if not processed_report_files:
                raise UsageError("Did not find any report files. Check if 'report_file' is configured properly.")
            else:
                self.config['report_file'] = processed_report_files

    def load_mapping_from_xml(self):

        self.task_mapping = Mapping(self.weakness_map_identifier)

        mapping_file = self.config['mapping_file'] or self.get_default_mapping_file()
        self.task_mapping.load_mapping(mapping_file)

        if not self.task_mapping.count():
            raise IntegrationError("No base mapping was found in file '%s'" % self.config['mapping_file'])

    def generate_findings(self):
        return []

    def unique_findings(self):
        """
        Return a map (task_id=> *flaw) based on list of findings (weakness)

        Where flaw is defined as:
            flaw[weaknesses]
            flaw[related_tasks]
        """
        unique_findings = {'nomap': []}
        for finding in self.generate_findings():
            weakness_id = finding['weakness_id']
            mapped_tasks = self.task_mapping.get_tasks(weakness_id)
            if not mapped_tasks:
                unique_findings['nomap'].append(weakness_id)
                continue
            for mapped_task in mapped_tasks:
                if mapped_task['id'] in unique_findings:
                    flaws = unique_findings[mapped_task['id']]
                else:
                    flaws = {'weaknesses': []}
                flaws['weaknesses'].append(finding)
                flaws['related_tasks'] = mapped_tasks
                unique_findings[mapped_task['id']] = flaws
        return unique_findings

    def task_exists_in_project_tasks(self, task_id, project_tasks):
        """
        Return True if task_id is present in the array of project_tasks, False otherwise

        task_id is of the form [^\d]+\d+
        project_tasks is an array of maps. Each map contains a key 'id' with a corresponding integer value
        """
        for task in project_tasks:
            task_search = re.search('^(\d+)-([^\d]+\d+)$', task['id'])
            if task_search:
                project_task_id = task_search.group(2)
                if project_task_id == task_id:
                    return True
        return False

    def import_findings(self):
        stats_failures_added = 0
        stats_api_errors = 0
        stats_total_skips = 0
        stats_total_skips_findings = 0
        stats_total_flaws_found = 0
        import_start_datetime = datetime.now()

        logger.info("Integration underway for: %s" % self.report_id)
        logger.info("Mapped SD application/project: %s/%s" %
                    (self.config['sde_application'], self.config['sde_project']))

        if self.config['trial_run']:
            logger.info("Trial run only. No changes will be made")
        else:
            ret = self.plugin.create_analysis_session(self.report_id, self.TOOL_NAME)
            project_analysis_note_ref = ret['id']

        task_list = self.plugin.get_task_list()
        logger.debug("Retrieved %d tasks from %s/%s/%s" % (
                     len(task_list),
                     self.config['sde_businessunit'],
                     self.config['sde_application'],
                     self.config['sde_project']))

        unique_findings = self.unique_findings()
        missing_weakness_map = unique_findings['nomap']
        del unique_findings['nomap']

        task_ids = sorted(unique_findings.iterkeys())

        # Remap a finding to a different task (T193) if it maps to a task not found in the project
        for task_id in task_ids:
            finding = unique_findings[task_id]

            if not self.task_exists_in_project_tasks(task_id, task_list):
                logger.debug("Task %s not found in project tasks" % task_id)
                mapped_tasks = self.task_mapping.get_tasks("*")

                if not mapped_tasks:
                    continue

                new_task = mapped_tasks[0]  # use the first one

                if task_id == new_task['id']:
                    continue

                logger.info("Task %s was not found in the project, mapping it to the default task %s." %
                            (task_id, new_task['id']))

                if new_task['id'] not in unique_findings:
                    unique_findings[new_task['id']] = finding
                else:
                    for weakness in finding['weaknesses']:
                        unique_findings[new_task['id']]['weaknesses'].append(weakness)
                del unique_findings[task_id]

        task_ids = sorted(unique_findings.iterkeys())

        # Update the tasks' verification status for failure
        for task_id in task_ids:
            finding = unique_findings[task_id]

            stats_total_flaws_found += len(finding['weaknesses'])

            if not self.task_exists_in_project_tasks(task_id, task_list):
                logger.error("Task %s was not found in the project, skipping %d findings." %
                             (task_id, len(finding['weaknesses'])))
                stats_total_skips += 1
                stats_total_skips_findings += len(finding['weaknesses'])
                continue

            analysis_findings = []
            last_weakness = None
            weakness_finding = {}

            # Build the list of structured findings needed by the API
            for weakness in sorted(finding['weaknesses']):

                if 'description' in weakness:
                    weakness_description = weakness['description']
                else:
                    weakness_description = self.task_mapping.get_title_for_weakness(weakness['weakness_id'])
                if not weakness_description:
                    weakness_description = weakness['weakness_id']

                if last_weakness != weakness_description:
                    if len(weakness_finding.items()) > 0:
                        analysis_findings.append(weakness_finding)
                        weakness_finding = {}
                    weakness_finding['count'] = 0

                    mapped_weakness = self.task_mapping.get_weakness(weakness['weakness_id'])
                    if mapped_weakness:
                        cwe = mapped_weakness['cwe']
                        if cwe:
                            weakness_finding['cwe'] = cwe

                    weakness_finding['desc'] = weakness_description

                    last_weakness = weakness_description

                if 'count' in weakness:
                    weakness_finding['count'] += weakness['count']
                else:
                    weakness_finding['count'] += 1

            if len(finding.items()) > 0:
                analysis_findings.append(weakness_finding)

            task = self.task_mapping.get_task(task_id)
            if task:
                finding_confidence = task['confidence']
            else:
                finding_confidence = 'low'

            # Send the finding details, if any, to the API
            if not self.config['trial_run']:
                try:
                    ret = self.plugin.add_analysis_note(task_id, project_analysis_note_ref,
                                                        finding_confidence, analysis_findings,
                                                        self.behaviour, self.config['task_status_mapping'])
                    logger.debug("Marked %s as FAILURE with %s confidence" % (task_id, finding_confidence))
                    stats_failures_added += 1
                except APIError as e:
                    logger.exception("Unable to mark %s as FAILURE - Reason: %s" % (task_id, str(e)))
                    self.emit.error("API Error: Unable to mark %s as FAILURE. Skipping ..." % (task_id))
                    stats_api_errors += 1

        stats_passes_added = 0

        affected_tasks = []
        noflaw_tasks = []

        # Sift through the SDE tasks for any non-failures
        for task in task_list:
            task_search = re.search('^(\d+)-([^\d]+\d+)$', task['id'])
            if task_search:
                task_id = task_search.group(2)

                # Skip certain tasks unless they are explicitly mapped
                if not self.task_mapping.contains_task(task_id):
                    continue

                # The tool found a weakness that maps to the task
                if unique_findings.has_key(task_id):
                    affected_tasks.append(task_id)
                    continue

                # The tool found nothing related to the task
                noflaw_tasks.append(task_id)

        # Mark non-failures as PASS
        if not self.config['flaws_only']:

            for task_id in noflaw_tasks:

                task = self.task_mapping.get_task(task_id)
                if task:
                    finding_confidence = task['confidence']
                else:
                    finding_confidence = 'low'

                successful_verification_update = False

                if self.config['trial_run']:
                    successful_verification_update = True
                else:
                    analysis_findings = []

                    try:
                        self.plugin.add_analysis_note(task_id, project_analysis_note_ref, finding_confidence,
                                                      analysis_findings, self.behaviour,
                                                      self.config['task_status_mapping'])
                    except APIError as e:
                        logger.exception("Unable to mark %s as PASS - Reason: %s" % (task_id, str(e)))
                        self.emit.error("API Error: Unable to mark %s as PASS. Skipping ..." % (task_id))
                        stats_api_errors += 1
                    else:
                        successful_verification_update = True

                if successful_verification_update:
                    logger.info("Marked %s as PASS with %s confidence" % (task_id, finding_confidence))
                    stats_passes_added += 1

        if missing_weakness_map:
            self.emit.error("Could not map %s flaws" % (len(missing_weakness_map)),
                            err_type='unmapped_weakness',
                            weakness_list=missing_weakness_map)
        else:
            self.emit.info("All flaws successfully mapped to tasks.")

        results = {}
        results['total_flaws_found'] = (stats_total_flaws_found, 'Total Flaw Types Found')
        results['tasks_marked_fail'] = (stats_failures_added, 'Number of Tasks marked as FAILED')
        results['tasks_without_findings'] = (noflaw_tasks, 'Number of Tasks in the project without any flaws')
        if stats_total_skips:
            results['skipped_flaws'] = (
                stats_total_skips_findings,
                'Number of flaws skipped because the related task was not found in the project'
            )
            results['skipped_tasks'] = (stats_total_skips, 'Number of tasks with flaws not found in project')

        # We queue the information to be sent along the close emit
        self.emit.queue(results=results)

        return IntegrationResult(import_start_datetime=import_start_datetime,
                                 import_finish_datetime=datetime.now(),
                                 affected_tasks=affected_tasks,
                                 noflaw_tasks=noflaw_tasks,
                                 error_count=stats_api_errors,
                                 error_weaknesses_unmapped=len(missing_weakness_map))
Code example #7
0
File: modify_notes.py  Project: sdelements/sdetools
    def configure(self):
        """Create the SDE plugin wrapper and register the command options."""
        self.sde_plugin = PlugInExperience(self.config)

        self.config.opts.add('search_string', "Search string to find in a task note")
        self.config.opts.add('replace_string', "Replacement string")
Code example #8
0
class AlmConnector(object):
    """
    Abstract base class for connectors to Application Lifecycle
    Management tools such as JIRA, Team Foundation Server, Rally, etc.

    Subclasses implement the alm_* abstract methods for a specific tool;
    this class provides the SD Elements side and the shared sync logic.
    """
    # This needs to be overwritten
    alm_name = 'ALM Module'
    # Configuration key holding the SDE->ALM priority mapping
    ALM_PRIORITY_MAP = 'alm_priority_map'
    # Valid values for the sde_verification_filter option
    VERIFICATION_STATUSES = ['none', 'partial', 'pass', 'fail']
    # Valid values for the test_alm option
    TEST_OPTIONS = ['server', 'project', 'settings']
    # Status meanings recognized by SD Elements
    STANDARD_STATUS_LIST = ['TODO', 'DONE', 'NA']
    # Macros allowed in the alm_title_format option
    FIELD_OPTIONS = ['task_id', 'title', 'context', 'application', 'project']
    DEFAULT_TITLE_FORMAT = '${task_id} ${title}'

    default_issue_template = {}
    default_priority_map = None
    feature_custom_lookup = False  # Assume the connector does not support custom lookups
    supports_references = False  # By default alm connections don't support task references
    sync_titles_only = False  # When True, only issue titles are synchronized
    user_friendly_name_map = None

    # This is an abstract base class
    __metaclass__ = abc.ABCMeta

    def __init__(self, config, alm_plugin):
        """Initialize the connector.

        Keyword arguments:
        config -- An SD Elements Plugin configuration object
        alm_plugin -- A plugin to connect to the ALM tool
        """
        self.config = config
        self.alm_plugin = alm_plugin
        self.sde_plugin = PlugInExperience(self.config)
        self.emit = self.config.emit
        self.ignored_tasks = []

        # Register all ALM-related configuration options up front
        self._add_alm_config_options()

        # Reference-migration state, populated later by _check_has_migrated()
        self.has_migrated = False
        self.migration_required = False

    def _add_alm_config_options(self):
        """ Adds ALM config options to the config file"""
        self.config.opts.add('alm_phases', 'Phases to sync '
                             '(comma separated list, e.g. requirements,testing)',
                             default='requirements,architecture-design,development')
        self.config.opts.add('sde_statuses_in_scope', 'SDE statuses for adding to %s '
                             '(comma separated %s)' % (self.alm_name, ','.join(AlmConnector.STANDARD_STATUS_LIST)),
                             default='TODO')
        self.config.opts.add('sde_min_priority', 'Minimum SDE priority in scope',
                             default='7')
        self.config.opts.add('sde_tags_filter', 'Filter project tasks by tag (tag1, tag2)',
                             default='')
        self.config.opts.add('sde_verification_filter', 'Filter project tasks by verification statuses. Valid statuses '
                             'are: ' + ', '.join(AlmConnector.VERIFICATION_STATUSES),
                             default='')
        self.config.opts.add('how_tos_in_scope', 'Whether or not HowTos should be included',
                             default='False')
        self.config.opts.add('selected_tasks', 'Optionally limit the sync to certain tasks '
                             '(comma separated, e.g. T12,T13). Note: Overrides other selections.',
                             default='')
        self.config.opts.add('alm_project', 'Project in %s' % self.alm_name,
                             default='')
        self.config.opts.add('conflict_policy', 'Conflict policy to use',
                             default='alm')
        self.config.opts.add('alm_context', 'Add additional context to issues created in %s' % self.alm_name,
                             default='')
        self.config.opts.add('start_fresh', 'Delete any matching issues in %s' % self.alm_name,
                             default='False')
        self.config.opts.add('show_progress', 'Show progress',
                             default='False')
        self.config.opts.add('alm_title_format', 'Task title format in %s. May be composed of: %s' % (
                             self.alm_name, ','.join(AlmConnector.FIELD_OPTIONS)),
                             default=AlmConnector.DEFAULT_TITLE_FORMAT)
        self.config.opts.add('test_alm', 'Test Alm "server", "project" or "settings" configuration only',
                             default='')
        self.config.opts.add('alm_standard_workflow', 'Standard workflow in ALM?',
                             default='True')
        self.config.opts.add('alm_custom_fields',
                             'Customized fields to include when creating a task in %s '
                             '(JSON encoded dictionary of strings)' % self.alm_name,
                             default='')
        self.config.opts.add('alm_reference_context', 'Context to use to track external task references',
                             default='')

        if self.feature_custom_lookup:
            self.config.opts.add('alm_custom_lookup_fields',
                                 'Custom fields and values to use when finding a task in %s '
                                 '(JSON encoded dictionary of strings)' % self.alm_name,
                                 default='')

        if self.default_priority_map:
            self.config.opts.add(self.ALM_PRIORITY_MAP, 'Customized map from priority in SDE to %s '
                                 '(JSON encoded dictionary of strings)' % self.alm_name,
                                 default='')

    def initialize(self):
        """
        Verify that the configuration options are set properly.

        Raises UsageError or AlmException when a configuration value is
        missing or invalid.
        """

        # Note: This will consider space as empty due to strip
        # We do this before checking if the config is non-empty later
        self.config.process_list_config('selected_tasks')
        for task in self.config['selected_tasks']:
            if not RE_TASK_IDS.match(task):
                raise UsageError('Invalid Task ID: %s' % task)

        self.config.process_list_config('sde_verification_filter')
        for verification_status in self.config['sde_verification_filter']:
            if verification_status not in AlmConnector.VERIFICATION_STATUSES:
                raise UsageError('Invalid status specified in sde_verification_filter: %s' % verification_status)

        # Phase/status/tag filters only apply when no explicit task list is given
        if not self.config['selected_tasks']:
            self.config.process_list_config('alm_phases')
            if not self.config['alm_phases']:
                raise AlmException('Missing alm_phases in configuration')

            self.config.process_list_config('sde_statuses_in_scope')
            if not self.config['sde_statuses_in_scope']:
                raise AlmException('Missing the SD Elements statuses in scope')

            for status in self.config['sde_statuses_in_scope']:
                if status not in AlmConnector.STANDARD_STATUS_LIST:
                    raise AlmException('Invalid status specified in sde_statuses_in_scope')

            self.config.process_list_config('sde_tags_filter')

        # Empty/None values fall through the membership test and raise as well
        if self.config['conflict_policy'] not in ('alm', 'sde', 'timestamp'):
            raise AlmException('Missing or incorrect conflict_policy '
                               'in configuration. Valid values are '
                               'alm, sde, or timestamp.')

        if self.config['conflict_policy'] == 'timestamp' and not self.alm_supports_timestamp_sync():
            raise AlmException('ALM does not support the timestamp conflict policy. Please use sde or alm')

        if self.config['sde_min_priority'] is not None:
            bad_priority_msg = 'Incorrect sde_min_priority specified in configuration. Valid values are > 0 '
            bad_priority_msg += 'and <= 10'

            try:
                self.config['sde_min_priority'] = int(self.config['sde_min_priority'])
            except (TypeError, ValueError):
                # Was a bare except, which would also swallow SystemExit and
                # KeyboardInterrupt; only conversion failures belong here.
                raise AlmException(bad_priority_msg)

            if self.config['sde_min_priority'] < 1 or self.config['sde_min_priority'] > 10:
                raise AlmException(bad_priority_msg)
        else:
            self.config['sde_min_priority'] = 1

        if self.config['test_alm'] and self.config['test_alm'] not in AlmConnector.TEST_OPTIONS:
            raise AlmException('Incorrect test_alm configuration setting. '
                               'Valid values are: %s' % ','.join(AlmConnector.TEST_OPTIONS))

        self.config.process_boolean_config('start_fresh')
        self.config.process_boolean_config('show_progress')
        self.config.process_boolean_config('how_tos_in_scope')
        self.config.process_boolean_config('alm_standard_workflow')
        self.config.process_json_dict('alm_custom_fields')

        if self.feature_custom_lookup:
            self.config.process_json_dict('alm_custom_lookup_fields')

        if self.config['start_fresh'] and not self.alm_supports_delete():
            raise AlmException('Incorrect start_fresh configuration: task deletion is not supported.')

        self.alm_plugin.post_conf_init()

        if self.ALM_PRIORITY_MAP in self.config:
            self.config.process_json_str_dict(self.ALM_PRIORITY_MAP)
            if not self.config[self.ALM_PRIORITY_MAP]:
                self.config[self.ALM_PRIORITY_MAP] = self.default_priority_map
            self._validate_alm_priority_map()

        # The title format must only reference known fields and must always
        # include both the task id and the title
        matches = re.findall(r'\$\{?([a-zA-Z_]+)\}?', self.config['alm_title_format'])
        if not matches:
            raise AlmException('Incorrect alm_title_format configuration')
        if 'title' not in matches:
            raise AlmException('Incorrect alm_title_format configuration. Missing ${title}')
        if 'task_id' not in matches:
            raise AlmException('Incorrect alm_title_format configuration. Missing ${task_id}')
        if 'context' in matches and not self.config['alm_context']:
            raise AlmException('Missing alm_context in configuration')

        for match in matches:
            if match not in AlmConnector.FIELD_OPTIONS:
                raise AlmException('Incorrect alm_title_format configuration. Invalid field: ${%s}' % match)

        logger.info('*** AlmConnector initialized ***')

    def _transform_value(self, value, mapping):
        """
        Recursively substitute configuration macros (${name}) in *value*.

        value -- a string (macros substituted via string.Template) or a
                 list of values, each transformed in turn (nested lists
                 are flattened one level into the result).
        mapping -- dictionary of macro name -> substitution value.

        Raises AlmException when a referenced macro has no value, and
        TypeError for unsupported value types.
        """
        if isinstance(value, list):
            new_list = []
            for v in value:
                new_v = self._transform_value(v, mapping)
                if isinstance(new_v, list):
                    new_list.extend(new_v)
                else:
                    # Bug fix: append the already-transformed value instead of
                    # transforming v a second time
                    new_list.append(new_v)
            return new_list
        elif isinstance(value, basestring):
            macros = re.findall(r'\$\{?([a-zA-Z_]+)\}?', value)
            for required_macro in macros:
                if required_macro not in mapping or not mapping[required_macro]:
                    raise AlmException("Missing value for configuration macro ${%s}" % required_macro)
                # the first non string macro that this finds will be returned
                # hence taking over the entire field
                if not isinstance(mapping[required_macro], basestring):
                    return mapping[required_macro]

            return Template(value).substitute(mapping).strip()
        else:
            raise TypeError('Unsupported type, cannot transform value: %s' % value)

    def transform_config_value(self, key, mapping):
        """
        Perform macro substitutions on a configuration entry.
         - key - Configuration key (its value must be a dictionary)
         - mapping - dictionary of macro->substitution values
        """
        for field in self.config[key]:
            self.config[key][field] = self._transform_value(self.config[key][field], mapping)

    def alm_connect(self):
        self.alm_connect_server()

        if self.config['test_alm'] == 'server':
            return

        self.alm_connect_project()

        if self.config['test_alm'] == 'project':
            return

        self.alm_validate_configurations()

    @abstractmethod
    def alm_connect_server(self):
        """ Sets up a connection to the ALM tool.

        Called before alm_connect_project(); subclasses must implement it.

        Raises an AlmException on encountering an error
        """
        pass

    def alm_validate_configurations(self):
        """ Validates alm-specific configurations

        No-op by default; subclasses override to add tool-specific checks.

        Raises an AlmException on encountering an error
        """
        pass

    @abstractmethod
    def alm_connect_project(self):
        """ Sets up a connection to the project in the ALM tool.

        Called after alm_connect_server(); subclasses must implement it.

        Raises an AlmException on encountering an error
        """
        pass

    def alm_get_task_by_reference(self, task, reference):
        """ Returns an AlmTask that represents the value of this
        SD Elements task in the ALM, or None if the task doesn't exist

        Default returns None; connectors that set supports_references
        override this to look the task up by its stored reference.

        Raises an AlmException on encountering an error

        Keyword arguments:
        task  -- An SDE task
        reference -- A task reference
        """
        return None

    def alm_get_task_legacy(self, task):
        """ Returns an AlmTask that represents the value of this
        SD Elements task in the ALM, or None if the task doesn't exist

        Legacy lookup used when task references are unavailable
        (e.g. by searching on the formatted task title).

        Raises an AlmException on encountering an error

        Keyword arguments:
        task  -- An SDE task
        """
        return None

    def alm_get_task(self, task):
        """ Look up the ALM issue for an SDE task.

        Prefers the stored task reference when the connector supports
        references; falls back to the legacy title-based lookup otherwise.
        Returns an AlmTask or None if the task doesn't exist.

        Raises an AlmException on encountering an error

        Keyword arguments:
        task  -- An SDE task
        """
        reference = self._get_matching_reference(task)

        if self.supports_references and reference:
            logger.debug('Looking up task %s via its reference' % task['task_id'])
            return self.alm_get_task_by_reference(task, reference)

        if self.supports_references and self.has_migrated:
            # After migration, a missing reference means the task was never synced
            return None

        logger.debug('Looking up task %s via legacy lookup' % task['task_id'])
        return self.alm_get_task_legacy(task)

    @abstractmethod
    def alm_add_task(self, task):
        """ Adds SD Elements task to the ALM tool.

        Returns a string representing the task in the ALM tool,
        or None if that's not possible. This string will be
        added to a note for the task.

        Raises an AlmException on encountering an error.

        Keyword arguments:
        task  -- An SDE task
        """
        pass

    def alm_supports_timestamp_sync(self):
        """
        Returns True if we can retrieve the timestamp of the
        last status change on a Task.
        """
        return False

    def alm_supports_delete(self):
        """ Returns True if Task Delete is supported by the ALM
        """
        delete_method = getattr(self, "alm_remove_task", None)
        if delete_method:
            return callable(delete_method)
        return False

    @abstractmethod
    def alm_update_task_status(self, task, status):
        """ Updates the specified task in the ALM tool with a new status

        Raises an AlmException on encountering an error

        Keyword arguments:
        task  -- An AlmTask representing the task to be updated
        status -- A string specifying the new status. Either 'DONE', 'TODO',
                  or 'NA'
        """
        pass

    @abstractmethod
    def alm_disconnect(self):
        """ Attempt to disconnect from ALM, if necessary

        Called at the end of a sync run so connectors can release resources.

        Raises an AlmException on encountering an error
        """
        pass

    def sde_connect(self):
        """ Connects to the SD Elements server specified in the plugin object,
        then validates the SDE configuration and loads the status constants.

        Raises an AlmException on encountering an error
        """
        if not self.sde_plugin:
            raise AlmException('Requires initialization')

        try:
            self.sde_plugin.connect()
        except APIError as error:
            raise AlmException('Unable to connect to SD Elements. Please review URL, id,'
                               ' and password in configuration file. Reason: %s' % (str(error)))

        self.sde_validate_configuration()
        self.init_statuses()

    def init_statuses(self):
        # Set up the STATUS constant by making an api call
        buckets = {'DONE': [], 'NA': [], 'TODO': []}

        for status in self.sde_plugin.get_taskstatuses():
            setattr(STATUS, status['slug'].encode('utf-8'), status['id'])
            meaning = status['meaning']
            if meaning not in ('DONE', 'NA'):
                # Anything that is not DONE/NA counts as TODO
                meaning = 'TODO'
            buckets[meaning].append(status['id'])

        setattr(STATUS, 'DONE_SET', set(buckets['DONE']))
        setattr(STATUS, 'TODO_SET', set(buckets['TODO']))
        setattr(STATUS, 'NA_SET', set(buckets['NA']))

    def sde_validate_configuration(self):
        """ Validate selected phases, if applicable """
        if not self.config['selected_tasks']:
            result = self.sde_plugin.get_phases()
            if not result:
                raise AlmException('Unable to retrieve phases from SD Elements')

            all_phases_slugs = [phase['slug'] for phase in result]
            for selected_phase in self.config['alm_phases']:
                if selected_phase not in all_phases_slugs:
                    raise AlmException('Incorrect alm_phase configuration: %s is not a valid phase' % selected_phase)

    def is_sde_connected(self):
        """ Returns true if currently connected to SD Elements"""
        if not self.sde_plugin:
            return False
        return self.sde_plugin.connected

    def _validate_alm_priority_map(self):
        """
        Validate a priority mapping dictionary. The mapping specifies which value to use in another system
        based on the SD Elements task's priority numeric value. Priorities are numeric values from the range 1 to 10.

        This method ensures that:

         1. Keys represent a single priority {'10':'Critical'} or a range of priorities {'7-10':'High'}
         2. Priorities 1 to 10 are represented exactly once in the dictionary keys
         3. Mappings containing a range of priorities {'7-10':'High'} must have their values ordered from low to high.

        Valid example:
        {'1-3': 'Low', '4-6': 'Medium', '7-10': 'High'}

        All SD Elements tasks with priority 1 to 3 are to be mapped to the value "Low" in the other system.
        All SD Elements tasks with priority 4 to 6 are to be mapped to the value "Medium" in the other system.
        All SD Elements tasks with priority 7 to 10 are to be mapped to the value "High" in the other system.

        Invalid examples:
        {'1-3': 'Low', '4-6': 'Medium', '7-9': 'High'}
        {'1-3': 'Low', '4-6': 'Medium', '6-10': 'High'}
        {'3-1': 'Low', '6-4': 'Medium', '10-7': 'High'}

        Raises AlmException on the first violation found.
        """
        if self.ALM_PRIORITY_MAP not in self.config:
            return

        # Every priority value covered so far; used to detect duplicates
        # and, at the end, gaps in the 1..10 coverage.
        priority_set = set()
        for key, value in self.config[self.ALM_PRIORITY_MAP].iteritems():
            if not RE_MAP_RANGE_KEY.match(key):
                raise AlmException('Unable to process %s (not a JSON dictionary). '
                                   'Reason: Invalid range key %s' % (self.ALM_PRIORITY_MAP, key))

            if '-' in key:
                # Range key, e.g. '7-10': expand and check each covered priority
                lrange, hrange = key.split('-')
                lrange = int(lrange)
                hrange = int(hrange)
                if lrange >= hrange:
                    raise AlmException('Invalid %s entry %s => %s: Priority %d should be less than %d' %
                                       (self.ALM_PRIORITY_MAP, key, value, lrange, hrange))
                for mapped_priority in range(lrange, hrange + 1):
                    if mapped_priority in priority_set:
                        raise AlmException('Invalid %s entry %s => %s: Priority %d is duplicated' %
                                           (self.ALM_PRIORITY_MAP, key, value, mapped_priority))
                    priority_set.add(mapped_priority)
            else:
                # Single-priority key, e.g. '10'
                key_value = int(key)
                if key_value in priority_set:
                    raise AlmException('Invalid %s entry %s => %s: Priority %d is duplicated' %
                                       (self.ALM_PRIORITY_MAP, key, value, key_value))
                priority_set.add(key_value)

        # Finally, every priority 1..10 must be covered exactly once
        for mapped_priority in xrange(1, 11):
            if mapped_priority not in priority_set:
                raise AlmException('Invalid %s: missing a value mapping for priority %d' %
                                   (self.ALM_PRIORITY_MAP, mapped_priority))

    @staticmethod
    def _extract_task_id(full_task_id):
        """
        Extract the task id e.g. "CT213" from the full project_id-task_id string e.g. "123-CT213"
        """
        task_id = None
        task_search = re.search('^(\d+)-([^\d]+\d+)$', full_task_id)
        if task_search:
            task_id = task_search.group(2)
        return task_id

    @staticmethod
    def get_alm_task_title(config, task, fixed=False):
        """
        Return the user-defined formatted fixed or full alm title for an SDE task. The fixed title can be used
        to uniquely identify an issue inside the ALM, allowing one ALM project to sync
        with more than one instance of the same SDE task
        """
        task_id = AlmConnector._extract_task_id(task['id'])

        # Remove the task id from the title to get the actual title
        title = re.sub('^%s:\s+' % task_id, '', task['title'])

        mapping = {
            'task_id': '%s:' % task_id,  # Force a suffix to avoid finding "T222" when searching with "T2"
            'context': config['alm_context'],
            'application': config['sde_application'],
            'project': config['sde_project'],
            'title': title,
        }
        if fixed:
            mapping['title'] = ''

        return Template(config['alm_title_format']).substitute(mapping).strip()

    def sde_get_project(self):
        """ Returns a single project from SD Elements with the given project_id

        Raises an AlmException on encountering an error
        """

        if not self.sde_plugin:
            raise AlmException('Requires initialization')

        try:
            return self.sde_plugin.get_project()
        except APIError as err:
            logger.error(err)
            raise AlmException('Unable to get project from SD Elements. Please ensure'
                               ' the project is valid and that the user has'
                               ' sufficient permission to access the project. Reason: %s' % (str(err)))

    def sde_get_tasks(self, accepted=True):
        """ Gets all tasks for project in SD Elements

        Raises an AlmException on encountering an error
        """

        if not self.sde_plugin:
            raise AlmException('Requires initialization')

        try:
            if self.config['selected_tasks']:
                return self.sde_plugin.get_task_list(accepted=accepted)
            else:
                return self.sde_plugin.get_task_list(priority__gte=self.config['sde_min_priority'], accepted=accepted)
        except APIError as err:
            logger.error(err)
            raise AlmException('Unable to get tasks from SD Elements. Please ensure'
                               ' the application and project are valid and that the user has'
                               ' sufficient permission to access the project. Reason: %s' % (str(err)))

    def sde_get_task(self, task_id):
        """ Returns a single task from SD Elements w given task_id

        Raises an AlmException if task doesn't exist or any other error
        """
        if not self.sde_plugin:
            raise AlmException('Requires initialization')

        try:
            return self.sde_plugin.get_task(task_id)
        except APIError as err:
            logger.error(err)
            raise AlmException('Unable to get task in SD Elements. Reason: %s' % (str(err)))

    def _add_note(self, task_id, note_msg):
        """ Convenience method to add a text note to an SDE task.

        Wraps any APIError from the server in an AlmException.
        """
        if not self.sde_plugin:
            raise AlmException('Requires initialization')

        try:
            self.sde_plugin.add_task_text_note(task_id, note_msg)
        except APIError as error:
            logger.error(error)
            raise AlmException('Unable to add note in SD Elements. Reason: %s' % (str(error)))

        logger.debug('Successfully set note for task %s' % task_id)

    def in_scope(self, task):
        """ Check to see if an SDE task is in scope

        For example, has one of the appropriate phases
        """
        tid = self._extract_task_id(task['id'])
        if self.config['selected_tasks']:
            return tid in self.config['selected_tasks']

        in_scope = (task['phase']['slug'] in self.config['alm_phases'] and
                    task['priority'] >= self.config['sde_min_priority'])

        if in_scope and self.config['sde_tags_filter']:
            in_scope = in_scope and set(self.config['sde_tags_filter']).issubset(task['tags'])

        if in_scope and self.config['sde_verification_filter']:
            # Translate null to 'none' to match filter options
            if not task['verification_status']:
                task['verification_status'] = 'none'
            in_scope = in_scope and task['verification_status'] in self.config['sde_verification_filter']

        return in_scope

    def sde_update_task_status(self, task, status):
        """ Updates the status of the given task in SD Elements

        A note recording the change is then added on a best-effort basis:
        a failure to add the note is logged but does not fail the update.

        Raises an AlmException on encountering an error

        Keyword arguments:
        task  -- An SD Elements task representing the task to be updated
        status -- A string specifying the new status. Either STATUS.DONE, STATUS.TODO,
                  or STATUS.NA
        """
        if not self.sde_plugin:
            raise AlmException('Requires initialization')

        logger.debug('Attempting to update task %s to %s' % (task['id'], status))

        try:
            self.sde_plugin.update_task_status(task['id'], status)
        except APIError as err:
            logger.error(err)
            raise AlmException('Unable to update the task status in SD '
                               'Elements. Either the task no longer '
                               'exists, there was a problem connecting '
                               'to the server, or the status was invalid')
        # Map the raw status id onto its human-readable meaning for the note
        readable_status = 'DONE'
        if status in STATUS.TODO_SET:
            readable_status = 'TODO'
        elif status in STATUS.NA_SET:
            readable_status = 'NA'
        logger.info('Status for task %s successfully set in SD Elements' % task['id'])

        note_msg = 'Task status changed to %s via %s' % (readable_status, self.alm_name)
        try:
            self._add_note(task['id'], note_msg)
        except (APIError, AlmException) as err:
            # Bug fix: _add_note re-raises APIError as AlmException, so catching
            # only APIError let a note failure abort the whole status update
            # instead of being logged and ignored as intended.
            logger.error('Unable to set a note to mark status '
                         'for %s to %s. Reason: %s' % (task['id'], status, str(err)))
    def convert_markdown_to_alm(self, content, ref):
        return content

    def sde_get_task_content(self, task):
        """ Build the body text for an ALM ticket/defect/story from an SDE task.

        Includes the task text, a link back to SD Elements and, when the
        how_tos_in_scope option is on, the HowTo implementations.

        Raises an AlmException on encountering an error

        Keyword arguments:
        task  -- An SD Elements task representing the task to enter in the
                 ALM
        """
        sections = ['%s\n\nImported from SD Elements: [%s](%s)' % (task['text'], task['url'], task['url'])]
        if self.config['how_tos_in_scope'] and task['how_tos']:
            sections.append('\n\n# How Tos:\n\n')
            for how_to in task['how_tos']:
                sections.append('## %s\n\n' % (how_to['title']))
                sections.append(how_to['text'] + '\n\n')
        content = ''.join(sections)

        # Rewrite code-download links into absolute URLs on the SDE server
        content = RE_CODE_DOWNLOAD.sub(r'https://%s/\1' % self.config['sde_server'], content)

        return self.convert_markdown_to_alm(content, ref=task['id'])

    def output_progress(self, percent):
        # Print sync progress to stdout when the show_progress option is on.
        # NOTE: Python 2 print statement; explicit flush so the progress
        # message appears immediately even when stdout is piped.
        if self.config['show_progress']:
            print str(percent) + "% complete"
            sys.stdout.flush()

    def status_match(self, alm_status, sde_status):
        """True when the ALM status agrees with the SDE status meaning.

        DONE and NA in SDE both correspond to DONE in the ALM; anything
        else must be TODO.
        """
        if sde_status in STATUS.DONE_SET or sde_status in STATUS.NA_SET:
            expected_alm_status = STATUS.DONE
        else:
            expected_alm_status = STATUS.TODO
        return alm_status == expected_alm_status

    def filter_tasks(self, tasks):
        return [task for task in tasks if self.in_scope(task)]

    def _determine_latest(self, alm_task, sde_task):
        """
        Used in synchronize() for the timestamp conflict policy to determine which
        task status takes precedence. Extracted from synchronize() so it is easier
        to test. Returns 'sde' or 'alm'.
        """
        if 'status_updated' not in sde_task:
            raise AlmException('SDE does not support timestamp conflict policy')
        if sde_task['status_updated'] is None:
            # No SDE timestamp available: the ALM wins by default
            return 'alm'

        sde_timestamp = parse(sde_task['status_updated'])
        alm_timestamp = alm_task.get_timestamp()
        logger.debug('Comparing timestamps for task %s - SDE: %s, ALM: %s' %
                     (sde_task['id'], str(sde_timestamp), str(alm_timestamp)))

        if sde_timestamp > alm_timestamp:
            return 'sde'
        return 'alm'

    def _check_supports_references(self, tasks):
        if not tasks:
            return

        for task in tasks:
            if 'references' in task:
                return

        self.supports_references = False

    def _check_has_migrated(self):
        alm_connection_id = self.config['alm_reference_context']
        alm_connection = self.sde_plugin.get_alm_connection(alm_connection_id)

        # SDE 4.0 and below does not have get_alm_connection method or has_migrated flag
        if not alm_connection:
            return

        self.has_migrated = alm_connection.get('has_migrated', False)
        self.migration_required = 'has_migrated' in alm_connection and not self.has_migrated

        if self.migration_required:
            logger.debug("ALM Connection supports references - will perform a migration")

        if self.has_migrated:
            logger.debug("ALM Connection has already migrated to using references")

    def _update_alm_migration_status(self, has_migrated):
        alm_connection_id = self.config['alm_reference_context']
        self.sde_plugin.update_alm_connection(alm_connection_id, has_migrated)

    def _make_reference_dict(self, task):
        return {
            'alm_connection': self.config['alm_reference_context'],
            'reference': str(task.reference),
            'name': str(task.name),
            'link': str(task.link)
        }

    def _create_task_reference(self, task_id, task):
        """
        Create a Task Reference via the SDE api and return the payload sent.
        """
        logger.debug('Creating reference for task %s' % task_id)
        reference_data = self._make_reference_dict(task)
        self.sde_plugin.add_task_reference(task_id, reference_data)
        return reference_data

    def _update_task_reference(self, task_id, task, reference):
        """
        Update a Task Reference via SDE api.
        """
        data = self._make_reference_dict(task)
        data.pop('alm_connection')
        self.sde_plugin.update_task_reference(task_id, reference, **data)
        return data

    def _remove_task_reference(self, task_id, reference_id):
        """
        Delete a Task Reference via SDE api.
        """
        logger.debug('Removing a task reference for %s' % task_id)
        self.sde_plugin.remove_task_reference(task_id, reference_id)

    def _remove_alm_task(self, sde_task, task_reference):
        """
        Given a task in SDE, remove it from the external system, along with
        its task reference record when references are supported.
        """
        existing_alm_task = self.alm_get_task(sde_task)
        if existing_alm_task:
            self.alm_remove_task(existing_alm_task)
        if self.supports_references and task_reference:
            self.sde_plugin.remove_task_reference(sde_task, task_reference['id'])

    def _reference_out_of_date(self, old_reference, new_reference):
        """
        Return True when a stored Task Reference no longer matches the ALM
        task (e.g. a Jira issue moved projects, so its URL changed).
        """
        current = self._make_reference_dict(new_reference)
        stored = dict(old_reference)
        # The stored copy carries an 'id' field that the freshly-built
        # reference never has; drop it before comparing.
        del stored['id']
        return stored != current

    def _apply_conflict_policy(self, task_id, sde_task, alm_task):
        """
        Resolve a status conflict between SDE and the ALM tool.

        The configured conflict policy decides which side wins: 'sde'
        pushes the SDE status into the ALM, 'timestamp' lets the most
        recently updated side win, anything else defaults to the ALM
        status winning.
        """
        policy = self.config['conflict_policy']
        if policy == 'sde':
            winner = 'sde'
        elif policy == 'timestamp':
            winner = self._determine_latest(alm_task, sde_task)
        else:
            winner = 'alm'

        alm_status = alm_task.get_status()

        if winner == 'alm':
            # ALM wins: reflect its status back into SD Elements.
            self.sde_update_task_status(sde_task, alm_status)
            updated_system = 'SD Elements'
            closed = alm_status in STATUS.DONE_SET or alm_status in STATUS.NA_SET
            updated_status = 'DONE' if closed else 'TODO'
        else:
            # SDE wins: push its status out to the ALM tool.
            self.alm_update_task_status(alm_task, sde_task['status']['id'])
            updated_system = self.alm_name
            updated_status = sde_task['status']['meaning']

        self.emit.info((u'Updated status of task {0} in {1} to {2}'
                        .format(task_id, updated_system, updated_status)))

    def _get_matching_reference(self, sde_task):
        """
        SDE Tasks may be synced to multiple systems, so match the reference
        to the current alm.
        """
        if not self.supports_references:
            return None

        task_references = sde_task.get('references', [])
        task_reference = None

        if task_references:
            task_reference = next((ref for ref in task_references
                                   if ref['alm_connection'] == self.config['alm_reference_context']), None)
        return task_reference

    def task_should_sync(self, sde_task):
        """
        Decide whether a task needs syncing: it must be accepted, not on
        the ignore list, and - unless specific tasks were selected - have a
        status within the configured scope.
        """
        if not sde_task['accepted']:
            return False
        if sde_task['id'] in self.ignored_tasks:
            return False
        if self.config['selected_tasks']:
            # Explicitly selected tasks sync regardless of status scope.
            return True
        return sde_task['status']['meaning'] in self.config['sde_statuses_in_scope']

    def synchronize(self):
        """ Synchronizes SDE project with ALM project.

        Reviews every task in the SDE project:
        - if the task exists in both SDE & ALM and the status is the same
          in both, nothing happens
        - if the task exists in both SDE & ALM and the status differs, then
          the conflict policy takes effect. Either the newest status based on
          timestamp is used, or the SDE status is used in every case, or
          the ALM tool status is used in every case. Default is ALM tool
          status
        - if the task only exists in SDE, the task is added to the ALM
          tool
        - NOTE: if a task that was previously imported from SDE into the
          ALM is later removed in the same SDE project, then the task is
          effectively orphaned. The task must be removed manually from the
          ALM tool

        Raises an AlmException on encountering an error
        """

        # Mutable class attribute used as a shared counter so the nested
        # helper can accumulate progress without rebinding an outer name.
        class progress:
            count = 0

        def update_progress(val, output=None):
            # Advance the counter by `val` and report either an explicit
            # value (e.g. a percentage) or the raw count.
            progress.count += val
            output = output or progress.count
            self.output_progress(output)

        try:
            if not self.sde_plugin:
                raise AlmException('Requires initialization')

            # Test mode: only verify ALM connectivity, then stop.
            if self.config['test_alm']:
                self.alm_connect()
                return

            # Attempt to connect to SDE & ALM
            self.sde_connect()
            update_progress(2)

            self.alm_connect()
            update_progress(2)

            project = self.sde_get_project()

            # Attempt to get all tasks
            accepted_tasks = self.sde_get_tasks()
            unaccepted_tasks = self.sde_get_tasks(accepted=False)
            logger.info('Retrieved all tasks from SDE')

            # Filter tasks - progress must match reality
            tasks = self.filter_tasks(accepted_tasks + unaccepted_tasks)
            tasks = self.transform_tasks(tasks, project)

            self._check_supports_references(tasks)

            if self.supports_references:
                self._check_has_migrated()
            else:
                logger.debug(("ALM Connection doesn't support references - "
                              "legacy lookup using titles will be used."))

            logger.info('Filtered tasks')

            # start_fresh first removes each ALM task, doubling per-task work,
            # so the progress denominator must account for that.
            if self.config['start_fresh']:
                total_work = progress.count + len(tasks) * 2
            else:
                total_work = progress.count + len(tasks)

            for sde_task in tasks:
                task_id = sde_task['task_id']
                update_progress(1, 100 * progress.count / total_work)

                task_reference = self._get_matching_reference(sde_task)

                if self.config['start_fresh']:
                    self._remove_alm_task(sde_task, task_reference)
                    update_progress(1, 100 * progress.count / total_work)

                alm_task = self.alm_get_task(sde_task)

                if alm_task:
                    # Task exists on both sides: reconcile reference & status.
                    if self.supports_references:
                        if not task_reference:
                            # Found by legacy title lookup: record a reference.
                            self._create_task_reference(sde_task['id'], alm_task)
                        elif self._reference_out_of_date(task_reference, alm_task):
                            # Stale reference (e.g. issue moved): refresh it.
                            reference_id = task_reference.pop('id')
                            self._update_task_reference(sde_task['id'], alm_task, reference_id)

                    if not self.config['alm_standard_workflow']:
                        continue

                    # Close tasks that are no longer part of the SDE project and remove the task reference
                    if not sde_task['accepted']:
                        if not self.status_match(alm_task.get_status(), STATUS.DONE):
                            self.alm_update_task_status(alm_task, STATUS.DONE)
                        if task_reference:
                            self._remove_task_reference(sde_task['id'], task_reference['id'])
                        continue

                    # Has the status changed between SDE and ALM?
                    if not self.status_match(alm_task.get_status(), sde_task['status']['id']):
                        self._apply_conflict_policy(task_id, sde_task, alm_task)
                else:
                    # Task only exists in SD Elements
                    # Remove reference if it exists - ALM Task was probably deleted
                    # Also pop any old references to force lookup using legacy
                    if task_reference:
                        self._remove_task_reference(sde_task['id'], task_reference['id'])
                        sde_task.pop('references', [])

                    if self.task_should_sync(sde_task):
                        new_alm_task = self.alm_add_task(sde_task)

                        if self.supports_references:
                            self._create_task_reference(sde_task['id'], new_alm_task)

                        # Leave a note on the SDE task pointing at the new ALM issue.
                        ref = self.get_note_message(new_alm_task)
                        self.emit.info(u'Added task %s to %s' % (task_id, self.alm_name))
                        note_msg = u'Task synchronized in %s. Reference: [%s]' % (self.alm_name, ref)
                        self._add_note(sde_task['id'], note_msg)
                        logger.debug(note_msg)

            logger.info('Synchronization complete')

            # Record that this connection now tracks tasks by reference.
            if self.supports_references and self.migration_required:
                self._update_alm_migration_status(has_migrated=True)

            self.alm_disconnect()

        except AlmException:
            # Disconnect cleanly before propagating the failure.
            self.alm_disconnect()
            raise

    def get_note_message(self, alm_task):
        """Hook for subclasses: return the reference text used in sync notes."""
        return None

    def translate_priority(self, priority):
        """ Translates an SDE priority into an Alm label.

        The priority map's keys are either single integers ('5') or
        inclusive ranges ('4-6'); the first matching key wins. Returns
        None when no map is configured or no key matches.

        Raises an AlmException when `priority` cannot be coerced to int.
        """
        pmap = self.config[self.ALM_PRIORITY_MAP]

        if not pmap:
            return None

        try:
            priority = int(priority)
        except (TypeError, ValueError):
            # Bug fix: int('foo') raises ValueError, which the original
            # `except TypeError` let escape uncaught.
            logger.error('Could not coerce %s into an integer' % priority)
            # Bug fix: the format arguments were swapped - the message read
            # "translating SDE priority to <priority>: <alm_name> is not an
            # integer priority".
            raise AlmException("Error in translating SDE priority to %s: "
                               "%s is not an integer priority" % (self.alm_name, priority))

        for key in pmap:
            if '-' in key:
                lrange, hrange = key.split('-')
                if int(lrange) <= priority <= int(hrange):
                    return pmap[key]
            elif int(key) == priority:
                return pmap[key]

        # No matching key: fall through with no translation.
        return None

    @property
    def new_issue_template(self):
        """Default issue fields merged with any configured custom fields
        (custom fields win on key collisions)."""
        template = dict(self.default_issue_template)
        custom_fields = self.config['alm_custom_fields']
        if custom_fields:
            template.update(custom_fields)
        return template

    def get_new_issue(self, task, defaults=None):
        """
        Build the field dict for a new ALM issue from an SDE task.

        Starts from `defaults` (copied, never mutated), overlays the issue
        template with each value expanded against the task's __macros, and
        finally remaps field names to user-friendly ones when a map exists.
        """
        data = defaults.copy() if defaults else {}

        macros = task['__macros']

        # Portability fix: dict.iteritems() does not exist on Python 3;
        # items() is equivalent for this single pass on both versions.
        data.update(
            (field, self._transform_value(value, macros))
            for field, value in self.new_issue_template.items()
        )

        if self.user_friendly_name_map:
            data = self.map_user_friendly_names(data)

        return data

    def map_user_friendly_names(self, task):
        """
        Return a copy of `task` with field names translated through
        self.user_friendly_name_map; unmapped names pass through unchanged.
        """
        name_map = self.user_friendly_name_map
        if not name_map:
            # No map configured: every name passes through as-is.
            return dict(task)
        # Portability fix: dict.iteritems() does not exist on Python 3;
        # items() is equivalent for this single pass on both versions.
        return dict(
            (name_map.get(field, field), value)
            for field, value in task.items()
        )

    def transform_tasks(self, tasks, project=None):
        """Apply transform_task to every task and return the results as a new list."""
        transformed = []
        for task in tasks:
            transformed.append(self.transform_task(task, project))
        return transformed

    def transform_task(self, task, project=None):
        """
        Take a task and add a new entry, __macros, containing a set of macro->value pairs
        so that this ALM's fields can be remapped to any of the values when a new issue is created

        Mutates and returns the same task dict: adds 'alm_full_title',
        'alm_fixed_title' and '__macros' keys.
        """
        task['alm_full_title'] = self.get_alm_task_title(self.config, task, fixed=False)
        task['alm_fixed_title'] = self.get_alm_task_title(self.config, task, fixed=True)

        task_id = self._extract_task_id(task['id'])

        # each macro is assumed to be a string or a list of strings
        macros = {
            'business_unit': self.config['sde_businessunit'],
            'application': self.config['sde_application'],
            'project': self.config['sde_project'],
            'context': self.config['alm_context'],
            'alm_user': self.config['alm_user'],
            'alm_project': self.config['alm_project'],
            'id': task['id'],
            'task_id': task_id,
            # Strip the leading "T123: " prefix from the title.
            'task_title': re.sub('^%s:\s+' % task_id, '', task['title']),
            'task_alm_title': task['alm_full_title'],
            'task_url': task['url'],
            'task_content': task['text'],
            'task_richcontent': self.sde_get_task_content(task),
            'task_phase': task['phase']['slug'],
            'task_status': task['status']['meaning'],
            'task_priority': str(task['priority']),
            'task_tags': task['tags'],
            'problem_content': task['problem']['text'],
            'problem_richcontent': self.convert_markdown_to_alm(task['problem']['text'], task['problem']['id']),
            'problem_id': task['problem']['id'],
            # Same prefix-stripping, for the problem title.
            'problem_title': re.sub('^%s:\s+' % task['problem']['id'], '', task['problem']['title']),
        }

        # Only available when a priority map is configured for this ALM.
        if self.ALM_PRIORITY_MAP in self.config:
            macros['task_priority_translated'] = self.translate_priority(task['priority'])

        # Titles-only mode: redact task/problem content with placeholder text.
        if self.sync_titles_only:
            macros['task_content'] = PUBLIC_TASK_CONTENT
            macros['task_richcontent'] = PUBLIC_TASK_CONTENT
            macros['problem_content'] = PUBLIC_TASK_CONTENT
            macros['problem_richcontent'] = PUBLIC_TASK_CONTENT

        # NOTE(review): macros['task_tags'] aliases task['tags'], so this
        # append mutates the task's own tag list in place - confirm intended.
        if 'alm_issue_label' in self.config:
            macros['task_tags'].append(self.config['alm_issue_label'])

        # Expose project custom attributes as project_custom_attr_<name> macros.
        if project and 'custom_attributes' in project:
            for attr, value in project['custom_attributes'].items():
                macros['project_custom_attr_%s' % attr] = value

        task['__macros'] = macros
        return task
コード例 #9
0
ファイル: import_csv.py プロジェクト: sdelements/sdetools
class Command(BaseCommand):
    """Import task status and verification updates into SD Elements from a CSV.

    Expected CSV columns (header row required):
      - 'task id' (mandatory)
      - 'task status', 'task note', 'verification status',
        'verification note' (all optional; 'verification note' requires
        'verification status')
    """
    help = 'Update task status and verification details given the contents of a CSV'
    sde_plugin = None

    def configure(self):
        # Set up the SDE plugin connection and register the csv_file option.
        self.sde_plugin = PlugInExperience(self.config)
        self.config.opts.add('csv_file', "CSV file")

    def sde_connect(self):
        """Connect to SD Elements; raise Error with a readable reason on failure."""
        if not self.sde_plugin:
            raise Error('Requires initialization')
        try:
            self.sde_plugin.connect()
        except APIError as err:
            raise Error('Unable to connect to SD Elements. Please review URL, id,'
                        ' and password in configuration file. Reason: %s' % (str(err)))

    def handle(self):
        """Parse the CSV and push each row's updates to SD Elements.

        Returns True on success; raises UsageError/Error on bad input.
        """
        if not self.config['csv_file']:
            raise UsageError('Missing csv_file')

        self.sde_connect()

        with open(self.config['csv_file'], 'r') as csvfile:
            reader = csv.DictReader(csvfile)

            if not reader.fieldnames:
                raise Error('Empty CSV file')

            # Remove extra white space in headings (in place, so row keys
            # line up with the names checked below).
            for i in range(len(reader.fieldnames)):
                reader.fieldnames[i] = reader.fieldnames[i].strip()
            fields = reader.fieldnames

            if len(fields) <= 1:
                raise Error('Please make sure CSV file has task id field and at least one more field.')

            # Bug fix: validate the header once, up front. These checks were
            # previously inside the row loop, so a bad header was only
            # reported when the first data row was reached (and never for an
            # empty data section).
            if 'task id' not in fields:
                raise Error('Please make sure task id field exists.')
            if 'verification note' in fields and 'verification status' not in fields:
                raise Error('Please make sure a verification status field exists along with verification note.')

            try:
                for row in reader:
                    task_id = row['task id']
                    if 'task status' in fields:
                        self.sde_plugin.update_task_status(task_id, row['task status'])
                    if 'task note' in fields:
                        self.sde_plugin.add_task_text_note(task_id, row['task note'])
                    if 'verification status' in fields:
                        verification_status = row['verification status']
                        if 'verification note' in fields:
                            finding_ref = row['verification note']
                        else:
                            finding_ref = ''
                        self.sde_plugin.add_manual_analysis_note(task_id, verification_status, finding_ref)

            # Consistency/portability fix: `except csv.Error, csv_err` is
            # Python-2-only syntax; the `as` form matches the APIError
            # handler above and is valid on both Python 2.6+ and 3.
            except csv.Error as csv_err:
                raise Error('Unable to parse CSV file, line %d: %s' % (reader.line_num, csv_err))

        return True
コード例 #10
0
ファイル: import_csv.py プロジェクト: sdelements/sdetools
 def configure(self):
     # Registers the SDE plugin connection and the csv_file option
     # (duplicate scrape of Command.configure above).
     self.sde_plugin = PlugInExperience(self.config)
     self.config.opts.add('csv_file', "CSV file")