Example #1
0
def create_note(job, all_matched):
    """Attach an autoclassification job note to *job* when appropriate.

    A note is only created when every error matched and the job reports
    itself as fully autoclassified, and only if no note exists yet.
    """
    if all_matched and job.is_fully_autoclassified():
        # Never stack a second note on top of an existing one — whether it
        # came from a previous autoclassification or from a human
        # verification.
        if not JobNote.objects.filter(job=job).exists():
            JobNote.create_autoclassify_job_note(job)
Example #2
0
def create_note(job, all_matched):
    """Create an autoclassify job note for a fully autoclassified job.

    No-op unless all errors matched, the job is fully autoclassified, and
    the job does not already carry a note.
    """
    if not all_matched:
        return
    if not job.is_fully_autoclassified():
        return
    # An existing note must not be overwritten: it may be a prior
    # autoclassification note or one supplied during human verification.
    already_noted = JobNote.objects.filter(job=job).exists()
    if not already_noted:
        JobNote.create_autoclassify_job_note(job)
Example #3
0
def update_db(job, matches, all_matched):
    """Persist autoclassification matches for *job*.

    ``matches`` is an iterable of ``(matcher, match)`` pairs. For each
    text log error the matches are stored (duplicates already in the
    database are logged and skipped), the best automatic match above the
    cutoff is marked as the best classification, and — when every error
    matched and the job is fully autoclassified — a job note is created
    unless one already exists.
    """
    matches_by_error = defaultdict(set)
    classified_failures = {
        item.id: item
        for item in ClassifiedFailure.objects.filter(
            id__in=[match.classified_failure_id for _, match in matches])
    }
    for matcher, match in matches:
        classified_failure = classified_failures[match.classified_failure_id]
        matches_by_error[match.text_log_error].add(
            (matcher, match, classified_failure))

    # Renamed from `matches` to avoid shadowing the function parameter.
    for text_log_error, error_matches in iteritems(matches_by_error):
        for (matcher, match, classified_failure) in error_matches:
            try:
                TextLogErrorMatch.objects.create(
                    score=match.score,
                    matcher=matcher,
                    classified_failure=classified_failure,
                    text_log_error=match.text_log_error)
                if match.text_log_error.metadata and match.text_log_error.metadata.failure_line:
                    FailureMatch.objects.create(
                        score=match.score,
                        matcher=matcher,
                        classified_failure=classified_failure,
                        failure_line=match.text_log_error.metadata.failure_line
                    )
            except IntegrityError:
                logger.warning(
                    "Tried to create duplicate match for TextLogError %i with matcher %i and classified_failure %i",
                    text_log_error.id, matcher.id, classified_failure.id)
        best_match = text_log_error.best_automatic_match(
            AUTOCLASSIFY_CUTOFF_RATIO)
        if best_match:
            # BUG FIX: previously this passed `classified_failure`, the
            # stale value left over from the inner loop, instead of the
            # freshly computed best match.
            text_log_error.mark_best_classification(best_match)

    if all_matched:
        if job.is_fully_autoclassified():
            # We don't want to add a job note after an autoclassification if there is already
            # one and after a verification if there is already one not supplied by the
            # autoclassifier
            if not JobNote.objects.filter(job=job).exists():
                JobNote.create_autoclassify_job_note(job)
    def handle(self, *args, **options):
        """Migrate bug job maps and job notes from each legacy datasource
        database into the unified ORM tables.

        Legacy job ids and user emails are translated via
        ``self._get_mappings``; rows whose job id cannot be mapped are
        skipped with a warning on stderr.
        """
        # Characters allowed through when sanitising legacy note text;
        # built once instead of once per note row.
        printable = set(string.printable)

        for ds in Datasource.objects.all():
            self.stdout.write('{}\n'.format(ds.project))
            try:
                repository = Repository.objects.get(name=ds.project)
            except Repository.DoesNotExist:
                # Narrowed from a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit and genuine programming
                # errors.
                self.stderr.write(
                    'No repository for datasource project {}, skipping\n'.
                    format(ds.project))
                continue

            db_options = settings.DATABASES['default'].get('OPTIONS', {})
            db = MySQLdb.connect(
                host=settings.DATABASES['default']['HOST'],
                db=ds.name,
                user=settings.DATABASES['default']['USER'],
                passwd=settings.DATABASES['default'].get('PASSWORD') or '',
                **db_options)
            c = db.cursor()
            try:
                #
                # Migrate bug job map
                #
                c.execute(
                    """SELECT job_id, bug_id, submit_timestamp, who from bug_job_map"""
                )
                ds_bug_job_maps = c.fetchall()

                (job_id_mapping, email_mapping) = self._get_mappings(
                    repository, set([bjm[0] for bjm in ds_bug_job_maps]),
                    set([bjm[3] for bjm in ds_bug_job_maps]))

                # migrate everything in one big bulk transaction (there aren't
                # that many)
                migrated_bug_job_maps = []
                for (ds_job_id, ds_bug_id, ds_timestamp,
                     ds_email) in ds_bug_job_maps:
                    if not job_id_mapping.get(ds_job_id):
                        self.stderr.write(
                            "WARNING: job id {} not found when migrating bug job map, skipping\n"
                            .format(ds_job_id))
                        continue
                    migrated_bug_job_maps.append(
                        BugJobMap(
                            job_id=job_id_mapping[ds_job_id],
                            bug_id=ds_bug_id,
                            user_id=email_mapping.get(ds_email),
                            created=datetime.datetime.fromtimestamp(ds_timestamp)))
                BugJobMap.objects.bulk_create(migrated_bug_job_maps)

                #
                # Migrate job notes
                #
                c.execute(
                    """SELECT job_id, failure_classification_id, who, note, note_timestamp from job_note"""
                )
                ds_job_notes = c.fetchall()

                (job_id_mapping, email_mapping) = self._get_mappings(
                    repository, set([jn[0] for jn in ds_job_notes]),
                    set([jn[2] for jn in ds_job_notes]))
                migrated_job_notes = []
                for (ds_job_id, ds_failure_classification_id, ds_email,
                     ds_note_text, ds_timestamp) in ds_job_notes:
                    if not job_id_mapping.get(ds_job_id):
                        self.stderr.write(
                            "WARNING: job id {} not found when migrating job notes, skipping\n"
                            .format(ds_job_id))
                        continue
                    # BUG FIX: the old ``filter(lambda ...)`` call returns a
                    # lazy filter object on Python 3, so a non-string would
                    # have been stored in the text field. ``''.join`` is
                    # equivalent on Python 2 and correct on Python 3.
                    ds_note_text = ''.join(
                        ch for ch in ds_note_text if ch in printable)
                    migrated_job_notes.append(
                        JobNote(
                            job_id=job_id_mapping[ds_job_id],
                            failure_classification_id=ds_failure_classification_id,
                            user_id=email_mapping.get(ds_email),
                            text=ds_note_text,
                            created=datetime.datetime.fromtimestamp(ds_timestamp)))
                JobNote.objects.bulk_create(migrated_job_notes)
            finally:
                # Close the per-datasource connection even if the
                # migration of one datasource fails part-way.
                c.close()
                db.close()