    def autoclassify_failures(self, failures, classification):
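        """Classify intermittent failures and link them to Bugzilla bugs.

        For every task flagged for autoclassification, look up the Bugzilla
        "single tracking bug" for each failing test, mark the matching Job
        with an intermittent classification note, and map the Job to those
        bugs.
        """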
        for tasks in failures.values():
            for task in tasks:
                # Keeping only the tasks that should be autoclassified
                if not task.get("autoclassify"):
                    continue

                bugs = []
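                # Collect the "single tracking bug" filed for each failing test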
                for failing_test_name in task.get("tests", []):
                    try:
                        bugs.append(
                            Bugscache.objects.get(
                                summary__endswith=f"{failing_test_name} | single tracking bug"))
                    except Bugscache.DoesNotExist:
                        logger.info(
                            "No single tracking Bugzilla bug found for test name: %s",
                            failing_test_name,
                        )

                if not bugs:
                    # No associated Bugzilla bug exists, skipping the autoclassification
                    continue

                # Retrieving the relevant Job
                try:
                    job = Job.objects.get(
                        taskcluster_metadata__task_id=task["task_id"])
                except Job.DoesNotExist:
                    logger.error(
                        "Job associated to the TC task %s does not exist and could not be autoclassified.",
                        task["task_id"],
                    )
                    raise

                # Adding an "autoclassified intermittent" classification on it
                JobNote.objects.create(
                    job=job,
                    failure_classification=classification,
                    text="Autoclassified by mozci bot as an intermittent failure",
                )

                # Linking it to the relevant Bugzilla single tracking bugs
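                # (ignore_conflicts avoids failing on mappings that already exist)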
                BugJobMap.objects.bulk_create(
                    [BugJobMap(job=job, bug_id=bug.id) for bug in bugs],
                    ignore_conflicts=True)

    def handle(self, *args, **options):
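        """Migrate bug job maps and job notes out of the legacy datasources.

        Connects directly to each per-project MySQL datasource database and
        copies its bug_job_map and job_note rows into the ORM tables.
        """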
        for ds in Datasource.objects.all():
            self.stdout.write('{}\n'.format(ds.project))
            try:
                repository = Repository.objects.get(name=ds.project)
            except Repository.DoesNotExist:
                self.stderr.write(
                    'No repository for datasource project {}, skipping\n'.format(
                        ds.project))
                continue

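            # Connect directly to the legacy per-project MySQL database,
            # reusing the default connection's host, user and options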
            db_options = settings.DATABASES['default'].get('OPTIONS', {})
            db = MySQLdb.connect(
                host=settings.DATABASES['default']['HOST'],
                db=ds.name,
                user=settings.DATABASES['default']['USER'],
                passwd=settings.DATABASES['default'].get('PASSWORD') or '',
                **db_options)
            c = db.cursor()

            #
            # Migrate bug job map
            #
            c.execute(
                """SELECT job_id, bug_id, submit_timestamp, who FROM bug_job_map"""
            )
            ds_bug_job_maps = c.fetchall()

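            # Translate legacy job ids and user emails into their ORM equivalents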
            (job_id_mapping, email_mapping) = self._get_mappings(
                repository, {bjm[0] for bjm in ds_bug_job_maps},
                {bjm[3] for bjm in ds_bug_job_maps})

            # migrate everything in one big bulk transaction (there aren't
            # that many)
            migrated_bug_job_maps = []
            for (ds_job_id, ds_bug_id, ds_timestamp,
                 ds_email) in ds_bug_job_maps:
                if not job_id_mapping.get(ds_job_id):
                    self.stderr.write(
                        "WARNING: job id {} not found when migrating bug job map, skipping\n"
                        .format(ds_job_id))
                    continue
                migrated_bug_job_maps.append(
                    BugJobMap(
                        job_id=job_id_mapping[ds_job_id],
                        bug_id=ds_bug_id,
                        user_id=email_mapping.get(ds_email),
                        created=datetime.datetime.fromtimestamp(ds_timestamp)))
            BugJobMap.objects.bulk_create(migrated_bug_job_maps)

            #
            # Migrate job notes
            #
            c.execute(
                """SELECT job_id, failure_classification_id, who, note, note_timestamp FROM job_note"""
            )
            ds_job_notes = c.fetchall()

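            # Same mapping step as above, this time for the job_note rows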
            (job_id_mapping, email_mapping) = self._get_mappings(
                repository, {jn[0] for jn in ds_job_notes},
                {jn[2] for jn in ds_job_notes})
            migrated_job_notes = []
            for (ds_job_id, ds_failure_classification_id, ds_email,
                 ds_note_text, ds_timestamp) in ds_job_notes:
                if not job_id_mapping.get(ds_job_id):
                    self.stderr.write(
                        "WARNING: job id {} not found when migrating job notes, skipping\n"
                        .format(ds_job_id))
                    continue
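                # Drop non-printable characters so the note text stores cleanly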
                ds_note_text = ''.join(
                    c for c in ds_note_text if c in string.printable)
                migrated_job_notes.append(
                    JobNote(
                        job_id=job_id_mapping[ds_job_id],
                        failure_classification_id=ds_failure_classification_id,
                        user_id=email_mapping.get(ds_email),
                        text=ds_note_text,
                        created=datetime.datetime.fromtimestamp(ds_timestamp)))
            JobNote.objects.bulk_create(migrated_job_notes)