Example #1
    def _handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError("Need to specify (only) branch and changeset")

        (project, changeset) = args

        # get reference to repo
        rdm = RefDataManager()
        repos = filter(lambda x: x['name'] == project,
                       rdm.get_all_repository_info())
        if not repos:
            raise CommandError("No project found named '%s'" % project)
        repo = repos[0]

        # make sure all tasks are run synchronously / immediately
        settings.CELERY_ALWAYS_EAGER = True

        # get hg pushlog
        pushlog_url = '%s/json-pushes/?full=1&version=2' % repo['url']

        # ingest this particular revision for this project
        process = HgPushlogProcess()
        # Use the actual push SHA, in case the changeset specified was a tag
        # or branch name (eg tip). HgPushlogProcess returns the full SHA.
        push_sha = process.run(pushlog_url, project, changeset=changeset)[:12]

        Builds4hJobsProcess().run(project_filter=project,
                                  revision_filter=push_sha,
                                  job_group_filter=options['filter_job_group'])
        PendingJobsProcess().run(project_filter=project,
                                 revision_filter=push_sha,
                                 job_group_filter=options['filter_job_group'])
        RunningJobsProcess().run(project_filter=project,
                                 revision_filter=push_sha,
                                 job_group_filter=options['filter_job_group'])
Example #2
    def _handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError("Need to specify (only) branch and changeset")

        (project, changeset) = args

        # get reference to repo
        rdm = RefDataManager()
        repos = filter(lambda x: x['name'] == project,
                       rdm.get_all_repository_info())
        if not repos:
            raise CommandError("No project found named '%s'" % project)
        repo = repos[0]

        # make sure all tasks are run synchronously / immediately
        settings.CELERY_ALWAYS_EAGER = True

        # get hg pushlog
        pushlog_url = '%s/json-pushes/?full=1&version=2' % repo['url']

        # ingest this particular revision for this project
        process = HgPushlogProcess()
        # Use the actual push SHA, in case the changeset specified was a tag
        # or branch name (eg tip). HgPushlogProcess returns the full SHA.
        push_sha = process.run(pushlog_url, project, changeset=changeset)[:12]

        Builds4hJobsProcess().run(project_filter=project,
                                  revision_filter=push_sha,
                                  job_group_filter=options['filter_job_group'])
        PendingJobsProcess().run(project_filter=project,
                                 revision_filter=push_sha,
                                 job_group_filter=options['filter_job_group'])
        RunningJobsProcess().run(project_filter=project,
                                 revision_filter=push_sha,
                                 job_group_filter=options['filter_job_group'])
Example #3
    def _handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError("Need to specify (only) branch and changeset")

        (project, changeset) = args

        # get reference to repo
        rdm = RefDataManager()
        repos = filter(lambda x: x['name'] == project,
                       rdm.get_all_repository_info())
        if not repos:
            raise CommandError("No project found named '%s'" % project)
        repo = repos[0]

        # make sure all tasks are run synchronously / immediately
        settings.CELERY_ALWAYS_EAGER = True

        # get hg pushlog
        pushlog_url = '%s/json-pushes/?full=1' % repo['url']

        # ingest this particular revision for this project
        process = HgPushlogProcess()
        process.run(pushlog_url, project, changeset=changeset)

        self._process_all_objects_for_project(project)

        Builds4hJobsProcess().run(filter_to_project=project,
                                  filter_to_revision=changeset)
        PendingJobsProcess().run(filter_to_project=project,
                                 filter_to_revision=changeset)
        RunningJobsProcess().run(filter_to_project=project,
                                 filter_to_revision=changeset)

        self._process_all_objects_for_project(project)
Example #4
def fetch_push_logs():
    """
    Run several fetch_hg_push_log subtasks, one per repository
    """
    rdm = RefDataManager()
    try:
        repos = filter(lambda x: x['url'], rdm.get_all_repository_info())
        # create a group of subtasks and apply them
        g = group(fetch_hg_push_log.si(repo['name'], repo['url'])
                            for repo in repos if repo['dvcs_type'] == 'hg')
        g()
    finally:
        rdm.disconnect()
Example #5
def fetch_push_logs():
    """
    Run several fetch_hg_push_log subtasks, one per repository
    """
    rdm = RefDataManager()
    try:
        repos = filter(lambda x: x['url'], rdm.get_all_repository_info())
        for repo in repos:
            if repo['dvcs_type'] == 'hg':
                fetch_hg_push_log.apply_async(args=(repo['name'], repo['url']),
                                              routing_key='pushlog')

    finally:
        rdm.disconnect()
Example #6
    def list(self, request):
        """
        Retrieves a list of bugs from the bugs cache
        search -- Mandatory search term
        """
        search_term = request.QUERY_PARAMS.get("search", None)
        if not search_term:
            return Response({"message": "the 'search' parameter is mandatory"}, status=400)

        rdm = RefDataManager()
        try:
            suggested_bugs = rdm.get_bug_suggestions(search_term)
        finally:
            rdm.disconnect()
        return Response(suggested_bugs)
Example #7
    def handle(self, *args, **options):
        repositories = Repository.objects.filter(active_status='active')
        if options['repo_name']:
            repositories = repositories.filter(name=options['repo_name'])
        if options['codebase']:
            repositories = repositories.filter(codebase=options['codebase'])
        if options['group']:
            repositories = repositories.filter(
                repository_group__name=options['group'])

        repo_ids = repositories.values_list('id', flat=True)
        
        refdata = RefDataManager()
        
        for repo_id in repo_ids:
            refdata.update_repository_version(repo_id)
Example #8
def fetch_push_logs():
    """
    Run several fetch_hg_push_log subtasks, one per repository
    """
    rdm = RefDataManager()
    try:
        repos = filter(lambda x: x['url'], rdm.get_all_repository_info())
        for repo in repos:
            if repo['dvcs_type'] == 'hg':
                fetch_hg_push_log.apply_async(
                    args=(repo['name'], repo['url']),
                    routing_key='pushlog'
                )

    finally:
        rdm.disconnect()
Example #9
    def list(self, request):
        """
        Retrieves a list of bugs from the bugs cache
        search -- Mandatory search term
        """
        search_term = request.QUERY_PARAMS.get("search", None)
        if not search_term:
            return Response({"message": "the 'search' parameter is mandatory"},
                            status=400)

        rdm = RefDataManager()
        try:
            suggested_bugs = rdm.get_bug_suggestions(search_term)
        finally:
            rdm.disconnect()
        return Response(suggested_bugs)
Example #10
    def run(self):
        # this is the last day we fetched bugs from bugzilla

        bug_list = []

        offset = 0
        limit = 500

        while True:
            # fetch the bugzilla service until we have an empty result
            paginated_url = "{0}&offset={1}&limit={2}".format(
                get_bz_source_url(),
                offset,
                limit
            )
            response = self.extract(paginated_url)
            temp_bug_list = response.get('bugs', [])
            bug_list += temp_bug_list
            if len(temp_bug_list) < limit:
                break
            offset += limit

        if bug_list:
            for bug in bug_list:
                # drop the timezone indicator to avoid issues with mysql
                bug["last_change_time"] = bug["last_change_time"][0:19]

            with RefDataManager() as rdm:
                rdm.update_bugscache(bug_list)
Example #11
    def handle(self, *args, **options):
        repositories = Repository.objects.filter(active_status='active')
        if options['repo_name']:
            repositories = repositories.filter(name=options['repo_name'])
        if options['codebase']:
            repositories = repositories.filter(codebase=options['codebase'])
        if options['group']:
            repositories = repositories.filter(
                repository_group__name=options['group'])

        repo_ids = repositories.values_list('id', flat=True)
        
        refdata = RefDataManager()
        
        for repo_id in repo_ids:
            refdata.update_repository_version(repo_id)
Example #12
def fetch_missing_push_logs(missing_pushlogs):
    """
    Run several fetch_hg_push_log subtasks, one per repository
    """
    rdm = RefDataManager()
    try:
        repos = filter(lambda x: x['url'], rdm.get_all_repository_info())
        for repo in repos:
            if repo['dvcs_type'] == 'hg' and repo['name'] in missing_pushlogs:
                # we must get them one at a time, because if ANY are missing
                # from json-pushes, it'll return a 404 for the group.
                for resultset in missing_pushlogs[repo['name']]:
                    fetch_missing_hg_push_logs.apply_async(
                        args=(repo['name'], repo['url'], resultset),
                        routing_key='pushlog')
    finally:
        rdm.disconnect()
Example #13
    def run(self):
        # this is the last day we fetched bugs from bugzilla
        last_fetched = cache.get("bz_last_fetched")
        curr_date = datetime.date.today()

        bug_list = []

        if last_fetched:
            # if we have a last_fetched timestamp available
            # we don't need pagination.
            source_url = self._get_bz_source_url(last_fetched)
            response = self.extract(source_url)
            if response:
                bug_list = response.get("bugs", [])
        else:
            offset = 0
            limit = 500

            # fetch new pages no more than 30 times
            # this is a safeguard to avoid an infinite loop
            # in case something goes wrong
            for i in range(1, 30 + 1):
                # fetch the bugzilla service until we have an empty result
                paginated_url = "{0}&offset={1}&limit={2}".format(self._get_bz_source_url(), offset, limit)
                response = self.extract(paginated_url)
                temp_bug_list = response.get("bugs", [])
                bug_list += temp_bug_list
                if len(temp_bug_list) < limit:
                    break
                else:
                    offset += limit

        if bug_list:

            # store the new date for one day
            cache.set("bz_last_fetched", curr_date, 60 * 60 * 24)

            rdm = RefDataManager()
            try:
                rdm.update_bugscache(bug_list)
            finally:
                rdm.disconnect()
Example #14
def fetch_missing_push_logs(missing_pushlogs):
    """
    Run several fetch_hg_push_log subtasks, one per repository
    """
    rdm = RefDataManager()
    try:
        repos = filter(lambda x: x['url'], rdm.get_all_repository_info())
        for repo in repos:
            if repo['dvcs_type'] == 'hg' and repo['name'] in missing_pushlogs:
                # we must get them one at a time, because if ANY are missing
                # from json-pushes, it'll return a 404 for the group.
                for resultset in missing_pushlogs[repo['name']]:
                    fetch_missing_hg_push_logs.apply_async(
                        args=(repo['name'], repo['url'], resultset),
                        routing_key='pushlog'
                    )
    finally:
        rdm.disconnect()
Example #15
    def list(self, request):
        """
        Retrieves a list of bugs from the bugs cache
        search -- Mandatory search term
        """
        search_term = request.QUERY_PARAMS.get("search", None)
        if not search_term:
            return Response({"message": "the 'search' parameter is mandatory"},
                            status=400)

        with RefDataManager() as rdm:
            return Response(rdm.get_bug_suggestions(search_term))
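This variant relies on RefDataManager being usable as a context manager, while earlier versions of the same view (Examples #6 and #9) pair an explicit disconnect() with try/finally. Both shapes recur throughout this listing; the sketch below is not Treeherder's actual class, only an illustration of how __enter__/__exit__ make the two forms equivalent, assuming __exit__ simply calls disconnect().

# Illustrative only: a class with an explicit disconnect() that also supports
# the `with` statement by releasing its connection in __exit__.
class ManagedResource(object):

    def disconnect(self):
        pass  # stand-in for closing a database connection

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # runs even if the body of the `with` block raised
        self.disconnect()


# The two usage forms seen in the examples:
res = ManagedResource()
try:
    pass  # ... use res ...
finally:
    res.disconnect()

with ManagedResource() as res:
    pass  # ... use res ...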
Example #16
    def run(self):
        # this is the last day we fetched bugs from bugzilla

        bug_list = []

        offset = 0
        limit = 500

        # fetch new pages no more than 30 times
        # this is a safeguard to avoid an infinite loop
        # in case something goes wrong
        for i in range(1, 30 + 1):
            # fetch the bugzilla service until we have an empty result
            paginated_url = "{0}&offset={1}&limit={2}".format(
                get_bz_source_url(), offset, limit)
            response = self.extract(paginated_url)
            temp_bug_list = response.get('bugs', [])
            bug_list += temp_bug_list
            if len(temp_bug_list) < limit:
                break
            else:
                offset += limit

        if bug_list:

            for bug in bug_list:
                # drop the timezone indicator to avoid issues with mysql
                bug["last_change_time"] = bug["last_change_time"][0:19]

            rdm = RefDataManager()
            try:
                rdm.update_bugscache(bug_list)
            finally:
                rdm.disconnect()
Example #17
    def list(self, request):
        """
        Retrieves a list of bugs from the bugs cache

        search -- Mandatory search term
        status -- Optional status filter, either 'open' or 'closed' (defaults to 'open')
        """
        search_term = request.QUERY_PARAMS.get("search", None)
        if not search_term:
            return Response({"message": "the 'search' parameter is mandatory"}, status=400)

        status = request.QUERY_PARAMS.get("status", "open")
        if status not in ("open", "closed"):
            return Response({"message": "status must be 'open' or 'closed'"}, status=400)

        open_only = status == "open"

        rdm = RefDataManager()
        try:
            suggested_bugs = rdm.get_bug_suggestions(search_term, open_only)
        finally:
            rdm.disconnect()
        return Response(suggested_bugs)
Example #18
    def list(self, request):
        with RefDataManager() as rdm:
            option_collection_hash = rdm.get_all_option_collections()

        ret = []
        for (option_hash, val) in option_collection_hash.iteritems():
            ret.append({
                'option_collection_hash':
                option_hash,
                'options': [{
                    'name': name
                } for name in val['opt'].split()]
            })
        return Response(ret)
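The view above flattens the result of get_all_option_collections() — a mapping from an option collection hash to a record whose 'opt' field holds space-separated option names — into a list of dicts. A rough, standalone sketch of that transformation (the hash and option names are invented; .items() is used here, whereas the example itself is Python 2 and uses .iteritems()):

# Hypothetical input shaped the way the view consumes it.
option_collection_hash = {
    '102210fe594ee9b33d82058545b1ed14f4c8206e': {'opt': 'debug asan'},
}

ret = []
for (option_hash, val) in option_collection_hash.items():
    ret.append({
        'option_collection_hash': option_hash,
        'options': [{'name': name} for name in val['opt'].split()],
    })

# ret == [{'option_collection_hash': '102210fe594ee9b33d82058545b1ed14f4c8206e',
#          'options': [{'name': 'debug'}, {'name': 'asan'}]}]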
Example #19
    def run(self):
        # this is the last day we fetched bugs from bugzilla

        bug_list = []

        offset = 0
        limit = 500

        # fetch new pages no more than 30 times
        # this is a safeguard to avoid an infinite loop
        # in case something goes wrong
        for i in range(1, 30+1):
            # fetch the bugzilla service until we have an empty result
            paginated_url = "{0}&offset={1}&limit={2}".format(
                get_bz_source_url(),
                offset,
                limit
            )
            response = self.extract(paginated_url)
            temp_bug_list = response.get('bugs', [])
            bug_list += temp_bug_list
            if len(temp_bug_list) < limit:
                break
            else:
                offset += limit

        if bug_list:

            for bug in bug_list:
                # drop the timezone indicator to avoid issues with mysql
                bug["last_change_time"] = bug["last_change_time"][0:19]

            rdm = RefDataManager()
            try:
                rdm.update_bugscache(bug_list)
            finally:
                rdm.disconnect()
Example #20
def refdata(request):
    """returns a patched RefDataManager for testing purpose"""
    from treeherder.model.derived import RefDataManager
    from tests.conftest import add_test_procs_file

    refdata = RefDataManager()

    proc_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                             'refdata_test.json')

    add_test_procs_file(refdata.dhub, 'reference', proc_path)

    def fin():
        refdata.disconnect()

    request.addfinalizer(fin)

    return refdata
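Because this is a pytest fixture, a test pulls in the patched manager simply by naming refdata as an argument; the addfinalizer callback then disconnects it once the test finishes. A hypothetical test using the fixture might look like the sketch below (the keys checked are the ones other examples index, e.g. repo['name'] and repo['url']; the actual contents depend on the test database):

# Hypothetical test; pytest injects the `refdata` fixture by argument name
# and runs the registered finalizer (disconnect) after the test completes.
def test_repository_info_has_expected_keys(refdata):
    for repo in refdata.get_all_repository_info():
        assert 'name' in repo
        assert 'url' in repo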
Example #21
def parse_log(project, job_id, result_set_id, check_errors=False):
    """
    Call ArtifactBuilderCollection on the given job.
    """
    pattern_obj = re.compile(r'\d+:\d+:\d+\s+')

    jm = JobsModel(project=project)
    rdm = RefDataManager()

    open_bugs_cache = {}
    closed_bugs_cache = {}

    status_publisher = JobStatusPublisher(settings.BROKER_URL)
    failure_publisher = JobFailurePublisher(settings.BROKER_URL)

    try:
        # return the resultset with the job id to identify if the UI wants
        # to fetch the whole thing.
        resultset = jm.get_result_set_by_id(result_set_id=result_set_id)[0]
        del(resultset["active_status"])
        del(resultset["revision_hash"])

        log_references = jm.get_log_references(job_id)

        # we may have many log references per job
        for log in log_references:

            # parse a log given its url
            artifact_bc = ArtifactBuilderCollection(
                log['url'],
                check_errors=check_errors,
            )
            artifact_bc.parse()

            artifact_list = []
            for name, artifact in artifact_bc.artifacts.items():
                artifact_list.append((job_id, name, 'json', json.dumps(artifact)))

            if check_errors:
                # I'll try to begin with a full_text search on the entire row

                all_errors = artifact_bc.artifacts['Structured Log']['step_data']['all_errors']

                open_bugs_suggestions = {}
                closed_bugs_suggestions = {}

                for err in all_errors:

                    # remove timestamp
                    clean_line = pattern_obj.sub('', err['line'])

                    if clean_line not in open_bugs_cache:
                        open_bugs_cache[clean_line] = rdm.get_suggested_bugs(
                            clean_line)

                    if clean_line not in closed_bugs_cache:
                        closed_bugs_cache[clean_line] = rdm.get_suggested_bugs(
                            clean_line, open_bugs=False)

                    open_bugs_suggestions[err['line']] = open_bugs_cache[clean_line]
                    closed_bugs_suggestions[err['line']] = closed_bugs_cache[clean_line]

                artifact_list.append((job_id, 'Open bugs', 'json', json.dumps(open_bugs_suggestions)))
                artifact_list.append((job_id, 'Closed bugs', 'json', json.dumps(closed_bugs_suggestions)))

            # store the artifacts generated
            jm.store_job_artifact(artifact_list)
        status_publisher.publish(job_id, resultset, project, 'processed')
        if check_errors:
            failure_publisher.publish(job_id, project)

    finally:
        rdm.disconnect()
        jm.disconnect()
        status_publisher.disconnect()
        failure_publisher.disconnect()
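The bug-suggestion caches above are keyed on each error line with its leading timestamp stripped by pattern_obj. A standalone check of that cleanup step (the log line below is made up; a raw string is used for the pattern to avoid escape-sequence warnings on newer Pythons):

import re

pattern_obj = re.compile(r'\d+:\d+:\d+\s+')

# Hypothetical log line with a leading HH:MM:SS timestamp.
line = "12:34:56     INFO - TEST-UNEXPECTED-FAIL | test_foo.py | assertion failed"
clean_line = pattern_obj.sub('', line)
print(clean_line)  # INFO - TEST-UNEXPECTED-FAIL | test_foo.py | assertion failed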