Example #1
    def synchronize(self):
        """Synchronize all repos."""
        logger.info('Synchronizing repositories ...')

        # Get all repos
        repos = self.get_all_repos()
        logger.info('%s repositories are currently tracked', len(repos))

        # Get untracked repos currently on disk
        untracked_repos = self.get_untracked_repos(repos)
        logger.info('%s untracked repositories found on disk',
                    len(untracked_repos))

        for repo in untracked_repos:
            # Purge all branch heads
            logger.info('Purging branch heads (repo: %s) ...', repo)
            self.axdb_client.purge_branch_heads(repo)
            # Delete workspace
            logger.info('Deleting workspace (path: %s) ...',
                        untracked_repos[repo])
            shutil.rmtree(untracked_repos[repo])
            # Invalidate caches
            logger.info('Invalidating caches (workspace: %s) ...',
                        untracked_repos[repo])
            # Redis KEYS matches glob-style patterns, not regexes
            key_pattern = '{}:{}*'.format(NAMESPACE, untracked_repos[repo])
            keys = redis_client.keys(key_pattern)
            for k in keys:
                logger.debug('Invalidating cache (key: %s) ...', k)
                redis_client.delete(k)

        # Send event to trigger garbage collection from axops
        if untracked_repos:
            kafka_client = ProducerClient()
            ci_event = {
                'Op': "gc",
                'Payload': {
                    'details': "Repo or branch get deleted."
                }
            }
            kafka_client.send(AxSettings.TOPIC_GC_EVENT,
                              key=AxSettings.TOPIC_GC_EVENT,
                              value=ci_event,
                              timeout=120)

        return repos, len(untracked_repos) > 0
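
The loop above relies on `get_untracked_repos` returning a mapping from repo URL to that repo's workspace path on disk (the value is handed straight to `shutil.rmtree`). Below is a minimal sketch of what such a helper could look like, assuming the `BASE_DIR/<vendor>/<owner>/<name>` workspace layout and https URL scheme shown in Example #4; the name `get_untracked_repos_sketch` and the directory walk are illustrative only, not the project's actual implementation.

import os

def get_untracked_repos_sketch(base_dir, tracked_repo_urls):
    """Map repo URL -> workspace path for on-disk workspaces that are no longer tracked.

    Illustrative only: assumes workspaces live at <base_dir>/<vendor>/<owner>/<name>
    and that the repo URL can be rebuilt from those path components over https.
    """
    untracked = {}
    if not os.path.isdir(base_dir):
        return untracked
    for vendor in os.listdir(base_dir):
        for owner in os.listdir(os.path.join(base_dir, vendor)):
            for name in os.listdir(os.path.join(base_dir, vendor, owner)):
                workspace = os.path.join(base_dir, vendor, owner, name)
                url = 'https://{}/{}/{}'.format(vendor, owner, name)
                if url not in tracked_repo_urls:
                    untracked[url] = workspace
    return untracked
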
Example #2
File: api.py    Project: abhinavdas/argo
    def events(self, request):
        """Create a DevOps event.

        :param request:
        :return:
        """
        payload, headers = request.data, request.META
        try:
            logger.info('Translating SCM event ...')
            events = EventTranslator.translate(payload, headers)
        except Exception as e:
            logger.error('Failed to translate event: %s', e)
            # TODO (Tianhe) Issue #330: commented out for now because it is distracting
            # event_notification_client.send_message_to_notification_center(CODE_JOB_CI_EVENT_TRANSLATE_FAILURE,
            #                                                               detail={
            #                                                                   'payload': payload,
            #                                                                   'error': str(e)
            #                                                               })
            raise AXApiInternalError('Failed to translate event',
                                     detail=str(e))
        else:
            logger.info('Successfully translated event')

        kafka_client = ProducerClient()
        successful_events = []
        for event in events:
            if event['type'] == AxEventTypes.PING:
                logger.info(
                    'Received a PING event, skipping service creation ...')
                continue
            else:
                try:
                    logger.info('Creating AX event ...\n%s',
                                AxPrettyPrinter().pformat(event))
                    key = '{}_{}_{}'.format(event['repo'], event['branch'],
                                            event['commit'])
                    kafka_client.send(AxSettings.TOPIC_DEVOPS_CI_EVENT,
                                      key=key,
                                      value=event,
                                      timeout=120)
                except Exception as e:
                    event_notification_client.send_message_to_notification_center(
                        CODE_JOB_CI_EVENT_CREATION_FAILURE,
                        detail={
                            'event_type': event.get('type', 'UNKNOWN'),
                            'error': str(e)
                        })
                    logger.warning('Failed to create AX event: %s', e)
                else:
                    logger.info('Successfully created AX event')
                    successful_events.append(event)
        kafka_client.close()
        return Response(successful_events)
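
The loop above depends on only a few fields of each translated event: `type` (PING events are skipped) plus `repo`, `branch`, and `commit` for the Kafka message key. A rough sketch of that shape with illustrative values follows; `EventTranslator.translate` may attach more fields than shown here.

# Illustrative event shape only; real events come from EventTranslator.translate
example_event = {
    'type': 'push',     # events of type AxEventTypes.PING are skipped above
    'repo': 'https://github.com/argo/prod.git',
    'branch': 'master',
    'commit': '0123abcd',
}

# Kafka message key derived from the event, as in the handler above
key = '{}_{}_{}'.format(example_event['repo'],
                        example_event['branch'],
                        example_event['commit'])
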
Example #3
def kafka_producer(broker):
    """Yield a ProducerClient for ``broker`` ('host:port'); close its underlying producer on teardown."""
    from ax.devops.kafka.kafka_client import ProducerClient
    host, port = broker.split(':')
    k_producer = ProducerClient(host, port=port)
    yield k_producer
    k_producer.producer.close()
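
The yield/teardown pattern suggests this is meant to be registered as a pytest fixture (the `@pytest.fixture` decorator is not shown in the snippet). A hypothetical test using it might look like the sketch below, assuming a `broker` value such as 'localhost:9092'; the topic name and payload are illustrative only.

def test_send_event(kafka_producer):
    # Send a throwaway message through the yielded ProducerClient;
    # the fixture closes the underlying producer after the test finishes.
    kafka_producer.send('devops_test_topic',
                        key='test-key',
                        value={'Op': 'test'},
                        timeout=120)
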
Example #4
    def update_repo(self, repo_type, vendor, protocol, repo_owner, repo_name,
                    username, password, use_webhook):
        """Update a repo."""

        # Examples for the input variables
        # BASE_DIR:   /ax/data/repos
        # repo_type:  github
        # vendor:     github.com
        # protocol:   https
        # repo_owner: argo
        # repo_name:  prod.git

        is_first_fetch = False
        do_send_gc_event = False
        workspace = '{}/{}/{}/{}'.format(BASE_DIR, vendor, repo_owner,
                                         repo_name)
        url = '{}://{}/{}/{}'.format(protocol, vendor, repo_owner, repo_name)
        kafka_client = ProducerClient()

        if not os.path.isdir(workspace):
            os.makedirs(workspace)
            # If we recreate the workspace, we need to purge all branch heads of this repo
            self.axdb_client.purge_branch_heads(url)

        logger.info("Start scanning repository (%s) ...", url)
        if repo_type == ScmVendors.CODECOMMIT:
            client = CodeCommitClient(path=workspace,
                                      repo=url,
                                      username=username,
                                      password=password)
        else:
            client = GitClient(path=workspace,
                               repo=url,
                               username=username,
                               password=password,
                               use_permanent_credentials=True)

        # Even if there is no change, performing a fetch is harmless and has a benefit:
        # in case the workspace was destroyed without purging the history, we can still
        # bring the workspace back to the proper state
        logger.info("Start fetching ...")
        client.fetch()

        # Retrieve all previous branch heads and construct hash table
        prev_heads = self.axdb_client.get_branch_heads(url)
        logger.info("Have %s branch heads (repo: %s) from previous scan",
                    len(prev_heads), url)

        if len(prev_heads) == 0:
            is_first_fetch = True
            logger.debug(
                "This is an initial scan as no previous heads were found")

        prev_heads_map = dict()
        for prev_head in prev_heads:
            key = (prev_head['repo'], prev_head['branch'])
            prev_heads_map[key] = prev_head['head']

        # Retrieve all current branch heads
        current_heads = client.get_remote_heads()
        logger.info("Have %s branch heads (repo: %s) from current scan",
                    len(current_heads), url)
        current_heads = sorted(current_heads,
                               key=lambda v: v['commit_date'],
                               reverse=is_first_fetch)

        # Find out which branch heads need to be updated
        heads_to_update = list()
        heads_for_event = list()
        for current_head in current_heads:
            head = current_head['commit']
            branch = current_head['reference'].replace('refs/heads/', '')
            previous_head = prev_heads_map.pop((url, branch), None)
            if head != previous_head:
                event = {'repo': url, 'branch': branch, 'head': head}
                heads_to_update.append(event)

                if previous_head is None:
                    logger.info(
                        "New branch detected (branch: %s, current head: %s)",
                        branch, head)
                else:
                    logger.info(
                        "Existing ranch head updated (branch: %s, previous: %s, current: %s)",
                        branch, previous_head, head)
                    # Remember this head; a CI event is sent for it below if webhooks are disabled
                    heads_for_event.append(event.copy())

        if prev_heads_map:
            logger.info("There are %s get deleted from repo: %s",
                        prev_heads_map.keys(), url)
            do_send_gc_event = True
            for key in prev_heads_map:
                self.axdb_client.purge_branch_head(repo=key[0], branch=key[1])

        # Invalidate cache if there is head update or branch deleted
        if heads_to_update or prev_heads_map:
            cache_key = '{}:{}'.format(NAMESPACE, workspace)
            logger.info('Invalidating cache (key: %s) ...', cache_key)
            if redis_client.exists(cache_key):
                redis_client.delete(cache_key)

        # Update YAML contents
        count = 0
        for event in heads_to_update:
            res_count = RepoManager.update_yaml(repo_client=client,
                                                kafka_client=kafka_client,
                                                repo=url,
                                                branch=event['branch'],
                                                head=event['head'])
            if res_count >= 0:
                self.axdb_client.set_branch_head(**event)
                count += res_count

        logger.info(
            "Updated %s YAML files (template/policy) for %s branches (repo: %s)",
            count, len(heads_to_update), url)
        logger.info("Updated %s branch heads (repo: %s)", len(heads_to_update),
                    url)

        # If garbage collection is needed due to branch or repo deletion
        if do_send_gc_event:
            logger.info(
                "Sending gc event so that axops can garbage collect the deleted branch / repo"
            )
            ci_event = {
                'Op': "gc",
                'Payload': {
                    'details': "Repo or branch get deleted."
                }
            }
            kafka_client.send(AxSettings.TOPIC_GC_EVENT,
                              key=AxSettings.TOPIC_GC_EVENT,
                              value=ci_event,
                              timeout=120)

        # If webhook is disabled, we need to send CI events
        if not use_webhook:
            for event in heads_for_event:
                commit = client.get_commit(event['head'])
                ci_event = {
                    'Op': "ci",
                    'Payload': {
                        'author': commit['author'],
                        'branch': event['branch'],
                        'commit': commit['revision'],
                        'committer': commit['committer'],
                        'date': datetime.datetime.fromtimestamp(
                            commit['date']).strftime('%Y-%m-%dT%H:%M:%S'),
                        'description': commit['description'],
                        'repo': commit['repo'],
                        'type': "push",
                        'vendor': repo_type
                    }
                }
                kafka_client.send("devops_template",
                                  key="{}$$$${}".format(
                                      event['repo'], event['branch']),
                                  value=ci_event,
                                  timeout=120)
            logger.info('Webhook not enabled, sent %s devops_ci_event events',
                        len(heads_for_event))

        kafka_client.close()
        logger.info('Successfully scanned repository (%s)', url)

        return len(heads_to_update) > 0 or len(prev_heads_map) > 0
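
The heart of `update_repo` is the comparison between the previous and current branch heads. Distilled into a standalone function for clarity, using the same `{'repo', 'branch', 'head'}` records the method writes back to axdb; the function name and signature here are ours, not part of the project.

def diff_branch_heads(repo_url, prev_heads, current_heads):
    """Return (heads_to_update, deleted_branch_keys) for a single repo.

    prev_heads:    records from the previous scan, each {'repo', 'branch', 'head'}
    current_heads: records from the current scan, each with 'commit' and 'reference'
    """
    prev_heads_map = {(h['repo'], h['branch']): h['head'] for h in prev_heads}

    heads_to_update = []
    for current in current_heads:
        head = current['commit']
        branch = current['reference'].replace('refs/heads/', '')
        previous_head = prev_heads_map.pop((repo_url, branch), None)
        if head != previous_head:
            heads_to_update.append({'repo': repo_url, 'branch': branch, 'head': head})

    # Anything left in the map was not seen in the current scan, i.e. a deleted branch
    return heads_to_update, list(prev_heads_map.keys())
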