Example #1
0
        def handler():
            node = models.Node(mac='60:a4:4c:35:28:95',
                               timestamp=datetime.datetime.now())

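            # Attach an IPAddr instance as the node's attributes, then add the
            # node and flush so the pending INSERT is sent to the database.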
            node.attributes = models.IPAddr()
            db.add(node)
            db.flush()
Example #2
0
 def create_default_group(cls, instance):
     node_group = models.NodeGroup(name=consts.NODE_GROUPS.default,
                                   is_default=True)
     instance.node_groups.append(node_group)
     db.add(node_group)
     db().flush()
     return node_group
Example #3
0
 def create_default_group(cls, instance):
     node_group = models.NodeGroup(name=consts.NODE_GROUPS.default,
                                   is_default=True)
     instance.node_groups.append(node_group)
     db.add(node_group)
     db().flush()
     return node_group
Example #4
0
    def __call__(self, env, start_response):
        if env['REQUEST_METHOD'] in self.methods_to_analyze:
            url_matcher = self._get_url_matcher(url=env['PATH_INFO'])
            if url_matcher:
                request_body = utils.get_body_from_env(env)

                def save_headers_start_response(status, headers, *args):
                    """Hook for saving response headers for further
                    processing
                    """
                    self.status = status
                    return start_response(status, headers, *args)

                # Prepare arguments for ActionLog instance creation
                create_kwargs = {}

                actor_id = self._get_actor_id(env)
                create_kwargs['actor_id'] = actor_id

                # save actor_id in env for further processing
                env['fuel.action.actor_id'] = actor_id

                create_kwargs['start_timestamp'] = datetime.datetime.now()
                response = self.app(env, save_headers_start_response)
                create_kwargs['end_timestamp'] = datetime.datetime.now()

                # Since the response is an iterator, make two copies of it to
                # avoid exhausting it during analysis: one is processed by the
                # stats collection logic, the other is propagated further up
                # the middleware stack.
                response_to_analyse, response_to_propagate = \
                    itertools.tee(response)

                create_kwargs['action_name'] = \
                    compiled_urls_actions_mapping[url_matcher]['action_name']
                create_kwargs['action_group'] = \
                    compiled_urls_actions_mapping[url_matcher]['action_group']

                create_kwargs['action_type'] = \
                    consts.ACTION_TYPES.http_request

                create_kwargs['additional_info'] = \
                    self._get_additional_info(env,
                                              request_body,
                                              response_to_analyse)

                # get cluster_id from url
                cluster_id = utils.get_group_from_matcher(
                    url_matcher, env['PATH_INFO'], 'cluster_id')
                if cluster_id:
                    cluster_id = int(cluster_id)

                create_kwargs['cluster_id'] = cluster_id

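                # Persist the collected ActionLog record before handing the
                # response back to the caller.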
                db.add(ActionLog(**create_kwargs))
                db.commit()

                return response_to_propagate

        return self.app(env, start_response)
Example #5
0
 def test_does_not_fail_if_there_is_deleted_task(self):
     task = models.Task(name=consts.TASK_NAMES.deployment,
                        deleted_at=datetime.datetime.now(),
                        cluster_id=self.cluster.id)
     db.add(task)
     db.flush()
     self.assertNotRaises(errors.TaskAlreadyRunning,
                          self.task_manager.check_running_task)
Example #6
0
 def handler():
     node = models.Node(
         mac='60:a4:4c:35:28:95',
         timestamp=datetime.datetime.now(),
         status='batman'
     )
     db.add(node)
     db.flush()
Example #7
0
 def handler():
     node = models.Node(
         mac='60:a4:4c:35:28:95',
         timestamp=datetime.datetime.now(),
         status='batman'
     )
     db.add(node)
     db.flush()
Example #8
0
    def test_fails_if_there_is_task(self):
        task = models.Task(name=consts.TASK_NAMES.deployment,
                           cluster_id=self.cluster.id,
                           status=consts.TASK_STATUSES.pending)
        db.add(task)
        db.flush()

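        # A pending deployment task counts as running, so the check must raise.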
        self.assertRaises(errors.TaskAlreadyRunning,
                          self.task_manager.check_running_task)
Example #9
0
        def handler():
            node = models.Node(
                mac='60:a4:4c:35:28:95',
                timestamp=datetime.datetime.now()
            )

            node.attributes = models.IPAddr()
            db.add(node)
            db.flush()
Example #10
0
 def test_does_not_fail_if_there_is_deleted_task(self):
     task = models.Task(name=consts.TASK_NAMES.deployment,
                        deleted_at=datetime.datetime.now(),
                        cluster_id=self.cluster.id)
     db.add(task)
     db.flush()
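     # A task with deleted_at set is treated as removed and must not count as
     # running.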
     self.assertNotRaises(
         errors.TaskAlreadyRunning, self.task_manager.check_running_task
     )
Example #11
0
        def handler():
            mac = '60:a4:4c:35:28:95'

            node1 = models.Node(mac=mac, timestamp=datetime.datetime.now())
            db.add(node1)
            db.flush()

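            # A second node with the same MAC; the flush below presumably
            # exercises the uniqueness constraint on the MAC column.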
            node2 = models.Node(mac=mac, timestamp=datetime.datetime.now())
            db.add(node2)
            db.flush()
Example #12
0
        def handler():
            mac = '60:a4:4c:35:28:95'

            node1 = models.Node(mac=mac, timestamp=datetime.datetime.now())
            db.add(node1)
            db.flush()

            node2 = models.Node(mac=mac, timestamp=datetime.datetime.now())
            db.add(node2)
            db.flush()
Example #13
0
    def test_fails_if_there_is_task(self):
        task = models.Task(
            name=consts.TASK_NAMES.deployment, cluster_id=self.cluster.id,
            status=consts.TASK_STATUSES.pending
        )
        db.add(task)
        db.flush()

        self.assertRaises(
            errors.TaskAlreadyRunning, self.task_manager.check_running_task
        )
Example #14
0
    def test_does_not_fail_if_there_is_deleted_task(self):
        for task_name in DeploymentCheckMixin.deployment_tasks:
            task = models.Task(name=task_name,
                               deleted_at=datetime.datetime.now(),
                               cluster_id=self.cluster.id)
            db.add(task)
            db.flush()
            self.addCleanup(db.query(models.Task).delete)

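            # Soft-deleted tasks (deleted_at set) must not block starting a new
            # deployment.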
            self.assertNotRaises(
                errors.DeploymentAlreadyStarted,
                DeploymentCheckMixin.check_no_running_deployment, self.cluster)
Example #15
0
    def test_fails_if_there_is_task(self):
        for task_name in DeploymentCheckMixin.deployment_tasks:
            task = models.Task(name=task_name, cluster_id=self.cluster.id)
            db.add(task)
            db.flush()
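            # Each running deployment-related task should raise the error with a
            # message listing the offending task.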
            self.assertRaisesWithMessage(
                errors.DeploymentAlreadyStarted,
                'Cannot perform the actions because there are '
                'running tasks {0}'.format([task]),
                DeploymentCheckMixin.check_no_running_deployment, self.cluster)

            db.query(models.Task).delete()
Example #16
0
    def test_does_not_fail_if_there_is_deleted_task(self):
        for task_name in DeploymentCheckMixin.deployment_tasks:
            task = models.Task(name=task_name,
                               deleted_at=datetime.datetime.now(),
                               cluster_id=self.cluster.id)
            db.add(task)
            db.flush()
            self.addCleanup(db.query(models.Task).delete)

            self.assertNotRaises(
                errors.DeploymentAlreadyStarted,
                DeploymentCheckMixin.check_no_running_deployment,
                self.cluster)
Example #17
0
    def test_fails_if_there_is_task(self):
        for task_name in DeploymentCheckMixin.deployment_tasks:
            task = models.Task(name=task_name, cluster_id=self.cluster.id)
            db.add(task)
            db.flush()
            self.assertRaisesWithMessage(
                errors.DeploymentAlreadyStarted,
                'Cannot perform the actions because there are '
                'running tasks {0}'.format([task]),
                DeploymentCheckMixin.check_no_running_deployment,
                self.cluster)

            db.query(models.Task).delete()
Example #18
0
    def __call__(self, env, start_response):
        if env["REQUEST_METHOD"] in self.methods_to_analyze:
            url_matcher = self._get_url_matcher(url=env["PATH_INFO"])
            if url_matcher:
                request_body = utils.get_body_from_env(env)

                def save_headers_start_response(status, headers, *args):
                    """Hook for saving resp headers for further processing"""
                    self.status = status
                    return start_response(status, headers, *args)

                # Prepare arguments for ActionLog instance creation
                create_kwargs = {}

                actor_id = self._get_actor_id(env)
                create_kwargs["actor_id"] = actor_id

                # save actor_id in env for further processing
                env["fuel.action.actor_id"] = actor_id

                create_kwargs["start_timestamp"] = datetime.datetime.utcnow()
                response = self.app(env, save_headers_start_response)
                create_kwargs["end_timestamp"] = datetime.datetime.utcnow()

                # Since the response is an iterator, make two copies of it to
                # avoid exhausting it during analysis: one is processed by the
                # stats collection logic, the other is propagated further up
                # the middleware stack.
                response_to_analyse, response_to_propagate = itertools.tee(response)

                create_kwargs["action_name"] = compiled_urls_actions_mapping[url_matcher]["action_name"]
                create_kwargs["action_group"] = compiled_urls_actions_mapping[url_matcher]["action_group"]

                create_kwargs["action_type"] = consts.ACTION_TYPES.http_request

                create_kwargs["additional_info"] = self._get_additional_info(env, request_body, response_to_analyse)

                # get cluster_id from url
                cluster_id = utils.get_group_from_matcher(url_matcher, env["PATH_INFO"], "cluster_id")
                if cluster_id:
                    cluster_id = int(cluster_id)

                create_kwargs["cluster_id"] = cluster_id

                db.add(ActionLog(**create_kwargs))
                db.commit()

                return response_to_propagate

        return self.app(env, start_response)
Example #19
0
 def handler():
     node = models.Node(mac=None)
     db.add(node)
     db.flush()
Example #20
0
        def handler():
            ip_addr = models.IPAddr()

            ip_addr.network_data = models.IPAddr()
            db.add(ip_addr)
            db.flush()
Example #21
0
 def _update_release_state(cls, release_id, state):
     release = db().query(Release).get(release_id)
     release.state = state
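     # The release is already attached to the session from the query above, so
     # add() is redundant here; commit() persists the state change.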
     db.add(release)
     db.commit()
Example #22
0
 def create_relation(cls, orig_cluster_id, seed_cluster_id):
     relation = models.UpgradeRelation(orig_cluster_id=orig_cluster_id,
                                       seed_cluster_id=seed_cluster_id)
     db.add(relation)
     db.flush()
Example #23
0
        def handler():
            ip_addr = models.IPAddr()

            ip_addr.network_data = models.IPAddr()
            db.add(ip_addr)
            db.flush()
Example #24
0
 def handler():
     node = models.Node(mac=None)
     db.add(node)
     db.flush()
Example #25
0
 def _update_release_state(cls, release_id, state):
     release = db().query(Release).get(release_id)
     release.state = state
     db.add(release)
     db.commit()
Example #26
0
 def create_relation(cls, orig_cluster_id, seed_cluster_id):
     relation = models.UpgradeRelation(
         orig_cluster_id=orig_cluster_id,
         seed_cluster_id=seed_cluster_id)
     db.add(relation)
     db.flush()