def prepare():
    """Seed the extensions migration buffer table with sample rows."""
    meta = base.reflect_db_metadata()

    buffer_table = meta.tables[extensions_migration_buffer_table_name]
    rows = [
        {
            'extension_name': 'volume_manager',
            'data': jsonutils.dumps(
                {'node_id': 1, 'volumes': [{'volume': 1}]}),
        },
        {
            'extension_name': 'volume_manager',
            'data': jsonutils.dumps(
                {'node_id': 2, 'volumes': [{'volume': 2}]}),
        },
        {
            'extension_name': 'some_different_extension',
            'data': 'some_data',
        },
    ]

    # Fill in migration table with data
    db.execute(buffer_table.insert(), rows)

    db.commit()
Пример #2
0
def load_db_driver(handler):
    """Wrap a handler call so the DB session is committed or rolled back.

    Commits the session when the handler succeeds (or raises an HTTP
    error with a 2xx status), rolls it back otherwise.  Note that only
    HTTPError is expected to propagate out of this function; all other
    errors should be handled here.
    """
    try:
        # execute handler and commit changes if all is ok
        response = handler()
        db.commit()
        return response

    except web.HTTPError:
        # a special case: commit changes if http error ends with
        # 200, 201, 202, etc
        if web.ctx.status.startswith('2'):
            db.commit()
        else:
            db.rollback()
        raise

    except (sa_exc.IntegrityError, sa_exc.DataError) as exc:
        # respond a "400 Bad Request" if database constraints were broken
        # NOTE(review): exc.message is Python 2 only; on Python 3 this
        # would raise AttributeError — confirm whether str(exc) is needed.
        db.rollback()
        raise BaseHandler.http(400, exc.message)

    except Exception:
        db.rollback()
        raise

    finally:
        # always drop the scoped session so the next request starts fresh
        db.remove()
Пример #3
0
    def __call__(self, env, start_response):
        """WSGI middleware entry point: record analyzed requests as ActionLog.

        Only requests whose method is listed in ``self.methods_to_analyze``
        and whose path matches a known URL pattern are logged; all other
        requests are passed through to the wrapped application untouched.
        """
        if env['REQUEST_METHOD'] in self.methods_to_analyze:
            url_matcher = self._get_url_matcher(url=env['PATH_INFO'])
            if url_matcher:
                request_body = utils.get_body_from_env(env)

                def save_headers_start_response(status, headers, *args):
                    """Hook for saving response headers for further
                    processing
                    """
                    self.status = status
                    return start_response(status, headers, *args)

                # Prepare arguments for ActionLog instance creation
                create_kwargs = {}

                actor_id = self._get_actor_id(env)
                create_kwargs['actor_id'] = actor_id

                # save actor_id in env for further processing
                env['fuel.action.actor_id'] = actor_id

                create_kwargs['start_timestamp'] = datetime.datetime.now()
                response = self.app(env, save_headers_start_response)
                create_kwargs['end_timestamp'] = datetime.datetime.now()

                # since response is an iterator, to avoid its exhaustion in
                # the analysing process we make two copies of it: one to be
                # processed in stats collection logic and the other to
                # propagate further on middleware stack
                response_to_analyse, response_to_propagate = \
                    itertools.tee(response)

                create_kwargs['action_name'] = \
                    compiled_urls_actions_mapping[url_matcher]['action_name']
                create_kwargs['action_group'] = \
                    compiled_urls_actions_mapping[url_matcher]['action_group']

                create_kwargs['action_type'] = \
                    consts.ACTION_TYPES.http_request

                create_kwargs['additional_info'] = \
                    self._get_additional_info(env,
                                              request_body,
                                              response_to_analyse)

                # get cluster_id from url
                cluster_id = utils.get_group_from_matcher(
                    url_matcher, env['PATH_INFO'], 'cluster_id')
                if cluster_id:
                    cluster_id = int(cluster_id)

                create_kwargs['cluster_id'] = cluster_id

                db.add(ActionLog(**create_kwargs))
                db.commit()

                return response_to_propagate

        return self.app(env, start_response)
def prepare():
    """Insert a release row and a cluster row for the migration test."""
    meta = base.reflect_db_metadata()

    release_values = {
        'name': 'test_name',
        'version': '2015.1-8.0',
        'operating_system': 'ubuntu',
        'state': 'available',
        'networks_metadata': jsonutils.dumps(
            {'neutron': {'networks': [], 'config': {}}}),
    }
    result = db.execute(meta.tables['releases'].insert(), [release_values])
    # primary key of the freshly inserted release row
    releaseid = result.inserted_primary_key[0]

    cluster_values = {
        'name': 'test_env',
        'release_id': releaseid,
        'mode': 'ha_compact',
        'status': 'new',
        'net_provider': 'neutron',
        'grouping': 'roles',
        'fuel_version': '8.0',
        'deployment_tasks': '[]',
        'replaced_deployment_info': '{}',
    }
    db.execute(meta.tables['clusters'].insert(), [cluster_values])

    db.commit()
Пример #5
0
 def consume_msg(self, body, msg):
     """Dispatch an AMQP message to the receiver method it names.

     Commits the DB session and acks on success; requeues on deadlock
     or interrupt; acks (drops) other failures so the message is not
     redelivered forever.  The scoped session is always removed.
     """
     callback = getattr(self.receiver, body["method"])
     try:
         callback(**body["args"])
     except errors.CannotFindTask as e:
         logger.warn(str(e))
         msg.ack()
     except OperationalError as e:
         # str(e) instead of e.message: the .message attribute is
         # Python 2 only and raises AttributeError on Python 3
         details = str(e)
         if (
             'TransactionRollbackError' in details or
             'deadlock' in details
         ):
             logger.exception("Deadlock on message: %s", msg)
             msg.requeue()
         else:
             logger.exception("Operational error on message: %s", msg)
             msg.ack()
     except Exception:
         logger.exception("Message consume failed: %s", msg)
         msg.ack()
     except KeyboardInterrupt:
         # reachable: KeyboardInterrupt derives from BaseException,
         # so the 'except Exception' clause above does not catch it
         logger.error("Receiverd interrupted.")
         msg.requeue()
         raise
     else:
         db.commit()
         msg.ack()
     finally:
         db.remove()
Пример #6
0
 def consume_msg(self, body, msg):
     """Invoke the receiver method named in *body*, then ack/requeue *msg*.

     Deadlocks and interrupts requeue the message; any other failure is
     logged and acked so it is not redelivered.  The session is removed
     in all cases.
     """
     callback = getattr(self.receiver, body["method"])
     try:
         callback(**body["args"])
     except errors.CannotFindTask as e:
         logger.warn(str(e))
         msg.ack()
     except OperationalError as e:
         # use str(e): the Python 2-only 'message' attribute does not
         # exist on Python 3 exceptions
         if ('TransactionRollbackError' in str(e)
                 or 'deadlock' in str(e)):
             logger.exception("Deadlock on message: %s", msg)
             msg.requeue()
         else:
             logger.exception("Operational error on message: %s", msg)
             msg.ack()
     except Exception:
         logger.exception("Message consume failed: %s", msg)
         msg.ack()
     except KeyboardInterrupt:
         # KeyboardInterrupt is a BaseException, so it bypasses the
         # 'except Exception' clause above
         logger.error("Receiverd interrupted.")
         msg.requeue()
         raise
     else:
         db.commit()
         msg.ack()
     finally:
         db.remove()
Пример #7
0
    def execute(self):
        """Create and launch a 'stop_deployment' task for the cluster.

        :raises errors.StopAlreadyRunning: a stop task is already running
        :raises errors.DeploymentNotRunning: nothing is running to stop
        :returns: the created stop task
        """
        stop_running = db().query(Task).filter_by(
            cluster=self.cluster, name='stop_deployment').first()
        if stop_running:
            if stop_running.status == 'running':
                raise errors.StopAlreadyRunning("Stopping deployment task "
                                                "is already launched")
            else:
                # drop the stale finished stop task before making a new one
                db().delete(stop_running)
                db().commit()

        deployment_task = db().query(Task).filter_by(cluster=self.cluster,
                                                     name='deployment',
                                                     status='running').first()
        provisioning_task = db().query(Task).filter_by(
            cluster=self.cluster, name='provision', status='running').first()
        if not deployment_task and not provisioning_task:
            raise errors.DeploymentNotRunning(
                u"Nothing to stop - deployment is "
                u"not running on environment '{0}'".format(self.cluster.id))

        task = Task(name="stop_deployment", cluster=self.cluster)
        db().add(task)
        # db().commit() for consistency with the rest of the method
        # (the original mixed db.commit() and db().commit())
        db().commit()
        self._call_silently(task,
                            tasks.StopDeploymentTask,
                            deploy_task=deployment_task,
                            provision_task=provisioning_task)
        return task
Пример #8
0
    def execute(self):
        """Create a 'reset_environment' task unless deployment is running.

        :raises errors.DeploymentAlreadyStarted: a deploy task is running
        :returns: the created reset task
        """
        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name=consts.TASK_NAMES.deploy,
            status='running').first()
        if deploy_running:
            raise errors.DeploymentAlreadyStarted(
                u"Can't reset environment '{0}' when "
                u"deployment is running".format(self.cluster.id))

        # drop obsolete deployment-related tasks before resetting
        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id, ).filter(
                Task.name.in_([
                    consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment,
                    consts.TASK_NAMES.stop_deployment
                ]))
        for task in obsolete_tasks:
            db().delete(task)
        db().commit()

        task = Task(name=consts.TASK_NAMES.reset_environment,
                    cluster=self.cluster)
        db().add(task)
        # db().commit() for consistency with the rest of the method
        # (the original mixed db.commit() and db().commit())
        db().commit()
        self._call_silently(task, tasks.ResetEnvironmentTask)
        return task
Пример #9
0
def prepare():
    """Seed a release, a cluster, and duplicate-named node groups."""
    meta = base.reflect_db_metadata()

    releaseid = insert_table_row(
        meta.tables["releases"],
        {
            "name": "test_name",
            "version": "2014.2.2-6.1",
            "operating_system": "ubuntu",
            "state": "available",
        },
    )

    clusterid = insert_table_row(
        meta.tables["clusters"],
        {
            "name": "test_env",
            "release_id": releaseid,
            "mode": "ha_compact",
            "status": "new",
            "net_provider": "neutron",
            "grouping": "roles",
            "fuel_version": "6.1",
        },
    )

    # two node groups per name — the migration under test has to cope
    # with duplicate node-group names inside one cluster
    nodegroup_rows = [
        {"cluster_id": clusterid, "name": name}
        for name in ("test_nodegroup_a", "test_nodegroup_a",
                     "test_nodegroup_b", "test_nodegroup_b")
    ]
    db.execute(meta.tables["nodegroups"].insert(), nodegroup_rows)

    db.commit()
Пример #10
0
def prepare():
    """Create one release and one cluster referencing it."""
    meta = base.reflect_db_metadata()

    networks_metadata = jsonutils.dumps({
        'neutron': {
            'networks': [],
            'config': {},
        },
    })
    insert_release = meta.tables['releases'].insert()
    result = db.execute(insert_release, [{
        'name': 'test_name',
        'version': '2015.1-8.0',
        'operating_system': 'ubuntu',
        'state': 'available',
        'networks_metadata': networks_metadata,
    }])
    # generated primary key of the release, used as the FK below
    releaseid = result.inserted_primary_key[0]

    insert_cluster = meta.tables['clusters'].insert()
    db.execute(insert_cluster, [{
        'name': 'test_env',
        'release_id': releaseid,
        'mode': 'ha_compact',
        'status': 'new',
        'net_provider': 'neutron',
        'grouping': 'roles',
        'fuel_version': '8.0',
        'deployment_tasks': '[]',
        'replaced_deployment_info': '{}',
    }])

    db.commit()
Пример #11
0
 def change_cluster_list(env_id, cluster_list):
     """Set the vSphere cluster list on the first nova compute of AZ 0."""
     cluster_db = db.query(Cluster).get(env_id)
     # reassign a deep copy so SQLAlchemy notices the mutation of the
     # JSON attribute
     editable = deepcopy(cluster_db.vmware_attributes.editable)
     cluster_db.vmware_attributes.editable = editable
     zones = editable['value']['availability_zones']
     zones[0]['nova_computes'][0]['vsphere_cluster'] = cluster_list
     db.commit()
Пример #12
0
def collect(resource_type):
    """Collect OpenStack workload (OSWL) statistics for *resource_type*.

    Clears stale records of clusters that are no longer 'operational'
    or 'error', then saves fresh data for every operational cluster and
    commits everything at once.  Errors are logged, never propagated;
    the DB session is always removed.
    """
    try:
        operational_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.operational).all()
        error_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.error).all()

        all_envs_last_recs = \
            OpenStackWorkloadStatsCollection.get_last_by_resource_type(
                resource_type)
        # set comprehensions avoid building an intermediate list
        ready_or_error_ids = {c.id for c in operational_clusters}
        ready_or_error_ids.update(c.id for c in error_clusters)
        envs_ids_to_clear = \
            {r.cluster_id for r in all_envs_last_recs} - ready_or_error_ids
        # Clear current resource data for unavailable clusters.
        # Current OSWL data is cleared for those clusters which status is not
        # 'operational' nor 'error' or when cluster was removed. Data is
        # cleared for cluster only if it was updated recently (today or
        # yesterday). While this collector is running with interval much
        # smaller than one day it should not miss any unavailable cluster.
        # NOTE: loop variable renamed from 'id' to avoid shadowing the builtin
        for env_id in envs_ids_to_clear:
            oswl_statistics_save(env_id, resource_type, [])

        # Collect current OSWL data and update data in DB
        for cluster in operational_clusters:
            try:
                client_provider = helpers.ClientProvider(cluster)
                proxy_for_os_api = utils.get_proxy_for_cluster(cluster)
                version_info = utils.get_version_info(cluster)

                with utils.set_proxy(proxy_for_os_api):
                    data = helpers.get_info_from_os_resource_manager(
                        client_provider, resource_type)
                    oswl_statistics_save(cluster.id, resource_type, data,
                                         version_info=version_info)

            except errors.StatsException as e:
                # expected collection failure: log without traceback
                logger.error("Cannot collect OSWL resource {0} for cluster "
                             "with id {1}. Details: {2}."
                             .format(resource_type,
                                     cluster.id,
                                     six.text_type(e))
                             )
            except Exception as e:
                logger.exception("Error while collecting OSWL resource {0} "
                                 "for cluster with id {1}. Details: {2}."
                                 .format(resource_type,
                                         cluster.id,
                                         six.text_type(e))
                                 )

        db.commit()

    except Exception as e:
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}"
                         .format(resource_type, six.text_type(e)))
    finally:
        db.remove()
Пример #13
0
 def _insert_deployment_graph(self):
     """Insert a graph row named 'test_graph' and return its primary key."""
     graphs_table = self.meta.tables['deployment_graphs']
     result = db.execute(graphs_table.insert(), [{'name': 'test_graph'}])
     db.commit()
     return result.inserted_primary_key[0]
Пример #14
0
 def _insert_deployment_graph(self):
     """Create a 'test_graph' deployment graph row; return the new PK."""
     insert_stmt = self.meta.tables['deployment_graphs'].insert()
     insert_result = db.execute(insert_stmt, [{'name': 'test_graph'}])
     db.commit()
     deployment_graph_id = insert_result.inserted_primary_key[0]
     return deployment_graph_id
def setup_module():
    """Rebuild the DB at the prepare revision, seed it, then downgrade."""
    dropdb()
    alembic.command.upgrade(ALEMBIC_CONFIG, _prepare_revision)

    # seed fixture data while the schema is at the prepare revision
    prepare()
    db.commit()

    # migrate down to the revision actually under test
    alembic.command.downgrade(ALEMBIC_CONFIG, _test_revision)
def setup_module():
    """Prepare the test database for the downgrade-migration tests."""
    # start from a clean database upgraded to the prepare revision
    dropdb()
    alembic.command.upgrade(ALEMBIC_CONFIG, _prepare_revision)

    prepare()
    db.commit()

    # run the downgrade whose behaviour the module's tests verify
    alembic.command.downgrade(ALEMBIC_CONFIG, _test_revision)
def prepare():
    """Seed release, node, task and deployment-history rows."""
    meta = base.reflect_db_metadata()

    release = {
        'name': 'test_name',
        'version': '2015.1-8.0',
        'operating_system': 'ubuntu',
        'state': 'available',
        'networks_metadata': jsonutils.dumps({
            'neutron': {'networks': [], 'config': {}},
        }),
        'volumes_metadata': jsonutils.dumps({}),
    }
    db.execute(meta.tables['releases'].insert(), [release])

    node = {
        'uuid': '26b508d0-0d76-4159-bce9-f67ec2765480',
        'cluster_id': None,
        'group_id': None,
        'status': 'discover',
        'meta': '{}',
        'mac': 'aa:aa:aa:aa:aa:aa',
        'timestamp': datetime.datetime.utcnow(),
    }
    db.execute(meta.tables['nodes'].insert(), [node])

    # the task id (55) is referenced by the history row below
    task = {
        'id': 55,
        'uuid': '219eaafe-01a1-4f26-8edc-b9d9b0df06b3',
        'name': 'deployment',
        'status': 'running',
        'deployment_info': jsonutils.dumps({}),
    }
    db.execute(meta.tables['tasks'].insert(), [task])

    history = {
        'uuid': 'fake_uuid_0',
        'deployment_graph_task_name': 'fake',
        'node_id': 'fake_node_id',
        'task_id': 55,
        'status': 'pending',
        'summary': jsonutils.dumps({'fake': 'fake'}),
    }
    db.execute(meta.tables['deployment_history'].insert(), [history])

    db.commit()
Пример #18
0
    def execute(self, force=False, **kwargs):
        """Reset the environment: clear history, reset nodes, spawn subtasks.

        :param force: clear the tasks history even when a task is running
        :raises errors.DeploymentAlreadyStarted: a deployment task is running
            and *force* is not set
        :returns: the created 'reset_environment' supertask
        """
        try:
            self.clear_tasks_history(force=force)
        except errors.TaskAlreadyRunning:
            raise errors.DeploymentAlreadyStarted(
                "Can't reset environment '{0}' when "
                "running deployment task exists.".format(
                    self.cluster.id
                )
            )

        # FIXME(aroma): remove updating of 'deployed_before'
        # when stop action is reworked. 'deployed_before'
        # flag identifies whether stop action is allowed for the
        # cluster. Please, refer to [1] for more details.
        # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
        objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

        nodes = objects.Cluster.get_nodes_by_role(
            self.cluster, consts.VIRTUAL_NODE_TYPES.virt
        )
        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        objects.ClusterPluginLinkCollection.delete_by_cluster_id(
            self.cluster.id)

        db().commit()

        supertask = Task(
            name=consts.TASK_NAMES.reset_environment,
            cluster=self.cluster
        )
        db().add(supertask)
        al = TaskHelper.create_action_log(supertask)

        reset_nodes = supertask.create_subtask(
            consts.TASK_NAMES.reset_nodes
        )

        remove_keys_task = supertask.create_subtask(
            consts.TASK_NAMES.remove_keys
        )

        remove_ironic_bootstrap_task = supertask.create_subtask(
            consts.TASK_NAMES.remove_ironic_bootstrap
        )

        # db().commit() for consistency — the original mixed db.commit()
        # and db().commit() within the same method
        db().commit()

        rpc.cast('naily', [
            tasks.ResetEnvironmentTask.message(reset_nodes),
            tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
            tasks.RemoveClusterKeys.message(remove_keys_task)
        ])
        TaskHelper.update_action_log(supertask, al)
        return supertask
Пример #19
0
 def test_deployment_graph_creation(self):
     """Inserted graph must be readable back with its generated key."""
     graphs = self.meta.tables['deployment_graphs']
     insert_result = db.execute(graphs.insert(), [{'name': 'test_graph'}])
     db.commit()
     graph_key = insert_result.inserted_primary_key[0]
     rows = db.execute(sa.select([graphs]))
     self.assertIn((graph_key, u'test_graph'), list(rows))
Пример #20
0
    def execute(self, force=False, **kwargs):
        """Reset the environment and schedule the reset/cleanup subtasks.

        :param force: clear the tasks history even when a task is running
        :raises errors.DeploymentAlreadyStarted: a deployment task is running
            and *force* is not set
        :returns: the created 'reset_environment' supertask
        """
        try:
            self.clear_tasks_history(force=force)
        except errors.TaskAlreadyRunning:
            raise errors.DeploymentAlreadyStarted(
                "Can't reset environment '{0}' when "
                "running deployment task exists.".format(
                    self.cluster.id
                )
            )

        # FIXME(aroma): remove updating of 'deployed_before'
        # when stop action is reworked. 'deployed_before'
        # flag identifies whether stop action is allowed for the
        # cluster. Please, refer to [1] for more details.
        # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
        objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

        nodes = objects.Cluster.get_nodes_by_role(
            self.cluster, consts.VIRTUAL_NODE_TYPES.virt
        )
        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        objects.ClusterPluginLinkCollection.delete_by_cluster_id(
            self.cluster.id)

        db().commit()

        supertask = Task(
            name=consts.TASK_NAMES.reset_environment,
            cluster=self.cluster
        )
        db().add(supertask)
        al = TaskHelper.create_action_log(supertask)

        reset_nodes = supertask.create_subtask(
            consts.TASK_NAMES.reset_nodes
        )

        remove_keys_task = supertask.create_subtask(
            consts.TASK_NAMES.remove_keys
        )

        remove_ironic_bootstrap_task = supertask.create_subtask(
            consts.TASK_NAMES.remove_ironic_bootstrap
        )

        # db().commit() for consistency — the original mixed db.commit()
        # and db().commit() within the same method
        db().commit()

        rpc.cast('naily', [
            tasks.ResetEnvironmentTask.message(reset_nodes),
            tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
            tasks.RemoveClusterKeys.message(remove_keys_task)
        ])
        TaskHelper.update_action_log(supertask, al)
        return supertask
Пример #21
0
    def execute(self, **kwargs):
        """Reset the environment: drop obsolete tasks and spawn reset tasks.

        :raises errors.DeploymentAlreadyStarted: a deploy task is running
        :returns: the created 'reset_environment' supertask
        """

        # FIXME(aroma): remove updating of 'deployed_before'
        # when stop action is reworked. 'deployed_before'
        # flag identifies whether stop action is allowed for the
        # cluster. Please, refer to [1] for more details.
        # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
        objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name=consts.TASK_NAMES.deploy,
            status='running').first()
        if deploy_running:
            raise errors.DeploymentAlreadyStarted(
                u"Can't reset environment '{0}' when "
                u"deployment is running".format(self.cluster.id))

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id, ).filter(
                Task.name.in_([
                    consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment,
                    consts.TASK_NAMES.stop_deployment
                ]))

        for task in obsolete_tasks:
            db().delete(task)

        nodes = objects.Cluster.get_nodes_by_role(
            self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        db().commit()

        supertask = Task(name=consts.TASK_NAMES.reset_environment,
                         cluster=self.cluster)
        db().add(supertask)
        al = TaskHelper.create_action_log(supertask)

        # NOTE(review): both subtasks below are created with the
        # 'reset_environment' name; newer variants of this method use
        # 'remove_keys' / 'remove_ironic_bootstrap' — confirm this is
        # intentional for this version.
        remove_keys_task = supertask.create_subtask(
            consts.TASK_NAMES.reset_environment)

        remove_ironic_bootstrap_task = supertask.create_subtask(
            consts.TASK_NAMES.reset_environment)

        # db().commit() for consistency — the original mixed db.commit()
        # and db().commit() within the same method
        db().commit()

        rpc.cast('naily', [
            tasks.ResetEnvironmentTask.message(supertask),
            tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
            tasks.RemoveClusterKeys.message(remove_keys_task)
        ])
        TaskHelper.update_action_log(supertask, al)
        return supertask
Пример #22
0
    def execute(self):
        """Reset the environment and schedule the key-removal subtask.

        :raises errors.DeploymentAlreadyStarted: a deploy task is running
        :returns: the created 'reset_environment' supertask
        """
        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name=consts.TASK_NAMES.deploy,
            status='running'
        ).first()
        if deploy_running:
            raise errors.DeploymentAlreadyStarted(
                u"Can't reset environment '{0}' when "
                u"deployment is running".format(
                    self.cluster.id
                )
            )

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
        ).filter(
            Task.name.in_([
                consts.TASK_NAMES.deploy,
                consts.TASK_NAMES.deployment,
                consts.TASK_NAMES.stop_deployment
            ])
        )

        for task in obsolete_tasks:
            db().delete(task)

        nodes = objects.Cluster.get_nodes_by_role(
            self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        db().commit()

        supertask = Task(
            name=consts.TASK_NAMES.reset_environment,
            cluster=self.cluster
        )
        db().add(supertask)
        al = TaskHelper.create_action_log(supertask)

        remove_keys_task = supertask.create_subtask(
            consts.TASK_NAMES.reset_environment
        )

        # db().commit() for consistency — the original mixed db.commit()
        # and db().commit() within the same method
        db().commit()

        rpc.cast('naily', [
            tasks.ResetEnvironmentTask.message(supertask),
            tasks.RemoveClusterKeys.message(remove_keys_task)
        ])
        TaskHelper.update_action_log(supertask, al)
        return supertask
Пример #23
0
    def POST(self, cluster_id):
        """Reassign a node to another cluster, remapping its networks.

        Reads ``node_id`` from the request body, moves that node into the
        cluster identified by *cluster_id*, remaps its IPs and NIC/bond
        network assignments to the target cluster's network groups, and
        finally schedules the node deletion via astute.

        :param cluster_id: id of the target cluster
        :http: * 404 when the cluster or the node is not found
        """
        cluster = self.get_object_or_404(objects.Cluster, cluster_id)
        data = self.checked_data()
        node_id = data["node_id"]
        node = self.get_object_or_404(objects.Node, node_id)

        # map network groups of the node's current cluster to the target one
        netgroups_mapping = self.get_netgroups_map(node.cluster, cluster)

        orig_roles = node.roles

        objects.Node.update_roles(node, [])  # flush
        objects.Node.update_pending_roles(node, [])  # flush

        # drop deployment artifacts accumulated in the old cluster
        node.replaced_deployment_info = []
        node.deployment_info = []
        node.kernel_params = None
        node.cluster_id = cluster.id
        node.group_id = None

        objects.Node.assign_group(node)  # flush
        objects.Node.update_pending_roles(node, orig_roles)  # flush

        # remap node IPs to the target cluster's network groups
        for ip in node.ip_addrs:
            ip.network = netgroups_mapping[ip.network]

        nic_assignments = db.query(models.NetworkNICAssignment).\
            join(models.NodeNICInterface).\
            filter(models.NodeNICInterface.node_id == node.id).\
            all()
        for nic_assignment in nic_assignments:
            nic_assignment.network_id = \
                netgroups_mapping[nic_assignment.network_id]

        bond_assignments = db.query(models.NetworkBondAssignment).\
            join(models.NodeBondInterface).\
            filter(models.NodeBondInterface.node_id == node.id).\
            all()
        for bond_assignment in bond_assignments:
            bond_assignment.network_id = \
                netgroups_mapping[bond_assignment.network_id]

        objects.Node.add_pending_change(node,
                                        consts.CLUSTER_CHANGES.interfaces)

        node.pending_addition = True
        node.pending_deletion = False

        task = models.Task(name=consts.TASK_NAMES.node_deletion,
                           cluster=cluster)

        db.commit()

        self.delete_node_by_astute(task, node)
Пример #24
0
def test_db_driver(handler):
    """Test variant of the DB driver: roll back on failure, always commit."""
    try:
        response = handler()
        return response
    except web.HTTPError:
        # 4xx/5xx statuses mean the handler failed — undo its changes
        failed = str(web.ctx.status).startswith(("4", "5"))
        if failed:
            db.rollback()
        raise
    except Exception:
        db.rollback()
        raise
    finally:
        # committing after a rollback merely closes an empty transaction
        db.commit()
Пример #25
0
def test_db_driver(handler):
    """DB driver used in tests: rollback on errors, commit at the end."""
    error_prefixes = ("4", "5")
    try:
        return handler()
    except web.HTTPError:
        # only client/server error statuses undo the handler's changes
        if str(web.ctx.status).startswith(error_prefixes):
            db.rollback()
        raise
    except Exception:
        db.rollback()
        raise
    finally:
        db.commit()
Пример #26
0
 def test_deployment_graph_creation(self):
     """A created graph row must come back in a full-table select."""
     graphs_table = self.meta.tables['deployment_graphs']
     insert_result = db.execute(graphs_table.insert(),
                                [{'name': 'test_graph'}])
     db.commit()
     graph_key = insert_result.inserted_primary_key[0]
     select_result = db.execute(sa.select([graphs_table]))
     self.assertIn((graph_key, u'test_graph'), list(select_result))
Пример #27
0
    def POST(self, cluster_id):
        """Move a node into the cluster *cluster_id* and remap its networks.

        Takes ``node_id`` from the request body, reassigns the node to the
        target cluster, translates its IP and NIC/bond network assignments
        through the old→new network-group mapping, and schedules deletion
        of the node via astute.

        :param cluster_id: id of the target cluster
        :http: * 404 when the cluster or the node is not found
        """
        cluster = self.get_object_or_404(objects.Cluster, cluster_id)
        data = self.checked_data()
        node_id = data["node_id"]
        node = self.get_object_or_404(objects.Node, node_id)

        # old-cluster → target-cluster network group mapping
        netgroups_mapping = self.get_netgroups_map(node.cluster, cluster)

        orig_roles = node.roles

        objects.Node.update_roles(node, [])  # flush
        objects.Node.update_pending_roles(node, [])  # flush

        # clear deployment state inherited from the previous cluster
        node.replaced_deployment_info = []
        node.deployment_info = []
        node.kernel_params = None
        node.cluster_id = cluster.id
        node.group_id = None

        objects.Node.assign_group(node)  # flush
        objects.Node.update_pending_roles(node, orig_roles)  # flush

        # move the node's IPs to the corresponding target network groups
        for ip in node.ip_addrs:
            ip.network = netgroups_mapping[ip.network]

        nic_assignments = db.query(models.NetworkNICAssignment).\
            join(models.NodeNICInterface).\
            filter(models.NodeNICInterface.node_id == node.id).\
            all()
        for nic_assignment in nic_assignments:
            nic_assignment.network_id = \
                netgroups_mapping[nic_assignment.network_id]

        bond_assignments = db.query(models.NetworkBondAssignment).\
            join(models.NodeBondInterface).\
            filter(models.NodeBondInterface.node_id == node.id).\
            all()
        for bond_assignment in bond_assignments:
            bond_assignment.network_id = \
                netgroups_mapping[bond_assignment.network_id]

        objects.Node.add_pending_change(node,
                                        consts.CLUSTER_CHANGES.interfaces)

        node.pending_addition = True
        node.pending_deletion = False

        task = models.Task(name=consts.TASK_NAMES.node_deletion,
                           cluster=cluster)

        db.commit()

        self.delete_node_by_astute(task, node)
Пример #28
0
    def execute(self):
        """Create and launch a 'stop_deployment' task for the cluster.

        :raises errors.StopAlreadyRunning: a stop task is already running
        :raises errors.DeploymentNotRunning: no deployment to stop, or
            only provisioning is running (interruption not implemented)
        :returns: the created stop task
        """
        stop_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name='stop_deployment'
        ).first()
        if stop_running:
            if stop_running.status == 'running':
                raise errors.StopAlreadyRunning(
                    "Stopping deployment task "
                    "is already launched"
                )
            else:
                # drop the stale finished stop task before creating a new one
                db().delete(stop_running)
                db().commit()

        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name='deployment',
            status='running'
        ).first()
        if not deploy_running:
            provisioning_running = db().query(Task).filter_by(
                cluster=self.cluster,
                name='provision',
                status='running'
            ).first()
            if provisioning_running:
                raise errors.DeploymentNotRunning(
                    u"Provisioning interruption for environment "
                    u"'{0}' is not implemented right now".format(
                        self.cluster.id
                    )
                )
            raise errors.DeploymentNotRunning(
                u"Nothing to stop - deployment is "
                u"not running on environment '{0}'".format(
                    self.cluster.id
                )
            )

        task = Task(
            name="stop_deployment",
            cluster=self.cluster
        )
        db().add(task)
        # db().commit() for consistency with the rest of the method
        # (the original mixed db.commit() and db().commit())
        db().commit()
        self._call_silently(
            task,
            tasks.StopDeploymentTask,
            deploy_task=deploy_running
        )
        return task
def prepare():
    """Fill the extensions migration buffer table with test rows."""
    meta = base.reflect_db_metadata()

    buffer_rows = [
        {
            'extension_name': 'volume_manager',
            'data': jsonutils.dumps({
                'node_id': 1,
                'volumes': [{'volume': 1}],
            }),
        },
        {
            'extension_name': 'volume_manager',
            'data': jsonutils.dumps({
                'node_id': 2,
                'volumes': [{'volume': 2}],
            }),
        },
        {
            'extension_name': 'some_different_extension',
            'data': 'some_data',
        },
    ]

    db.execute(
        meta.tables[extensions_migration_buffer_table_name].insert(),
        buffer_rows)

    db.commit()
Пример #30
0
    def __call__(self, env, start_response):
        """WSGI entry point.

        For request methods under analysis whose path matches a tracked
        URL pattern, time the request, collect request/response details
        and persist them as an ActionLog row; all other requests pass
        straight through to the wrapped application.
        """
        if env["REQUEST_METHOD"] in self.methods_to_analyze:
            url_matcher = self._get_url_matcher(url=env["PATH_INFO"])
            if url_matcher:
                request_body = utils.get_body_from_env(env)

                def save_headers_start_response(status, headers, *args):
                    """Hook for saving resp headers for further processing"""
                    # NOTE(review): the status is stored on the middleware
                    # instance, not per-request -- concurrent requests would
                    # overwrite each other's value; confirm single-threaded
                    # usage.
                    self.status = status
                    return start_response(status, headers, *args)

                # Prepare arguments for ActionLog instance creation
                create_kwargs = {}

                actor_id = self._get_actor_id(env)
                create_kwargs["actor_id"] = actor_id

                # save actor_id in env for further processing
                env["fuel.action.actor_id"] = actor_id

                # Timestamps bracket the wrapped application call so the
                # log row records the full downstream handling time.
                create_kwargs["start_timestamp"] = datetime.datetime.utcnow()
                response = self.app(env, save_headers_start_response)
                create_kwargs["end_timestamp"] = datetime.datetime.utcnow()

                # since responce is iterator to avoid its exhaustion in
                # analysing process we make two copies of it: one to be
                # processed in stats collection logic and the other to
                # propagate further on middleware stack
                response_to_analyse, response_to_propagate = itertools.tee(response)

                create_kwargs["action_name"] = compiled_urls_actions_mapping[url_matcher]["action_name"]
                create_kwargs["action_group"] = compiled_urls_actions_mapping[url_matcher]["action_group"]

                create_kwargs["action_type"] = consts.ACTION_TYPES.http_request

                create_kwargs["additional_info"] = self._get_additional_info(env, request_body, response_to_analyse)

                # get cluster_id from url
                cluster_id = utils.get_group_from_matcher(url_matcher, env["PATH_INFO"], "cluster_id")
                if cluster_id:
                    cluster_id = int(cluster_id)

                create_kwargs["cluster_id"] = cluster_id

                db.add(ActionLog(**create_kwargs))
                db.commit()

                return response_to_propagate

        return self.app(env, start_response)
def prepare():
    """Insert a single test release row used by the migration tests."""
    meta = base.reflect_db_metadata()

    release_row = {
        'name': 'test_name',
        'version': '2015.1-8.0',
        'operating_system': 'ubuntu',
        'state': 'available',
        'networks_metadata': jsonutils.dumps(
            {'neutron': {'networks': [], 'config': {}}}),
        'volumes_metadata': jsonutils.dumps({}),
    }
    db.execute(meta.tables['releases'].insert(), [release_row])
    db.commit()
Пример #32
0
    def remove_undeployed_nodes_from_db(cls, nodes_to_delete):
        """Removes undeployed nodes from the given list from the DB.

        :param List nodes_to_delete: List of nodes as returned by
            :meth:`DeletionTask.format_node_to_delete`
        :returns: Remaining (non-undeployed) nodes to delete.
        """

        node_names_dict = dict(
            (node['id'], node['slave_name']) for node in nodes_to_delete)

        # Drop nodes still in 'discover' state: they were never deployed,
        # so nothing has to be cleaned up on the node itself.
        objects.NodeCollection \
            .filter_by_list(None, 'id', six.iterkeys(node_names_dict)) \
            .filter(
                objects.Node.model.status == consts.NODE_STATUSES.discover
            ) \
            .delete(synchronize_session=False)
        db.commit()

        remaining_nodes_db = db().query(
            Node.id).filter(Node.id.in_(node_names_dict.keys()))

        remaining_nodes_ids = set(
            row[0] for row in remaining_nodes_db)

        # List comprehension instead of ``filter`` so the result is a
        # real list on both Python 2 and Python 3 (``filter`` returns a
        # lazy iterator on Py3, contradicting the documented return).
        remaining_nodes = [
            node for node in nodes_to_delete
            if node['id'] in remaining_nodes_ids
        ]

        deleted_nodes_ids = set(node_names_dict).difference(
            remaining_nodes_ids)

        slave_names_joined = ', '.join(
            slave_name
            for id, slave_name in six.iteritems(node_names_dict)
            if id in deleted_nodes_ids)
        if slave_names_joined:
            logger.info("Nodes are not deployed yet, can't clean MBR: %s",
                        slave_names_joined)

        return remaining_nodes
Пример #33
0
    def remove_undeployed_nodes_from_db(cls, nodes_to_delete):
        """Removes undeployed nodes from the given list from the DB.

        :param List nodes_to_delete: List of nodes as returned by
            :meth:`DeletionTask.format_node_to_delete`
        :returns: Remaining (non-undeployed) nodes to delete.
        """

        node_names_dict = dict(
            (node['id'], node['slave_name']) for node in nodes_to_delete)

        # Drop nodes still in 'discover' state: they were never deployed,
        # so nothing has to be cleaned up on the node itself.
        objects.NodeCollection \
            .filter_by_list(None, 'id', six.iterkeys(node_names_dict)) \
            .filter(
                objects.Node.model.status == consts.NODE_STATUSES.discover
            ) \
            .delete(synchronize_session=False)
        db.commit()

        remaining_nodes_db = db().query(
            Node.id).filter(Node.id.in_(node_names_dict.keys()))

        remaining_nodes_ids = set(
            row[0] for row in remaining_nodes_db)

        # List comprehension instead of ``filter`` so the result is a
        # real list on both Python 2 and Python 3 (``filter`` returns a
        # lazy iterator on Py3, contradicting the documented return).
        remaining_nodes = [
            node for node in nodes_to_delete
            if node['id'] in remaining_nodes_ids
        ]

        deleted_nodes_ids = set(node_names_dict).difference(
            remaining_nodes_ids)

        slave_names_joined = ', '.join(
            slave_name
            for id, slave_name in six.iteritems(node_names_dict)
            if id in deleted_nodes_ids)
        if slave_names_joined:
            logger.info("Nodes are not deployed yet, can't clean MBR: %s",
                        slave_names_joined)

        return remaining_nodes
Пример #34
0
    def execute(self):
        """Create and launch a ``reset_environment`` task for the cluster.

        Refuses to run while a deployment is in progress, removes obsolete
        deploy-related tasks, resets the "VMs created" flag on virt nodes
        and then launches :class:`tasks.ResetEnvironmentTask`.

        :returns: the created ``reset_environment`` Task
        :raises errors.DeploymentAlreadyStarted: deployment is running
        """
        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name=consts.TASK_NAMES.deploy,
            status='running'
        ).first()
        if deploy_running:
            raise errors.DeploymentAlreadyStarted(
                u"Can't reset environment '{0}' when "
                u"deployment is running".format(
                    self.cluster.id
                )
            )

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
        ).filter(
            Task.name.in_([
                consts.TASK_NAMES.deploy,
                consts.TASK_NAMES.deployment,
                consts.TASK_NAMES.stop_deployment
            ])
        )
        for task in obsolete_tasks:
            db().delete(task)

        nodes = objects.Cluster.get_nodes_by_role(
            self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        db().commit()

        task = Task(
            name=consts.TASK_NAMES.reset_environment,
            cluster=self.cluster
        )
        db().add(task)
        # Use the db() session accessor consistently, matching the
        # db().commit() call above.
        db().commit()
        self._call_silently(
            task,
            tasks.ResetEnvironmentTask
        )
        return task
Пример #35
0
 def consume_msg(self, body, msg):
     """Dispatch an RPC message to the receiver and ack/requeue it.

     The handler named by ``body['method']`` is looked up on the receiver
     and invoked with ``body['args']``.  Messages are acked on success and
     on handler errors (so they are not redelivered forever); they are
     requeued only on operator interruption.  The DB session is removed
     in every case.
     """
     callback = getattr(self.receiver, body["method"])
     try:
         callback(**body["args"])
     except errors.CannotFindTask as e:
         # Stale message referring to a removed task -- log and drop.
         # logger.warning: logger.warn is a deprecated alias.
         logger.warning(str(e))
         msg.ack()
     except KeyboardInterrupt:
         # Operator interruption: put the message back for redelivery.
         # (Listed before Exception for clarity; KeyboardInterrupt is a
         # BaseException and was never caught by the Exception clause.)
         logger.error("Receiver interrupted.")
         msg.requeue()
         raise
     except Exception:
         # Any other handler failure: log the traceback and drop the
         # message so it is not redelivered forever.
         logger.error(traceback.format_exc())
         msg.ack()
     else:
         db.commit()
         msg.ack()
     finally:
         db.remove()
Пример #36
0
    def execute(self):
        """Reset the environment.

        Removes stale deploy-related tasks, resets the "VMs created" flag
        on virt nodes, then creates a ``reset_environment`` supertask
        (with an action-log entry and a key-removal subtask) and casts
        both task messages to Astute over RPC.

        :returns: the created supertask
        :raises errors.DeploymentAlreadyStarted: a deployment is running
        """
        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name=consts.TASK_NAMES.deploy,
            status='running').first()
        if deploy_running:
            raise errors.DeploymentAlreadyStarted(
                u"Can't reset environment '{0}' when "
                u"deployment is running".format(self.cluster.id))

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id, ).filter(
                Task.name.in_([
                    consts.TASK_NAMES.deploy, consts.TASK_NAMES.deployment,
                    consts.TASK_NAMES.stop_deployment
                ]))

        for task in obsolete_tasks:
            db().delete(task)

        nodes = objects.Cluster.get_nodes_by_role(
            self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        db().commit()

        supertask = Task(name=consts.TASK_NAMES.reset_environment,
                         cluster=self.cluster)
        db().add(supertask)
        al = TaskHelper.create_action_log(supertask)

        # NOTE(review): the key-removal subtask reuses the
        # reset_environment task name -- confirm this is intentional.
        remove_keys_task = supertask.create_subtask(
            consts.TASK_NAMES.reset_environment)

        # Commit before casting so Astute sees the persisted tasks.
        db.commit()

        rpc.cast('naily', [
            tasks.ResetEnvironmentTask.message(supertask),
            tasks.RemoveClusterKeys.message(remove_keys_task)
        ])
        TaskHelper.update_action_log(supertask, al)
        return supertask
Пример #37
0
    def remove_undeployed_nodes_from_db(cls, nodes_to_delete):
        """Removes undeployed nodes from the given list from the DB.

        :param List nodes_to_delete: List of nodes as returned by
            :meth:`DeletionTask.format_node_to_delete`
        :returns: Remaining (deployed) nodes to delete.
        """

        node_names_dict = dict(
            (node['id'], node['slave_name']) for node in nodes_to_delete)

        node_ids = [n['id'] for n in nodes_to_delete]
        discovery_ids = objects.NodeCollection.discovery_node_ids()

        # Nodes still in discovery were never deployed -- delete them
        # outright, nothing has to be cleaned up on the node itself.
        objects.NodeCollection.delete_by_ids(
            set(discovery_ids) & set(node_ids))
        db.commit()

        remaining_nodes_db = db().query(
            Node.id).filter(Node.id.in_(node_names_dict.keys()))

        remaining_nodes_ids = set(
            row[0] for row in remaining_nodes_db)

        # List comprehension instead of ``filter`` so the result is a
        # real list on both Python 2 and Python 3 (``filter`` returns a
        # lazy iterator on Py3, contradicting the documented return).
        remaining_nodes = [
            node for node in nodes_to_delete
            if node['id'] in remaining_nodes_ids
        ]

        deleted_nodes_ids = set(node_names_dict).difference(
            remaining_nodes_ids)

        slave_names_joined = ', '.join(
            slave_name
            for id, slave_name in six.iteritems(node_names_dict)
            if id in deleted_nodes_ids)
        if slave_names_joined:
            logger.info("Nodes are not deployed yet, can't clean MBR: %s",
                        slave_names_joined)

        return remaining_nodes
Пример #38
0
def collect(resource_type):
    """Collect OS workload statistics for the given resource type.

    Walks every operational cluster, queries its OpenStack API through
    the cluster's proxy and saves the collected data; the DB session is
    removed afterwards regardless of the outcome.
    """
    try:
        clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.operational).all()

        for cluster in clusters:
            provider = utils.ClientProvider(cluster)
            cluster_proxy = utils.get_proxy_for_cluster(cluster)

            with utils.set_proxy(cluster_proxy):
                collected = utils.get_info_from_os_resource_manager(
                    provider, resource_type)
                oswl_statistics_save(cluster.id, resource_type, collected)

        db.commit()
    except Exception as e:
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}".format(
                             resource_type, six.text_type(e)))
    finally:
        db.remove()
Пример #39
0
    def execute(self):
        """Reset the environment by launching a ``reset_environment`` task.

        Removes stale deploy-related tasks first; refuses to run while a
        deployment is in progress.

        :returns: the created Task
        :raises errors.DeploymentAlreadyStarted: a deployment is running
        """
        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name="deploy",
            status="running"
        ).first()
        if deploy_running:
            raise errors.DeploymentAlreadyStarted(
                u"Can't reset environment '{0}' when "
                u"deployment is running".format(self.cluster.id)
            )

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id
        ).filter(
            Task.name.in_(["deploy", "deployment", "stop_deployment"])
        )
        for task in obsolete_tasks:
            db().delete(task)
        db().commit()

        task = Task(name="reset_environment", cluster=self.cluster)
        db().add(task)
        # Use the db() session accessor consistently, matching the
        # db().commit() call above.
        db().commit()
        self._call_silently(task, tasks.ResetEnvironmentTask)
        return task
Пример #40
0
    def copy_vips(orig_cluster, new_cluster):
        """Copy VIP addresses between clusters' same-named network groups.

        For every network group of ``orig_cluster``, its VIPs (IP rows
        with no node and a non-null vip_type) are duplicated onto the
        network group of the same name in ``new_cluster``.
        """
        orig_vips = {}
        for ng in orig_cluster.network_groups:
            vips = db.query(models.IPAddr).filter(
                models.IPAddr.network == ng.id,
                models.IPAddr.node.is_(None),
                models.IPAddr.vip_type.isnot(None),
            ).all()
            orig_vips[ng.name] = list(vips)

        new_vips = []
        for ng in new_cluster.network_groups:
            # Default to [] for network groups that exist only in the new
            # cluster; a bare .get() would return None and crash the loop.
            orig_ng_vips = orig_vips.get(ng.name, [])
            for vip in orig_ng_vips:
                ip_addr = models.IPAddr(
                    network=ng.id,
                    ip_addr=vip.ip_addr,
                    vip_type=vip.vip_type,
                )
                new_vips.append(ip_addr)
        db.add_all(new_vips)
        db.commit()
Пример #41
0
    def copy_vips(orig_cluster, new_cluster):
        """Copy VIP addresses between clusters' same-named network groups.

        For every network group of ``orig_cluster``, its VIPs (IP rows
        with no node and a non-null vip_type) are duplicated onto the
        network group of the same name in ``new_cluster``.
        """
        orig_vips = {}
        for ng in orig_cluster.network_groups:
            vips = db.query(models.IPAddr).filter(
                models.IPAddr.network == ng.id,
                models.IPAddr.node.is_(None),
                models.IPAddr.vip_type.isnot(None),
            ).all()
            orig_vips[ng.name] = list(vips)

        new_vips = []
        for ng in new_cluster.network_groups:
            # Default to [] for network groups that exist only in the new
            # cluster; a bare .get() would return None and crash the loop.
            orig_ng_vips = orig_vips.get(ng.name, [])
            for vip in orig_ng_vips:
                ip_addr = models.IPAddr(
                    network=ng.id,
                    ip_addr=vip.ip_addr,
                    vip_type=vip.vip_type,
                )
                new_vips.append(ip_addr)
        db.add_all(new_vips)
        db.commit()
def prepare():
    """Set up a release, orchestrator data, cluster, attributes and
    network fixtures for the migration tests.
    """
    meta = base.reflect_db_metadata()

    roles_metadata = jsonutils.dumps({
        "mongo": {
            "name": "Mongo",
            "description": "Mongo role"
        }
    })

    result = db.execute(
        meta.tables['releases'].insert(),
        [{
            'name': 'test_name',
            'version': '2014.2-6.0',
            'operating_system': 'ubuntu',
            'state': 'available',
            'roles_metadata': roles_metadata,
            'attributes_metadata': jsonutils.dumps({
                'editable': {
                    'storage': {
                        'volumes_lvm': {},
                    },
                    'common': {},
                },
                'generated': {
                    'cobbler': {
                        'profile': {
                            'generator_arg': 'ubuntu_1204_x86_64'
                        }
                    }
                },
            }),
            'networks_metadata': jsonutils.dumps({
                'neutron': {
                    'networks': [
                        {'assign_vip': True},
                    ]
                },
                'nova_network': {
                    'networks': [
                        {'assign_vip': False},
                    ]
                },
            }),
            'is_deployable': True,
        }])
    releaseid = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables['release_orchestrator_data'].insert(),
        [{
            'release_id': releaseid,
            'puppet_manifests_source': 'rsync://0.0.0.0:/puppet/manifests',
            'puppet_modules_source': 'rsync://0.0.0.0:/puppet/modules',
            'repo_metadata': jsonutils.dumps({
                'base': 'http://baseuri base-suite main',
                'test': 'http://testuri test-suite main',
            })
        }])

    result = db.execute(
        meta.tables['clusters'].insert(),
        [{
            'name': 'test_env',
            'release_id': releaseid,
            'mode': 'ha_compact',
            'status': 'new',
            'net_provider': 'neutron',
            'grouping': 'roles',
            'fuel_version': '6.0',
        }])
    clusterid = result.inserted_primary_key[0]

    db.execute(
        meta.tables['attributes'].insert(),
        [{
            'cluster_id': clusterid,
            'editable': '{"common": {}}',
            'generated': '{"cobbler": {"profile": "ubuntu_1204_x86_64"}}',
        }])

    db.execute(
        meta.tables['ip_addrs'].insert(),
        [{
            'ip_addr': '192.168.0.2',
        }])

    db.execute(
        meta.tables['network_groups'].insert(),
        [{
            'name': 'public',
            'release': releaseid,
            'meta': jsonutils.dumps({'assign_vip': True})
        }])

    db.commit()
Пример #43
0
def prepare():
    """Build a full fixture set for the migration tests: a plugin, two
    releases, a cluster, three nodes with attributes, roles, interfaces
    and network assignments.
    """
    meta = base.reflect_db_metadata()
    # Plugin declaring compatibility with Fuel 6.1 and 7.0.
    db.execute(
        meta.tables['plugins'].insert(),
        [{
            'name': 'test_plugin',
            'title': 'Test plugin',
            'version': '1.0.0',
            'description': 'Test plugin for Fuel',
            'homepage': 'http://fuel_plugins.test_plugin.com',
            'package_version': '3.0.0',
            'groups': jsonutils.dumps(['tgroup']),
            'authors': jsonutils.dumps(['tauthor']),
            'licenses': jsonutils.dumps(['tlicense']),
            'releases': jsonutils.dumps([
                {'repository_path': 'repositories/ubuntu'}
            ]),
            'fuel_version': jsonutils.dumps(['6.1', '7.0']),
        }])

    # Primary release (6.1) with role metadata and linux bonding modes.
    result = db.execute(
        meta.tables['releases'].insert(),
        [{
            'name': 'test_name',
            'version': '2014.2-6.1',
            'operating_system': 'ubuntu',
            'state': 'available',
            'roles': jsonutils.dumps([
                'controller',
                'compute',
                'mongo',
            ]),
            'roles_metadata': jsonutils.dumps({
                'controller': {
                    'name': 'Controller',
                    'description': 'Controller role',
                    'has_primary': True,
                },
                'zabbix-server': {
                    'name': 'Zabbix Server',
                    'description': 'Zabbix Server role'
                },
                'cinder': {
                    'name': 'Cinder',
                    'description': 'Cinder role'
                },
                'mongo': {
                    'name': 'Telemetry - MongoDB',
                    'description': 'mongo is',
                    'has_primary': True,
                }
            }),
            'attributes_metadata': jsonutils.dumps({}),
            'networks_metadata': jsonutils.dumps({
                'bonding': {
                    'properties': {
                        'linux': {
                            'mode': [
                                {
                                    "values": ["balance-rr",
                                               "active-backup",
                                               "802.3ad"]
                                },
                                {
                                    "values": ["balance-xor",
                                               "broadcast",
                                               "balance-tlb",
                                               "balance-alb"],
                                    "condition": "'experimental' in "
                                                 "version:feature_groups"
                                }
                            ]
                        }
                    }
                },
            }),
            'is_deployable': True,
        }])
    releaseid = result.inserted_primary_key[0]

    # Secondary release (6.0) with OVS bonding modes; not linked to the
    # cluster below.
    result = db.execute(
        meta.tables['releases'].insert(),
        [{
            'name': 'test_name_2',
            'version': '2014.2-6.0',
            'operating_system': 'ubuntu',
            'state': 'available',
            'roles': jsonutils.dumps([
                'controller',
                'compute',
                'mongo',
            ]),
            'roles_metadata': jsonutils.dumps({}),
            'attributes_metadata': jsonutils.dumps({}),
            'networks_metadata': jsonutils.dumps({
                'bonding': {
                    'properties': {
                        'ovs': {
                            'mode': [
                                {
                                    "values": ["active-backup",
                                               "balance-slb",
                                               "lacp-balance-tcp"]
                                }
                            ]
                        }
                    }
                },
            }),
            'is_deployable': True
        }])

    # Cluster bound to the primary (6.1) release.
    db.execute(
        meta.tables['clusters'].insert(),
        [{
            'name': 'test_env',
            'release_id': releaseid,
            'mode': 'ha_compact',
            'status': 'new',
            'net_provider': 'neutron',
            'grouping': 'roles',
            'fuel_version': '6.1',
        }])

    # Three unassigned nodes: one 'ready', two in 'discover' state.
    result = db.execute(
        meta.tables['nodes'].insert(),
        [
            {
                'uuid': 'one',
                'cluster_id': None,
                'group_id': None,
                'status': 'ready',
                'meta': '{}',
                'mac': 'aa:aa:aa:aa:aa:aa',
                'pending_addition': True,
                'pending_deletion': False,
                'timestamp': datetime.datetime.utcnow(),
            }
        ])
    nodeid_a = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables['nodes'].insert(),
        [
            {
                'uuid': 'two',
                'cluster_id': None,
                'group_id': None,
                'status': 'discover',
                'meta': '{}',
                'mac': 'bb:bb:bb:bb:bb:bb',
                'pending_addition': True,
                'pending_deletion': False,
                'timestamp': datetime.datetime.utcnow(),
            }
        ])
    nodeid_b = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables['nodes'].insert(),
        [
            {
                'uuid': 'three',
                'cluster_id': None,
                'group_id': None,
                'status': 'discover',
                'meta': '{}',
                'mac': 'cc:cc:cc:cc:cc:cc',
                'pending_addition': True,
                'pending_deletion': False,
                'timestamp': datetime.datetime.utcnow(),
            }
        ])
    nodeid_c = result.inserted_primary_key[0]

    # Per-node volume attributes (volume id mirrors the node id).
    db.execute(
        meta.tables['node_attributes'].insert(),
        [
            {
                'node_id': nodeid_a,
                'volumes': jsonutils.dumps([{'volume': nodeid_a}])
            },
            {
                'node_id': nodeid_b,
                'volumes': jsonutils.dumps([{'volume': nodeid_b}])
            },
            {
                'node_id': nodeid_c,
                'volumes': jsonutils.dumps([{'volume': nodeid_c}])
            },
        ])

    # Release roles plus assigned and pending node-role associations.
    result = db.execute(
        meta.tables['roles'].insert(),
        [
            {'release_id': releaseid, 'name': 'controller'},
        ])
    controllerroleid = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables['roles'].insert(),
        [
            {'release_id': releaseid, 'name': 'mongo'},
        ])
    mongoroleid = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables['node_roles'].insert(),
        [
            {'role': controllerroleid, 'node': nodeid_a, 'primary': False},
            {'role': controllerroleid, 'node': nodeid_b, 'primary': False},
            {'role': controllerroleid, 'node': nodeid_c, 'primary': True},
            {'role': mongoroleid, 'node': nodeid_a, 'primary': False},
        ])

    result = db.execute(
        meta.tables['pending_node_roles'].insert(),
        [
            {'role': mongoroleid, 'node': nodeid_b, 'primary': True},
            {'role': mongoroleid, 'node': nodeid_c, 'primary': False},
        ])

    # NIC and bond interfaces for node A.
    db.execute(
        meta.tables['node_nic_interfaces'].insert(),
        [
            {
                'id': 1,
                'node_id': nodeid_a,
                'name': 'test_interface',
                'mac': '00:00:00:00:00:01',
                'max_speed': 200,
                'current_speed': 100,
                'ip_addr': '10.20.0.2',
                'netmask': '255.255.255.0',
                'state': 'test_state',
                'interface_properties': jsonutils.dumps(
                    {'test_property': 'test_value'}),
                'driver': 'test_driver',
                'bus_info': 'some_test_info'
            },
            {
                'id': 2,
                'node_id': nodeid_a,
                'name': 'test_interface_2',
                'mac': '00:00:00:00:00:02',
                'max_speed': 200,
                'current_speed': 100,
                'ip_addr': '10.30.0.2',
                'netmask': '255.255.255.0',
                'state': 'test_state',
                'interface_properties': jsonutils.dumps(
                    {'test_property': 'test_value'}),
                'driver': 'test_driver',
                'bus_info': 'some_test_info'
            },
            {
                'id': 3,
                'node_id': nodeid_a,
                'name': 'test_interface_3',
                'mac': '00:00:00:00:00:03',
                'max_speed': 200,
                'current_speed': 100,
                'ip_addr': '10.30.0.2',
                'netmask': '255.255.255.0',
                'state': 'test_state',
                'interface_properties': jsonutils.dumps(
                    {'test_property': 'test_value'}),
                'driver': 'test_driver',
                'bus_info': 'some_test_info'
            }])

    db.execute(
        meta.tables['node_bond_interfaces'].insert(),
        [{
            'node_id': nodeid_a,
            'name': 'test_bond_interface',
            'mode': 'active-backup',
            'bond_properties': jsonutils.dumps(
                {'test_property': 'test_value'})
        }])

    # Admin and public network groups, assigned to node A's NICs.
    db.execute(
        meta.tables['network_groups'].insert(),
        [
            {
                'id': 1,
                'name': 'fuelweb_admin',
                'vlan_start': None,
                'cidr': '10.20.0.0/24',
                'gateway': '10.20.0.200',
            },
            {
                'id': 2,
                'name': 'public',
                'vlan_start': None,
                'cidr': '10.30.0.0/24',
                'gateway': '10.30.0.200'
            }
        ]
    )

    db.execute(
        meta.tables['net_nic_assignments'].insert(),
        [
            {
                'network_id': 1,
                'interface_id': 1
            },
            {
                'network_id': 2,
                'interface_id': 2
            },
            {
                'network_id': 2,
                'interface_id': 3
            }
        ]
    )

    db.commit()
Пример #44
0
def prepare():
    """Create release, cluster, node group, network and plugin fixtures
    for the migration tests.
    """
    meta = base.reflect_db_metadata()

    releaseid = insert_table_row(
        meta.tables['releases'],
        {
            'name': 'test_name',
            'version': '2014.2.2-6.1',
            'operating_system': 'ubuntu',
            'state': 'available',
            'networks_metadata': jsonutils.dumps({
                'neutron': {'networks': [], 'config': {}}
            })
        })

    clusterid = insert_table_row(
        meta.tables['clusters'],
        {
            'name': 'test_env',
            'release_id': releaseid,
            'mode': 'ha_compact',
            'status': 'new',
            'net_provider': 'neutron',
            'grouping': 'roles',
            'fuel_version': '7.0',
        })

    # NOTE(review): node group names are deliberately repeated here --
    # presumably to exercise duplicate-name handling; confirm.
    db.execute(meta.tables['nodegroups'].insert(), [
        {'cluster_id': clusterid, 'name': 'test_nodegroup_a'},
        {'cluster_id': clusterid, 'name': 'test_nodegroup_a'},
        {'cluster_id': clusterid, 'name': 'test_nodegroup_b'},
        {'cluster_id': clusterid, 'name': 'test_nodegroup_b'},
    ])

    netconfigid = insert_table_row(
        meta.tables['networking_configs'],
        {
            'cluster_id': None,
            'dns_nameservers': ['8.8.8.8'],
            'floating_ranges': [],
            'configuration_template': None,
        })

    db.execute(
        meta.tables['neutron_config'].insert(),
        [{
            'id': netconfigid,
            'vlan_range': [],
            'gre_id_range': [],
            'base_mac': '00:00:00:00:00:00',
            'internal_cidr': '10.10.10.00/24',
            'internal_gateway': '10.10.10.01',
            'segmentation_type': 'vlan',
            'net_l23_provider': 'ovs'
        }])

    result = db.execute(
        meta.tables['plugins'].insert(),
        [{
            'name': 'test_plugin_a',
            'title': 'Test plugin A',
            'version': '1.0.0',
            'description': 'Test plugin A for Fuel',
            'homepage': 'http://fuel_plugins.test_plugin.com',
            'package_version': '3.0.0',
            'groups': jsonutils.dumps(['tgroup']),
            'authors': jsonutils.dumps(['tauthor']),
            'licenses': jsonutils.dumps(['tlicense']),
            'releases': jsonutils.dumps([
                {'repository_path': 'repositories/ubuntu'}
            ]),
            'fuel_version': jsonutils.dumps(['6.1', '7.0']),
        }])
    pluginid_a = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables['plugins'].insert(),
        [{
            'name': 'test_plugin_b',
            'title': 'Test plugin B',
            'version': '1.0.0',
            'description': 'Test plugin B for Fuel',
            'homepage': 'http://fuel_plugins.test_plugin.com',
            'package_version': '3.0.0',
            'groups': jsonutils.dumps(['tgroup']),
            'authors': jsonutils.dumps(['tauthor']),
            'licenses': jsonutils.dumps(['tlicense']),
            'releases': jsonutils.dumps([
                {'repository_path': 'repositories/ubuntu'}
            ]),
            'fuel_version': jsonutils.dumps(['6.1', '7.0']),
        }])
    pluginid_b = result.inserted_primary_key[0]

    db.execute(
        meta.tables['cluster_plugins'].insert(),
        [
            {'cluster_id': clusterid, 'plugin_id': pluginid_a},
            {'cluster_id': clusterid, 'plugin_id': pluginid_b},
        ])

    db.execute(
        meta.tables['attributes'].insert(),
        [{
            'cluster_id': clusterid,
            'editable': jsonutils.dumps({
                'test_plugin_a': {
                    'metadata': {
                        'plugin_id': pluginid_a,
                        'enabled': True,
                        'toggleable': True,
                        'weight': 70,
                    },
                    'attribute': {
                        'value': 'value',
                        'type': 'text',
                        'description': 'description',
                        'weight': 25,
                        'label': 'label'
                    }
                },
                'test_plugin_b': {
                    'metadata': {
                        'plugin_id': pluginid_b,
                        'enabled': False,
                        'toggleable': True,
                        'weight': 80,
                    }
                }
            }),
            'generated': jsonutils.dumps({}),
        }])

    db.commit()
Пример #45
0
 def _update_release_state(cls, release_id, state):
     """Persist a new ``state`` value on the release with ``release_id``."""
     release = db().query(Release).get(release_id)
     release.state = state
     # NOTE(review): add() is redundant for an object already loaded into
     # the session, but harmless.
     db.add(release)
     db.commit()
Пример #46
0
    def execute(self, **kwargs):
        """Reset the environment.

        Clears the cluster's 'deployed_before' flag, removes stale
        deploy-related tasks, resets the "VMs created" flag on virt
        nodes, then creates a ``reset_environment`` supertask with
        key-removal and ironic-bootstrap-removal subtasks and casts all
        task messages to Astute over RPC.
        """

        # FIXME(aroma): remove updating of 'deployed_before'
        # when stop action is reworked. 'deployed_before'
        # flag identifies whether stop action is allowed for the
        # cluster. Please, refer to [1] for more details.
        # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
        objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

        deploy_running = db().query(Task).filter_by(
            cluster=self.cluster,
            name=consts.TASK_NAMES.deploy,
            status='running'
        ).first()
        if deploy_running:
            raise errors.DeploymentAlreadyStarted(
                u"Can't reset environment '{0}' when "
                u"deployment is running".format(
                    self.cluster.id
                )
            )

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
        ).filter(
            Task.name.in_([
                consts.TASK_NAMES.deploy,
                consts.TASK_NAMES.deployment,
                consts.TASK_NAMES.stop_deployment
            ])
        )

        for task in obsolete_tasks:
            db().delete(task)

        nodes = objects.Cluster.get_nodes_by_role(
            self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
        for node in nodes:
            objects.Node.reset_vms_created_state(node)

        db().commit()

        supertask = Task(
            name=consts.TASK_NAMES.reset_environment,
            cluster=self.cluster
        )
        db().add(supertask)
        al = TaskHelper.create_action_log(supertask)

        # NOTE(review): both subtasks below are created with the same
        # reset_environment task name -- confirm whether dedicated task
        # names exist for key removal and ironic bootstrap removal.
        remove_keys_task = supertask.create_subtask(
            consts.TASK_NAMES.reset_environment
        )

        remove_ironic_bootstrap_task = supertask.create_subtask(
            consts.TASK_NAMES.reset_environment
        )

        # Commit before casting so Astute sees the persisted tasks.
        db.commit()

        rpc.cast('naily', [
            tasks.ResetEnvironmentTask.message(supertask),
            tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
            tasks.RemoveClusterKeys.message(remove_keys_task)
        ])
        TaskHelper.update_action_log(supertask, al)
        return supertask
def prepare():
    """Seed the test DB for the migration test: one release, its
    orchestrator data, one cluster with attributes, an IP address and a
    network group; commits at the end.
    """
    meta = base.reflect_db_metadata()

    # Release role metadata: a single "mongo" role.
    roles_metadata = jsonutils.dumps({
        "mongo": {
            "name": "Mongo",
            "description": "Mongo role"
        }
    })

    # Insert the release and keep its primary key for FK references below.
    result = db.execute(
        meta.tables['releases'].insert(),
        [{
            'name': 'test_name',
            'version': '2014.2-6.0',
            'operating_system': 'ubuntu',
            'state': 'available',
            'roles_metadata': roles_metadata,
            'attributes_metadata': jsonutils.dumps({
                'editable': {
                    'storage': {
                        'volumes_lvm': {},
                    },
                    'common': {},
                },
                'generated': {
                    'cobbler': {'profile': {
                        'generator_arg': 'ubuntu_1204_x86_64'}}},
            }),
            'networks_metadata': jsonutils.dumps({
                'neutron': {
                    'networks': [
                        {
                            'assign_vip': True,
                        },
                    ]
                },
                'nova_network': {
                    'networks': [
                        {
                            'assign_vip': False,
                        },
                    ]
                },

            }),
            'is_deployable': True,
        }])
    releaseid = result.inserted_primary_key[0]

    # Orchestrator data (puppet and repo sources) tied to that release.
    result = db.execute(
        meta.tables['release_orchestrator_data'].insert(),
        [{
            'release_id': releaseid,
            'puppet_manifests_source': 'rsync://0.0.0.0:/puppet/manifests',
            'puppet_modules_source': 'rsync://0.0.0.0:/puppet/modules',
            'repo_metadata': jsonutils.dumps({
                'base': 'http://baseuri base-suite main',
                'test': 'http://testuri test-suite main',
            })
        }])

    # Cluster bound to the release created above.
    result = db.execute(
        meta.tables['clusters'].insert(),
        [{
            'name': 'test_env',
            'release_id': releaseid,
            'mode': 'ha_compact',
            'status': 'new',
            'net_provider': 'neutron',
            'grouping': 'roles',
            'fuel_version': '6.0',
        }])
    clusterid = result.inserted_primary_key[0]

    # Cluster attributes are stored here as raw JSON strings, not dumped
    # dicts — keep in mind when reading them back in assertions.
    db.execute(
        meta.tables['attributes'].insert(),
        [{
            'cluster_id': clusterid,
            'editable': '{"common": {}}',
            'generated': '{"cobbler": {"profile": "ubuntu_1204_x86_64"}}',
        }])

    db.execute(
        meta.tables['ip_addrs'].insert(),
        [{
            'ip_addr': '192.168.0.2',
        }])

    # NOTE(review): column is named 'release' (not 'release_id') here —
    # presumably the pre-migration schema; confirm against the migration.
    db.execute(
        meta.tables['network_groups'].insert(),
        [{
            'name': 'public',
            'release': releaseid,
            'meta': jsonutils.dumps({'assign_vip': True})
        }])

    db.commit()
# Example #48
# 0
def prepare():
    """Seed the test DB: one plugin, three release+cluster pairs (each with
    an attributes row) and two nodes, the second of which gets a bond and
    two NICs; commits at the end.
    """
    meta = base.reflect_db_metadata()
    # Variant of the shared attributes fixture with security groups added
    # under editable.common; used only by the third cluster.
    attrs_with_sec_group = deepcopy(ATTRIBUTES_METADATA)
    attrs_with_sec_group.setdefault('editable', {}).setdefault(
        'common', {}).setdefault('security_groups', SECURITY_GROUPS)
    plugin = {
        'name': 'Test_P',
        'version': '3.0.0',
        'title': 'Test Plugin',
        'package_version': '5.0.0',
        'roles_metadata': jsonutils.dumps(PLUGIN_ROLE_META),
        'tags_metadata': jsonutils.dumps(PLUGIN_TAGS_META)
    }
    result = db.execute(meta.tables['plugins'].insert(), [plugin])

    # One release, one cluster and one attributes row per zipped tuple.
    for release_name, env_version, cluster_name, attrs in zip(
            ('release_1', 'release_2', 'release_3'),
            ('mitaka-9.0', 'liberty-8.0', 'mitaka-9.0'),
            ('cluster_1', 'cluster_2', 'cluster_3'),
            (ATTRIBUTES_METADATA, ATTRIBUTES_METADATA, attrs_with_sec_group)
    ):
        release = {
            'name': release_name,
            'version': env_version,
            'operating_system': 'ubuntu',
            'state': 'available',
            'deployment_tasks': '[]',
            'roles_metadata': jsonutils.dumps(ROLES_META),
            # NOTE(review): spelled 'tags_matadata' here while the cluster
            # insert below uses 'tags_metadata' — confirm this really is
            # the column name in the reflected schema and not a typo.
            'tags_matadata': jsonutils.dumps(TAGS_META),
            'is_deployable': True,
            'networks_metadata': '{}',
            'attributes_metadata': jsonutils.dumps(attrs)
        }
        result = db.execute(meta.tables['releases'].insert(), [release])
        release_id = result.inserted_primary_key[0]

        result = db.execute(
            meta.tables['clusters'].insert(),
            [{
                'name': cluster_name,
                'release_id': release_id,
                'mode': 'ha_compact',
                'status': 'new',
                'net_provider': 'neutron',
                'grouping': 'roles',
                'fuel_version': '9.0',
                'deployment_tasks': '[]',
                'roles_metadata': jsonutils.dumps(ROLES_META),
                'tags_metadata': '{}',
            }])

        cluster_id = result.inserted_primary_key[0]
        editable = attrs.get('editable', {})
        db.execute(
            meta.tables['attributes'].insert(),
            [{
                'cluster_id': cluster_id,
                'editable': jsonutils.dumps(editable)
            }]
        )

    # NOTE: cluster_id leaks out of the loop above, so this node is
    # attached to the last cluster created ('cluster_3').
    db.execute(
        meta.tables['nodes'].insert(),
        [{
            'uuid': 'fcd49872-3917-4a18-98f9-3f5acfe3fdec',
            'cluster_id': cluster_id,
            'group_id': None,
            'status': 'ready',
            'roles': ['role_x', 'role_y'],
            'primary_tags': ['role_y', 'test'],
            'meta': '{}',
            'mac': 'bb:aa:aa:aa:aa:aa',
            'timestamp': datetime.datetime.utcnow(),
        }]
    )

    # Unassigned (clusterless) node that owns the bond and NICs below.
    new_node = db.execute(
        meta.tables['nodes'].insert(),
        [{
            'uuid': '26b508d0-0d76-4159-bce9-f67ec2765481',
            'cluster_id': None,
            'group_id': None,
            'status': 'discover',
            'mac': 'aa:aa:aa:aa:aa:aa',
            'timestamp': datetime.datetime.utcnow()
        }]
    )
    node_id = new_node.inserted_primary_key[0]

    # Bond with a fully populated attributes JSON blob.
    bond_interface = db.execute(
        meta.tables['node_bond_interfaces'].insert(),
        [{
            'node_id': node_id,
            'name': 'test_bond_interface',
            'mode': 'balance-tlb',
            'attributes': jsonutils.dumps({
                'lacp_rate': {'value': {'value': ''}},
                'xmit_hash_policy': {'value': {'value': 'layer2'}},
                'offloading': {
                    'disable': {'value': True},
                    'modes': {'value': {'tx-checksumming': None,
                                        'tx-checksum-sctp': None}}
                },
                'mtu': {'value': {'value': 50}},
                'lacp': {'value': {'value': ''}},
                'mode': {'value': {'value': 'balance-tlb'}},
                'type__': {'value': 'linux'},
                'dpdk': {'enabled': {'value': False}}
            })
        }]
    )
    bond_id = bond_interface.inserted_primary_key[0]

    # NIC with empty attributes/meta (edge case for the migration).
    db.execute(
        meta.tables['node_nic_interfaces'].insert(),
        [{
            'node_id': node_id,
            'name': 'test_nic_empty_attributes',
            'mac': '00:00:00:00:00:01',
            'attributes': jsonutils.dumps({}),
            'meta': jsonutils.dumps({})
        }]
    )
    # NIC slaved to the bond above, with populated attributes and meta.
    db.execute(
        meta.tables['node_nic_interfaces'].insert(),
        [{
            'node_id': node_id,
            'name': 'test_nic_attributes',
            'parent_id': bond_id,
            'mac': '00:00:00:00:00:01',
            'attributes': jsonutils.dumps({
                'offloading': {
                    'disable': {'value': 'test_disable_offloading'},
                    'modes': {
                        'value': {
                            'tx-checksum-ipv4': 'IPV4_STATE',
                            'tx-checksumming': 'TX_STATE',
                            'rx-checksumming': 'RX_STATE',
                            'tx-checksum-ipv6': 'IPV6_STATE'
                        }
                    }
                },
                'mtu': {
                    'value': {'value': 'test_mtu'}
                },
                'sriov': {
                    'numvfs': {'value': 'test_sriov_numfs'},
                    'enabled': {'value': 'test_sriov_enabled'},
                    'physnet': {'value': 'test_sriov_physnet'}
                },
                'dpdk': {
                    'enabled': {'value': 'test_dpdk_enabled'}
                }
            }),
            'meta': jsonutils.dumps({
                'offloading_modes': [{
                    'state': None,
                    'name': 'tx-checksumming',
                    'sub': [
                        {'state': False, 'name': 'tx-checksum-sctp',
                         'sub': []},
                        {'state': None, 'name': 'tx-checksum-ipv6',
                         'sub': []},
                        {'state': None, 'name': 'tx-checksum-ipv4',
                         'sub': []}
                    ]
                }, {
                    'state': None, 'name': 'rx-checksumming', 'sub': []
                }],
                'numa_node': 12345,
                'pci_id': 'test_pci_id',
                'sriov': {
                    'available': 'test_sriov_available',
                    'totalvfs': 6789,
                    'pci_id': 'test_sriov_pci_id'
                },
                'dpdk': {'available': True}
            })
        }]
    )

    db.commit()
# Load the stored volume layout for this node; fail loudly if missing.
nv = db().query(NodeVolumes).filter(NodeVolumes.node_id == node_id).first()

if not nv:
    raise Exception("No volumes info was found for node {0}".format(node_id))

volumes = nv.volumes

# Split off the "os" volume-group entry; the rest are physical disks.
os_vg = next(disk for disk in volumes if disk.get('id') == 'os')
volumes = [disk for disk in volumes if disk.get('id') != 'os']

for disk in volumes:
    # Rebuild each disk's volume list from scratch.
    pending, disk['volumes'] = disk['volumes'], []
    for vol in pending:
        if vol['type'] == 'pv' and vol['vg'] == 'os' and vol['size'] > 0:
            # Replace the "os" PV with plain partitions mirroring the
            # volume group's logical volumes.
            disk['volumes'].extend(
                {'name': lv['name'],
                 'size': lv['size'],
                 'type': 'partition',
                 'mount': lv['mount'],
                 'file_system': lv['file_system']}
                for lv in os_vg['volumes'])
        else:
            # lvm_meta_pool and boot entries are kept but zero-sized.
            if vol['type'] in ('lvm_meta_pool', 'boot'):
                vol['size'] = 0
            disk['volumes'].append(vol)

# Persist the rewritten layout; note the "os" VG entry itself is dropped.
db().query(NodeVolumes).filter(NodeVolumes.node_id == node_id).update(
    {"volumes": volumes})
db.commit()
# Example #50
# 0
def prepare():
    """Seed the test DB: a plugin, two releases, one cluster with neutron
    networking config, three nodes with volumes, assigned/pending roles,
    three NICs, one bond, two network groups and NIC-network assignments.
    Commits at the end.
    """
    meta = base.reflect_db_metadata()
    # Plugin fixture (yapf-style formatting: key and value on separate
    # lines — kept as-is to match the file's generated style).
    db.execute(meta.tables['plugins'].insert(), [{
        'name':
        'test_plugin',
        'title':
        'Test plugin',
        'version':
        '1.0.0',
        'description':
        'Test plugin for Fuel',
        'homepage':
        'http://fuel_plugins.test_plugin.com',
        'package_version':
        '3.0.0',
        'groups':
        jsonutils.dumps(['tgroup']),
        'authors':
        jsonutils.dumps(['tauthor']),
        'licenses':
        jsonutils.dumps(['tlicense']),
        'releases':
        jsonutils.dumps([{
            'repository_path': 'repositories/ubuntu'
        }]),
        'fuel_version':
        jsonutils.dumps(['6.1', '7.0']),
    }])

    # First release: 6.1, linux bonding properties; its PK is used below.
    result = db.execute(meta.tables['releases'].insert(), [{
        'name':
        'test_name',
        'version':
        '2014.2.2-6.1',
        'operating_system':
        'ubuntu',
        'state':
        'available',
        'roles':
        jsonutils.dumps([
            'controller',
            'compute',
            'mongo',
        ]),
        'roles_metadata':
        jsonutils.dumps({
            'controller': {
                'name': 'Controller',
                'description': 'Controller role',
                'has_primary': True,
            },
            'zabbix-server': {
                'name': 'Zabbix Server',
                'description': 'Zabbix Server role'
            },
            'cinder': {
                'name': 'Cinder',
                'description': 'Cinder role'
            },
            'mongo': {
                'name': 'Telemetry - MongoDB',
                'description': 'mongo is',
                'has_primary': True,
            }
        }),
        'attributes_metadata':
        jsonutils.dumps({}),
        'networks_metadata':
        jsonutils.dumps({
            'bonding': {
                'properties': {
                    'linux': {
                        'mode': [{
                            "values":
                            ["balance-rr", "active-backup", "802.3ad"]
                        }, {
                            "values": [
                                "balance-xor", "broadcast", "balance-tlb",
                                "balance-alb"
                            ],
                            "condition":
                            "'experimental' in "
                            "version:feature_groups"
                        }]
                    }
                }
            },
        }),
        'is_deployable':
        True,
    }])
    releaseid = result.inserted_primary_key[0]

    # Second release: 6.0, ovs bonding properties; its PK is not used.
    result = db.execute(meta.tables['releases'].insert(), [{
        'name':
        'test_name_2',
        'version':
        '2014.2-6.0',
        'operating_system':
        'ubuntu',
        'state':
        'available',
        'roles':
        jsonutils.dumps([
            'controller',
            'compute',
            'mongo',
        ]),
        'roles_metadata':
        jsonutils.dumps({}),
        'attributes_metadata':
        jsonutils.dumps({}),
        'networks_metadata':
        jsonutils.dumps({
            'bonding': {
                'properties': {
                    'ovs': {
                        'mode': [{
                            "values": [
                                "active-backup", "balance-slb",
                                "lacp-balance-tcp"
                            ]
                        }]
                    }
                }
            },
        }),
        'is_deployable':
        True
    }])

    # Cluster bound to the first release.
    result = db.execute(meta.tables['clusters'].insert(),
                        [{
                            'name': 'test_env',
                            'release_id': releaseid,
                            'mode': 'ha_compact',
                            'status': 'new',
                            'net_provider': 'neutron',
                            'grouping': 'roles',
                            'fuel_version': '6.1',
                        }])
    clusterid = result.inserted_primary_key[0]

    # Global (cluster-less) networking config plus its neutron subtype row,
    # sharing the same primary key.
    result = db.execute(meta.tables['networking_configs'].insert(),
                        [{
                            'cluster_id': None,
                            'dns_nameservers': ['8.8.8.8'],
                            'floating_ranges': [],
                            'configuration_template': None,
                        }])
    db.execute(meta.tables['neutron_config'].insert(),
               [{
                   'id': result.inserted_primary_key[0],
                   'vlan_range': [],
                   'gre_id_range': [],
                   'base_mac': '00:00:00:00:00:00',
                   'internal_cidr': '10.10.10.00/24',
                   'internal_gateway': '10.10.10.01',
                   'segmentation_type': 'vlan',
                   'net_l23_provider': 'ovs'
               }])

    # Three nodes: two in the cluster, one unassigned.
    result = db.execute(meta.tables['nodes'].insert(),
                        [{
                            'uuid': 'one',
                            'cluster_id': clusterid,
                            'group_id': None,
                            'status': 'ready',
                            'meta': '{}',
                            'mac': 'aa:aa:aa:aa:aa:aa',
                            'pending_addition': True,
                            'pending_deletion': False,
                            'timestamp': datetime.datetime.utcnow(),
                        }])
    nodeid_a = result.inserted_primary_key[0]

    result = db.execute(meta.tables['nodes'].insert(),
                        [{
                            'uuid': 'two',
                            'cluster_id': clusterid,
                            'group_id': None,
                            'status': 'discover',
                            'meta': '{}',
                            'mac': 'bb:bb:bb:bb:bb:bb',
                            'pending_addition': True,
                            'pending_deletion': False,
                            'timestamp': datetime.datetime.utcnow(),
                        }])
    nodeid_b = result.inserted_primary_key[0]

    result = db.execute(meta.tables['nodes'].insert(),
                        [{
                            'uuid': 'three',
                            'cluster_id': None,
                            'group_id': None,
                            'status': 'discover',
                            'meta': '{}',
                            'mac': 'cc:cc:cc:cc:cc:cc',
                            'pending_addition': True,
                            'pending_deletion': False,
                            'timestamp': datetime.datetime.utcnow(),
                        }])
    nodeid_c = result.inserted_primary_key[0]

    # One volumes record per node; volume payload just echoes the node PK.
    db.execute(meta.tables['node_attributes'].insert(), [
        {
            'node_id': nodeid_a,
            'volumes': jsonutils.dumps([{
                'volume': nodeid_a
            }])
        },
        {
            'node_id': nodeid_b,
            'volumes': jsonutils.dumps([{
                'volume': nodeid_b
            }])
        },
        {
            'node_id': nodeid_c,
            'volumes': jsonutils.dumps([{
                'volume': nodeid_c
            }])
        },
    ])

    # Two roles on the first release, then assigned and pending role links.
    result = db.execute(meta.tables['roles'].insert(), [
        {
            'release_id': releaseid,
            'name': 'controller'
        },
    ])
    controllerroleid = result.inserted_primary_key[0]

    result = db.execute(meta.tables['roles'].insert(), [
        {
            'release_id': releaseid,
            'name': 'mongo'
        },
    ])
    mongoroleid = result.inserted_primary_key[0]

    result = db.execute(meta.tables['node_roles'].insert(), [
        {
            'role': controllerroleid,
            'node': nodeid_a,
            'primary': False
        },
        {
            'role': controllerroleid,
            'node': nodeid_b,
            'primary': False
        },
        {
            'role': controllerroleid,
            'node': nodeid_c,
            'primary': True
        },
        {
            'role': mongoroleid,
            'node': nodeid_a,
            'primary': False
        },
    ])

    result = db.execute(meta.tables['pending_node_roles'].insert(), [
        {
            'role': mongoroleid,
            'node': nodeid_b,
            'primary': True
        },
        {
            'role': mongoroleid,
            'node': nodeid_c,
            'primary': False
        },
    ])

    # Three NICs on node A with explicit ids (referenced by the
    # net_nic_assignments rows at the bottom).
    db.execute(meta.tables['node_nic_interfaces'].insert(), [{
        'id':
        1,
        'node_id':
        nodeid_a,
        'name':
        'test_interface',
        'mac':
        '00:00:00:00:00:01',
        'max_speed':
        200,
        'current_speed':
        100,
        'ip_addr':
        '10.20.0.2',
        'netmask':
        '255.255.255.0',
        'state':
        'test_state',
        'interface_properties':
        jsonutils.dumps({'test_property': 'test_value'}),
        'driver':
        'test_driver',
        'bus_info':
        'some_test_info'
    }, {
        'id':
        2,
        'node_id':
        nodeid_a,
        'name':
        'test_interface_2',
        'mac':
        '00:00:00:00:00:02',
        'max_speed':
        200,
        'current_speed':
        100,
        'ip_addr':
        '10.30.0.2',
        'netmask':
        '255.255.255.0',
        'state':
        'test_state',
        'interface_properties':
        jsonutils.dumps({'test_property': 'test_value'}),
        'driver':
        'test_driver',
        'bus_info':
        'some_test_info'
    }, {
        'id':
        3,
        'node_id':
        nodeid_a,
        'name':
        'test_interface_3',
        'mac':
        '00:00:00:00:00:03',
        'max_speed':
        200,
        'current_speed':
        100,
        'ip_addr':
        '10.30.0.2',
        'netmask':
        '255.255.255.0',
        'state':
        'test_state',
        'interface_properties':
        jsonutils.dumps({'test_property': 'test_value'}),
        'driver':
        'test_driver',
        'bus_info':
        'some_test_info'
    }])

    db.execute(
        meta.tables['node_bond_interfaces'].insert(),
        [{
            'node_id': nodeid_a,
            'name': 'test_bond_interface',
            'mode': 'active-backup',
            'bond_properties': jsonutils.dumps({'test_property': 'test_value'})
        }])

    # Admin and public network groups with explicit ids (referenced below).
    db.execute(meta.tables['network_groups'].insert(),
               [{
                   'id': 1,
                   'name': 'fuelweb_admin',
                   'vlan_start': None,
                   'cidr': '10.20.0.0/24',
                   'gateway': '10.20.0.200',
               }, {
                   'id': 2,
                   'name': 'public',
                   'vlan_start': None,
                   'cidr': '10.30.0.0/24',
                   'gateway': '10.30.0.200'
               }])

    # Map NIC 1 -> admin network, NICs 2 and 3 -> public network.
    db.execute(meta.tables['net_nic_assignments'].insert(), [{
        'network_id': 1,
        'interface_id': 1
    }, {
        'network_id': 2,
        'interface_id': 2
    }, {
        'network_id': 2,
        'interface_id': 3
    }])

    db.commit()
# Example #51
# 0
# NOTE(review): this fragment reads `nv` without defining it — it must be
# populated by earlier code (a NodeVolumes query filtered by `node_id`,
# as in the sibling snippet above); confirm the missing first line.
if not nv:
    raise Exception("No volumes info was found for node {0}".format(node_id))

volumes = nv.volumes

# Separate the "os" volume-group entry from the physical-disk entries.
os_vg = next(disk for disk in volumes if 'id' in disk and disk['id'] == 'os')
volumes = [disk for disk in volumes if 'id' not in disk or disk['id'] != 'os']

for disk in volumes:
    disk_volumes = disk['volumes']
    disk['volumes'] = []
    for v in disk_volumes:
        # Expand an "os" physical volume into plain partitions, one per
        # logical volume of the os VG.
        if v['type'] == 'pv' and v['vg'] == 'os' and v['size'] > 0:
            for vv in os_vg['volumes']:
                partition = {
                    'name': vv['name'],
                    'size': vv['size'],
                    'type': 'partition',
                    'mount': vv['mount'],
                    'file_system': vv['file_system']
                }
                disk['volumes'].append(partition)
        else:
            # Keep other volumes; lvm_meta_pool/boot entries are zeroed.
            if v['type'] == 'lvm_meta_pool' or v['type'] == 'boot':
                v['size'] = 0
            disk['volumes'].append(v)

# Persist the rewritten layout; the "os" VG entry itself is dropped.
db().query(NodeVolumes).filter(NodeVolumes.node_id == node_id).update(
    {"volumes": volumes})
db.commit()
# Example #52
# 0
def prepare():
    """Seed the test DB: a plugin, one release, three unassigned nodes with
    volumes, assigned/pending roles, a bond and one NIC slaved to it.
    Commits at the end.
    """
    meta = base.reflect_db_metadata()
    # Plugin fixture (yapf-style formatting kept as-is).
    db.execute(meta.tables['plugins'].insert(), [{
        'name':
        'test_plugin',
        'title':
        'Test plugin',
        'version':
        '1.0.0',
        'description':
        'Test plugin for Fuel',
        'homepage':
        'http://fuel_plugins.test_plugin.com',
        'package_version':
        '3.0.0',
        'groups':
        jsonutils.dumps(['tgroup']),
        'authors':
        jsonutils.dumps(['tauthor']),
        'licenses':
        jsonutils.dumps(['tlicense']),
        'releases':
        jsonutils.dumps([{
            'repository_path': 'repositories/ubuntu'
        }]),
        'fuel_version':
        jsonutils.dumps(['6.1', '7.0']),
    }])

    # Single release; its PK is used for the role rows below.
    result = db.execute(meta.tables['releases'].insert(), [{
        'name':
        'test_name',
        'version':
        '2014.2-6.0',
        'operating_system':
        'ubuntu',
        'state':
        'available',
        'roles':
        jsonutils.dumps([
            'controller',
            'compute',
            'mongo',
        ]),
        'roles_metadata':
        jsonutils.dumps({
            'controller': {
                'name': 'Controller',
                'description': 'Controller role',
                'has_primary': True,
            },
            'zabbix-server': {
                'name': 'Zabbix Server',
                'description': 'Zabbix Server role'
            },
            'cinder': {
                'name': 'Cinder',
                'description': 'Cinder role'
            },
            'mongo': {
                'name': 'Telemetry - MongoDB',
                'description': 'mongo is',
                'has_primary': True,
            }
        }),
        'attributes_metadata':
        jsonutils.dumps({}),
        'networks_metadata':
        jsonutils.dumps({}),
        'is_deployable':
        True,
    }])
    releaseid = result.inserted_primary_key[0]

    # Three nodes, all without a cluster.
    result = db.execute(meta.tables['nodes'].insert(),
                        [{
                            'uuid': 'one',
                            'cluster_id': None,
                            'group_id': None,
                            'status': 'ready',
                            'meta': '{}',
                            'mac': 'aa:aa:aa:aa:aa:aa',
                            'pending_addition': True,
                            'pending_deletion': False,
                            'timestamp': datetime.datetime.utcnow(),
                        }])
    nodeid_a = result.inserted_primary_key[0]

    result = db.execute(meta.tables['nodes'].insert(),
                        [{
                            'uuid': 'two',
                            'cluster_id': None,
                            'group_id': None,
                            'status': 'discover',
                            'meta': '{}',
                            'mac': 'bb:bb:bb:bb:bb:bb',
                            'pending_addition': True,
                            'pending_deletion': False,
                            'timestamp': datetime.datetime.utcnow(),
                        }])
    nodeid_b = result.inserted_primary_key[0]

    result = db.execute(meta.tables['nodes'].insert(),
                        [{
                            'uuid': 'three',
                            'cluster_id': None,
                            'group_id': None,
                            'status': 'discover',
                            'meta': '{}',
                            'mac': 'cc:cc:cc:cc:cc:cc',
                            'pending_addition': True,
                            'pending_deletion': False,
                            'timestamp': datetime.datetime.utcnow(),
                        }])
    nodeid_c = result.inserted_primary_key[0]

    # One volumes record per node; volume payload echoes the node PK.
    db.execute(meta.tables['node_attributes'].insert(), [
        {
            'node_id': nodeid_a,
            'volumes': jsonutils.dumps([{
                'volume': nodeid_a
            }])
        },
        {
            'node_id': nodeid_b,
            'volumes': jsonutils.dumps([{
                'volume': nodeid_b
            }])
        },
        {
            'node_id': nodeid_c,
            'volumes': jsonutils.dumps([{
                'volume': nodeid_c
            }])
        },
    ])

    # Two roles on the release, then assigned and pending role links.
    result = db.execute(meta.tables['roles'].insert(), [
        {
            'release_id': releaseid,
            'name': 'controller'
        },
    ])
    controllerroleid = result.inserted_primary_key[0]

    result = db.execute(meta.tables['roles'].insert(), [
        {
            'release_id': releaseid,
            'name': 'mongo'
        },
    ])
    mongoroleid = result.inserted_primary_key[0]

    result = db.execute(meta.tables['node_roles'].insert(), [
        {
            'role': controllerroleid,
            'node': nodeid_a,
            'primary': False
        },
        {
            'role': controllerroleid,
            'node': nodeid_b,
            'primary': False
        },
        {
            'role': controllerroleid,
            'node': nodeid_c,
            'primary': True
        },
        {
            'role': mongoroleid,
            'node': nodeid_a,
            'primary': False
        },
    ])

    result = db.execute(meta.tables['pending_node_roles'].insert(), [
        {
            'role': mongoroleid,
            'node': nodeid_b,
            'primary': True
        },
        {
            'role': mongoroleid,
            'node': nodeid_c,
            'primary': False
        },
    ])

    db.execute(
        meta.tables['node_bond_interfaces'].insert(),
        [{
            'node_id': nodeid_a,
            'name': 'test_bond_interface',
            'mode': 'active-backup',
            'bond_properties': jsonutils.dumps({'test_property': 'test_value'})
        }])

    # NOTE(review): parent_id is hard-coded to 1, assuming the bond
    # inserted just above receives primary key 1 — confirm this holds
    # (it relies on a freshly created table/sequence).
    db.execute(meta.tables['node_nic_interfaces'].insert(), [{
        'node_id':
        nodeid_a,
        'name':
        'test_interface',
        'mac':
        '00:00:00:00:00:01',
        'max_speed':
        200,
        'current_speed':
        100,
        'ip_addr':
        '10.20.0.2',
        'netmask':
        '255.255.255.0',
        'state':
        'test_state',
        'interface_properties':
        jsonutils.dumps({'test_property': 'test_value'}),
        'parent_id':
        1,
        'driver':
        'test_driver',
        'bus_info':
        'some_test_info'
    }])

    db.commit()
def prepare():
    """Seed the test database with fixture rows for a migration test.

    Reflects the current schema and inserts, via the SQLAlchemy Core
    API: one plugin, two releases (6.1 and 6.0), one cluster, a
    networking/neutron config, three nodes with volume attributes,
    controller/mongo roles with current and pending assignments, NIC
    and bond interfaces, network groups and NIC assignments.  The
    transaction is committed at the end.  Inserted primary keys are
    captured only where a later insert needs them as a foreign key.
    """
    meta = base.reflect_db_metadata()

    # Plugin row; its primary key is never referenced afterwards.
    db.execute(
        meta.tables["plugins"].insert(),
        [
            {
                "name": "test_plugin",
                "title": "Test plugin",
                "version": "1.0.0",
                "description": "Test plugin for Fuel",
                "homepage": "http://fuel_plugins.test_plugin.com",
                "package_version": "3.0.0",
                "groups": jsonutils.dumps(["tgroup"]),
                "authors": jsonutils.dumps(["tauthor"]),
                "licenses": jsonutils.dumps(["tlicense"]),
                "releases": jsonutils.dumps([{"repository_path": "repositories/ubuntu"}]),
                "fuel_version": jsonutils.dumps(["6.1", "7.0"]),
            }
        ],
    )

    # Primary (6.1) release; its id seeds the cluster and role rows below.
    result = db.execute(
        meta.tables["releases"].insert(),
        [
            {
                "name": "test_name",
                "version": "2014.2.2-6.1",
                "operating_system": "ubuntu",
                "state": "available",
                "roles": jsonutils.dumps(["controller", "compute", "mongo"]),
                "roles_metadata": jsonutils.dumps(
                    {
                        "controller": {"name": "Controller", "description": "Controller role", "has_primary": True},
                        "zabbix-server": {"name": "Zabbix Server", "description": "Zabbix Server role"},
                        "cinder": {"name": "Cinder", "description": "Cinder role"},
                        "mongo": {"name": "Telemetry - MongoDB", "description": "mongo is", "has_primary": True},
                    }
                ),
                "attributes_metadata": jsonutils.dumps({}),
                "networks_metadata": jsonutils.dumps(
                    {
                        "bonding": {
                            "properties": {
                                "linux": {
                                    "mode": [
                                        {"values": ["balance-rr", "active-backup", "802.3ad"]},
                                        {
                                            "values": ["balance-xor", "broadcast", "balance-tlb", "balance-alb"],
                                            "condition": "'experimental' in version:feature_groups",
                                        },
                                    ]
                                }
                            }
                        }
                    }
                ),
                "is_deployable": True,
            }
        ],
    )
    releaseid = result.inserted_primary_key[0]

    # Older (6.0) release; nothing references it, so no key is captured.
    db.execute(
        meta.tables["releases"].insert(),
        [
            {
                "name": "test_name_2",
                "version": "2014.2-6.0",
                "operating_system": "ubuntu",
                "state": "available",
                "roles": jsonutils.dumps(["controller", "compute", "mongo"]),
                "roles_metadata": jsonutils.dumps({}),
                "attributes_metadata": jsonutils.dumps({}),
                "networks_metadata": jsonutils.dumps(
                    {
                        "bonding": {
                            "properties": {
                                "ovs": {"mode": [{"values": ["active-backup", "balance-slb", "lacp-balance-tcp"]}]}
                            }
                        }
                    }
                ),
                "is_deployable": True,
            }
        ],
    )

    result = db.execute(
        meta.tables["clusters"].insert(),
        [
            {
                "name": "test_env",
                "release_id": releaseid,
                "mode": "ha_compact",
                "status": "new",
                "net_provider": "neutron",
                "grouping": "roles",
                "fuel_version": "6.1",
            }
        ],
    )
    clusterid = result.inserted_primary_key[0]

    # Networking config plus its neutron-specific companion row, which
    # shares the same primary key.
    result = db.execute(
        meta.tables["networking_configs"].insert(),
        [{"cluster_id": None, "dns_nameservers": ["8.8.8.8"], "floating_ranges": [], "configuration_template": None}],
    )
    db.execute(
        meta.tables["neutron_config"].insert(),
        [
            {
                "id": result.inserted_primary_key[0],
                "vlan_range": [],
                "gre_id_range": [],
                "base_mac": "00:00:00:00:00:00",
                "internal_cidr": "10.10.10.00/24",
                "internal_gateway": "10.10.10.01",
                "segmentation_type": "vlan",
                "net_l23_provider": "ovs",
            }
        ],
    )

    # Three nodes: two in the cluster, one unassigned.
    result = db.execute(
        meta.tables["nodes"].insert(),
        [
            {
                "uuid": "one",
                "cluster_id": clusterid,
                "group_id": None,
                "status": "ready",
                "meta": "{}",
                "mac": "aa:aa:aa:aa:aa:aa",
                "pending_addition": True,
                "pending_deletion": False,
                "timestamp": datetime.datetime.utcnow(),
            }
        ],
    )
    nodeid_a = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables["nodes"].insert(),
        [
            {
                "uuid": "two",
                "cluster_id": clusterid,
                "group_id": None,
                "status": "discover",
                "meta": "{}",
                "mac": "bb:bb:bb:bb:bb:bb",
                "pending_addition": True,
                "pending_deletion": False,
                "timestamp": datetime.datetime.utcnow(),
            }
        ],
    )
    nodeid_b = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables["nodes"].insert(),
        [
            {
                "uuid": "three",
                "cluster_id": None,
                "group_id": None,
                "status": "discover",
                "meta": "{}",
                "mac": "cc:cc:cc:cc:cc:cc",
                "pending_addition": True,
                "pending_deletion": False,
                "timestamp": datetime.datetime.utcnow(),
            }
        ],
    )
    nodeid_c = result.inserted_primary_key[0]

    db.execute(
        meta.tables["node_attributes"].insert(),
        [
            {"node_id": nodeid_a, "volumes": jsonutils.dumps([{"volume": nodeid_a}])},
            {"node_id": nodeid_b, "volumes": jsonutils.dumps([{"volume": nodeid_b}])},
            {"node_id": nodeid_c, "volumes": jsonutils.dumps([{"volume": nodeid_c}])},
        ],
    )

    result = db.execute(meta.tables["roles"].insert(), [{"release_id": releaseid, "name": "controller"}])
    controllerroleid = result.inserted_primary_key[0]

    result = db.execute(meta.tables["roles"].insert(), [{"release_id": releaseid, "name": "mongo"}])
    mongoroleid = result.inserted_primary_key[0]

    # Current and pending role assignments; their keys are not needed.
    db.execute(
        meta.tables["node_roles"].insert(),
        [
            {"role": controllerroleid, "node": nodeid_a, "primary": False},
            {"role": controllerroleid, "node": nodeid_b, "primary": False},
            {"role": controllerroleid, "node": nodeid_c, "primary": True},
            {"role": mongoroleid, "node": nodeid_a, "primary": False},
        ],
    )

    db.execute(
        meta.tables["pending_node_roles"].insert(),
        [
            {"role": mongoroleid, "node": nodeid_b, "primary": True},
            {"role": mongoroleid, "node": nodeid_c, "primary": False},
        ],
    )

    # NIC ids are fixed so net_nic_assignments below can refer to them.
    db.execute(
        meta.tables["node_nic_interfaces"].insert(),
        [
            {
                "id": 1,
                "node_id": nodeid_a,
                "name": "test_interface",
                "mac": "00:00:00:00:00:01",
                "max_speed": 200,
                "current_speed": 100,
                "ip_addr": "10.20.0.2",
                "netmask": "255.255.255.0",
                "state": "test_state",
                "interface_properties": jsonutils.dumps({"test_property": "test_value"}),
                "driver": "test_driver",
                "bus_info": "some_test_info",
            },
            {
                "id": 2,
                "node_id": nodeid_a,
                "name": "test_interface_2",
                "mac": "00:00:00:00:00:02",
                "max_speed": 200,
                "current_speed": 100,
                "ip_addr": "10.30.0.2",
                "netmask": "255.255.255.0",
                "state": "test_state",
                "interface_properties": jsonutils.dumps({"test_property": "test_value"}),
                "driver": "test_driver",
                "bus_info": "some_test_info",
            },
            {
                "id": 3,
                "node_id": nodeid_a,
                "name": "test_interface_3",
                "mac": "00:00:00:00:00:03",
                "max_speed": 200,
                "current_speed": 100,
                "ip_addr": "10.30.0.2",
                "netmask": "255.255.255.0",
                "state": "test_state",
                "interface_properties": jsonutils.dumps({"test_property": "test_value"}),
                "driver": "test_driver",
                "bus_info": "some_test_info",
            },
        ],
    )

    db.execute(
        meta.tables["node_bond_interfaces"].insert(),
        [
            {
                "node_id": nodeid_a,
                "name": "test_bond_interface",
                "mode": "active-backup",
                "bond_properties": jsonutils.dumps({"test_property": "test_value"}),
            }
        ],
    )

    db.execute(
        meta.tables["network_groups"].insert(),
        [
            {"id": 1, "name": "fuelweb_admin", "vlan_start": None, "cidr": "10.20.0.0/24", "gateway": "10.20.0.200"},
            {"id": 2, "name": "public", "vlan_start": None, "cidr": "10.30.0.0/24", "gateway": "10.30.0.200"},
        ],
    )

    db.execute(
        meta.tables["net_nic_assignments"].insert(),
        [
            {"network_id": 1, "interface_id": 1},
            {"network_id": 2, "interface_id": 2},
            {"network_id": 2, "interface_id": 3},
        ],
    )

    db.commit()
# Example #54
def prepare():
    """Seed the test database with fixture rows for a migration test.

    Inserts a 9.0 release (plus an 8.0 one), two clusters, one
    unassigned node, two plugins attached to the first cluster, NIC
    and bond interfaces, and deployment tasks with history rows, then
    commits.  Primary keys are captured only where a later insert
    needs them as a foreign key.
    """
    meta = base.reflect_db_metadata()

    # 9.0 release; its id seeds the clusters below.
    result = db.execute(
        meta.tables['releases'].insert(),
        [{
            'name': 'test_name',
            'version': '2015.1-9.0',
            'operating_system': 'ubuntu',
            'state': 'available',
            'deployment_tasks': jsonutils.dumps(JSON_TASKS),
            'roles': jsonutils.dumps([
                'controller',
                'compute',
                'virt',
                'compute-vmware',
                'ironic',
                'cinder',
                'cinder-block-device',
                'cinder-vmware',
                'ceph-osd',
                'mongo',
                'base-os',
            ]),
            'roles_metadata': jsonutils.dumps({
                'controller': {
                    'name': 'Controller',
                },
                'compute': {
                    'name': 'Compute',
                },
                'virt': {
                    'name': 'Virtual',
                },
                'compute-vmware': {
                    'name': 'Compute VMware',
                },
                'ironic': {
                    'name': 'Ironic',
                },
                'cinder': {
                    'name': 'Cinder',
                },
                'cinder-block-device': {
                    'name': 'Cinder Block Device',
                },
                'cinder-vmware': {
                    'name': 'Cinder Proxy to VMware Datastore',
                },
                'ceph-osd': {
                    'name': 'Ceph OSD',
                },
                'mongo': {
                    'name': 'Telemetry - MongoDB',
                },
                'base-os': {
                    'name': 'Operating System',
                }
            }),
            'is_deployable': True,
            'networks_metadata': jsonutils.dumps({
                'neutron': {
                    'networks': [],
                    'config': {}
                },
                'dpdk_drivers': {
                    'igb_uio': ['qwe']
                },
            }),
            'volumes_metadata': jsonutils.dumps({}),
            'attributes_metadata': jsonutils.dumps(ATTRIBUTES_METADATA),
            'vmware_attributes_metadata':
                jsonutils.dumps(VMWARE_ATTRIBUTES_METADATA)
        }])

    release_id = result.inserted_primary_key[0]

    # Older 8.0 release; nothing references it, so no key is captured.
    db.execute(
        meta.tables['releases'].insert(),
        [{
            'name': 'test_old',
            'version': '2015.1-8.0',
            'operating_system': 'ubuntu',
            'state': 'available',
            'deployment_tasks': jsonutils.dumps(JSON_TASKS),
            'roles': '[]',
            'roles_metadata': '{}',
            'is_deployable': True,
            'networks_metadata': '{}',
        }]
    )

    cluster_ids = []
    for cluster_name in ['test_env1', 'test_env2']:
        result = db.execute(
            meta.tables['clusters'].insert(),
            [{
                'name': cluster_name,
                'release_id': release_id,
                'mode': 'ha_compact',
                'status': 'new',
                'net_provider': 'neutron',
                'grouping': 'roles',
                'fuel_version': '9.0',
                'deployment_tasks': jsonutils.dumps(JSON_TASKS)
            }])
        cluster_ids.append(result.inserted_primary_key[0])

    # Unassigned node; its id is used by the interface rows below.
    result = db.execute(
        meta.tables['nodes'].insert(),
        [{
            'uuid': '26b508d0-0d76-4159-bce9-f67ec2765480',
            'cluster_id': None,
            'group_id': None,
            'status': 'discover',
            'meta': '{}',
            'mac': 'aa:aa:aa:aa:aa:aa',
            'timestamp': datetime.datetime.utcnow(),
        }]
    )
    node_id = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables['plugins'].insert(),
        [{
            'name': 'test_plugin_a',
            'title': 'Test plugin A',
            'version': '2.0.0',
            'description': 'Test plugin A for Fuel',
            'homepage': 'http://fuel_plugins.test_plugin.com',
            'package_version': '5.0.0',
            'groups': jsonutils.dumps(['tgroup']),
            'authors': jsonutils.dumps(['tauthor']),
            'licenses': jsonutils.dumps(['tlicense']),
            'releases': jsonutils.dumps([
                {'repository_path': 'repositories/ubuntu'}
            ]),
            'deployment_tasks': jsonutils.dumps(JSON_TASKS),
            'fuel_version': jsonutils.dumps(['9.0']),
            'network_roles_metadata': jsonutils.dumps([{
                'id': 'admin/vip',
                'default_mapping': 'fuelweb_admin',
                'properties': {
                    'subnet': True,
                    'gateway': False,
                    'vip': [
                        {
                            'name': 'my-vip1',
                            'namespace': 'my-namespace1',
                        },
                        {
                            'name': 'my-vip2',
                            'namespace': 'my-namespace2',
                        }
                    ]
                }
            }])
        }]
    )
    plugin_a_id = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables['plugins'].insert(),
        [{
            'name': 'test_plugin_b',
            'title': 'Test plugin B',
            'version': '2.0.0',
            'description': 'Test plugin B for Fuel',
            'homepage': 'http://fuel_plugins.test_plugin.com',
            'package_version': '5.0.0',
            'groups': jsonutils.dumps(['tgroup']),
            'authors': jsonutils.dumps(['tauthor']),
            'licenses': jsonutils.dumps(['tlicense']),
            'releases': jsonutils.dumps([
                {'repository_path': 'repositories/ubuntu'}
            ]),
            'fuel_version': jsonutils.dumps(['9.0']),
            'network_roles_metadata': jsonutils.dumps([{
                'id': 'admin/vip',
                'default_mapping': 'fuelweb_admin',
                'properties': {
                    'subnet': True,
                    'gateway': False,
                    'vip': [
                        {
                            'name': 'my-vip3',
                            'namespace': 'my-namespace3',
                        },
                        {
                            'name': 'my-vip4',
                            'namespace': 'my-namespace4',
                        }
                    ]
                }
            }])
        }]
    )
    plugin_b_id = result.inserted_primary_key[0]

    db.execute(
        meta.tables['cluster_plugins'].insert(),
        [
            {'cluster_id': cluster_ids[0], 'plugin_id': plugin_a_id},
            {'cluster_id': cluster_ids[0], 'plugin_id': plugin_b_id}
        ]
    )

    db.execute(
        meta.tables['node_nic_interfaces'].insert(),
        [{
            'id': 1,
            'node_id': node_id,
            'name': 'test_interface',
            'mac': '00:00:00:00:00:01',
            'max_speed': 200,
            'current_speed': 100,
            'ip_addr': '10.20.0.2',
            'netmask': '255.255.255.0',
            'state': 'test_state',
            'interface_properties': jsonutils.dumps(
                {'test_property': 'test_value'}),
            'driver': 'test_driver',
            'bus_info': 'some_test_info'
        }]
    )

    db.execute(
        meta.tables['node_bond_interfaces'].insert(),
        [{
            'node_id': node_id,
            'name': 'test_bond_interface',
            'mode': 'active-backup',
            'bond_properties': jsonutils.dumps(
                {'test_property': 'test_value'})
        }]
    )

    # Task 55 carries an empty deployment_info and one history record.
    db.execute(
        meta.tables['tasks'].insert(),
        [
            {
                'id': 55,
                'uuid': '219eaafe-01a1-4f26-8edc-b9d9b0df06b3',
                'name': 'deployment',
                'status': 'running',
                'deployment_info': jsonutils.dumps({}),
            },
        ]
    )
    db.execute(
        meta.tables['deployment_history'].insert(),
        [
            {
                'uuid': 'fake_uuid_0',
                'deployment_graph_task_name': 'fake',
                'node_id': 'fake_node_id',
                'task_id': 55,
                'status': 'pending',
                'summary': jsonutils.dumps({'fake': 'fake'}),
            }
        ]
    )

    # Tasks 102/103 carry real deployment_info keyed by their ids.
    db.execute(
        meta.tables['tasks'].insert(),
        [
            {
                'id': 102,
                'uuid': '219eaafe-01a1-4f26-8edc-b9d9b0df06b3',
                'name': 'deployment',
                'status': 'running',
                'deployment_info': jsonutils.dumps(DEPLOYMENT_INFO[102])
            },
            {
                'id': 103,
                'uuid': 'a45fbbcd-792c-4245-a619-f4fb2f094d38',
                'name': 'deployment',
                'status': 'running',
                'deployment_info': jsonutils.dumps(DEPLOYMENT_INFO[103])
            }
        ]
    )

    db.commit()
# Example #55
 def _update_release_state(cls, release_id, state):
     """Set the ``state`` of the release identified by ``release_id``.

     Loads the release, mutates its state and commits immediately.
     The original mixed ``db()`` calls with attribute access on ``db``;
     a single session handle is used here for consistency.
     """
     session = db()
     release = session.query(Release).get(release_id)
     release.state = state
     session.add(release)
     session.commit()
# Example #56
def prepare():
    """Seed the test database with fixture rows for a 10.0 migration test.

    Inserts a release, two clusters, two nodes, two plugins with
    cluster/plugin link rows (including deliberate duplicates the
    migration under test must de-duplicate), NIC and bond interfaces
    and two running deployment tasks, then delegates extra setup to
    ``TestRequiredComponentTypesField.prepare`` and commits.
    """
    meta = base.reflect_db_metadata()

    # 2015.1-10.0 release; its id seeds the clusters below.
    result = db.execute(
        meta.tables['releases'].insert(),
        [{
            'name': 'test_name',
            'version': '2015.1-10.0',
            'operating_system': 'ubuntu',
            'state': 'available',
            'deployment_tasks': jsonutils.dumps(JSON_TASKS),
            'roles': jsonutils.dumps([
                'controller',
                'compute',
                'virt',
                'compute-vmware',
                'ironic',
                'cinder',
                'cinder-block-device',
                'cinder-vmware',
                'ceph-osd',
                'mongo',
                'base-os',
            ]),
            'roles_metadata': jsonutils.dumps({
                'controller': {'name': 'Controller'},
                'compute': {'name': 'Compute'},
                'virt': {'name': 'Virtual'},
                'compute-vmware': {'name': 'Compute VMware'},
                'ironic': {'name': 'Ironic'},
                'cinder': {'name': 'Cinder'},
                'cinder-block-device': {'name': 'Cinder Block Device'},
                'cinder-vmware': {'name': 'Cinder Proxy to VMware Datastore'},
                'ceph-osd': {'name': 'Ceph OSD'},
                'mongo': {'name': 'Telemetry - MongoDB'},
                'base-os': {'name': 'Operating System'},
            }),
            'is_deployable': True
        }])
    release_id = result.inserted_primary_key[0]

    cluster_ids = []
    for cluster_name in ['test_env1', 'test_env2']:
        result = db.execute(
            meta.tables['clusters'].insert(),
            [{
                'name': cluster_name,
                'release_id': release_id,
                'mode': 'ha_compact',
                'status': 'new',
                'net_provider': 'neutron',
                'grouping': 'roles',
                'fuel_version': '10.0',
                'deployment_tasks': jsonutils.dumps(JSON_TASKS)
            }])
        cluster_ids.append(result.inserted_primary_key[0])

    # Unassigned node with an explicit id, so the insert result is not
    # needed.
    node_id = 1
    db.execute(
        meta.tables['nodes'].insert(),
        [{
            'id': node_id,
            'uuid': '26b508d0-0d76-4159-bce9-f67ec2765480',
            'cluster_id': None,
            'group_id': None,
            'status': 'discover',
            'meta': '{}',
            'mac': 'aa:aa:aa:aa:aa:aa',
            'timestamp': datetime.datetime.utcnow(),
        }])

    result = db.execute(
        meta.tables['plugins'].insert(),
        [{
            'name': 'test_plugin_a',
            'title': 'Test plugin A',
            'version': '2.0.0',
            'description': 'Test plugin A for Fuel',
            'homepage': 'http://fuel_plugins.test_plugin.com',
            'package_version': '5.0.0',
            'groups': jsonutils.dumps(['tgroup']),
            'authors': jsonutils.dumps(['tauthor']),
            'licenses': jsonutils.dumps(['tlicense']),
            'releases': jsonutils.dumps([
                {'repository_path': 'repositories/ubuntu'}
            ]),
            'deployment_tasks': jsonutils.dumps(JSON_TASKS),
            'fuel_version': jsonutils.dumps(['10.0']),
            'network_roles_metadata': jsonutils.dumps([{
                'id': 'admin/vip',
                'default_mapping': 'fuelweb_admin',
                'properties': {
                    'subnet': True,
                    'gateway': False,
                    'vip': [
                        {'name': 'my-vip1', 'namespace': 'my-namespace1'},
                        {'name': 'my-vip2', 'namespace': 'my-namespace2'},
                    ]
                }
            }])
        }])
    plugin_a_id = result.inserted_primary_key[0]

    result = db.execute(
        meta.tables['plugins'].insert(),
        [{
            'name': 'test_plugin_b',
            'title': 'Test plugin B',
            'version': '2.0.0',
            'description': 'Test plugin B for Fuel',
            'homepage': 'http://fuel_plugins.test_plugin.com',
            'package_version': '5.0.0',
            'groups': jsonutils.dumps(['tgroup']),
            'authors': jsonutils.dumps(['tauthor']),
            'licenses': jsonutils.dumps(['tlicense']),
            'releases': jsonutils.dumps([
                {'repository_path': 'repositories/ubuntu'}
            ]),
            'fuel_version': jsonutils.dumps(['10.0']),
            'network_roles_metadata': jsonutils.dumps([{
                'id': 'admin/vip',
                'default_mapping': 'fuelweb_admin',
                'properties': {
                    'subnet': True,
                    'gateway': False,
                    'vip': [
                        {'name': 'my-vip3', 'namespace': 'my-namespace3'},
                        {'name': 'my-vip4', 'namespace': 'my-namespace4'},
                    ]
                }
            }])
        }])
    plugin_b_id = result.inserted_primary_key[0]

    db.execute(
        meta.tables['cluster_plugin_links'].insert(),
        [
            {
                'cluster_id': cluster_ids[0],
                'title': 'title',
                'url': 'http://www.zzz.com',
                'description': 'description',
                'hidden': False
            },
            # this is duplicate, should be deleted during migration
            {
                'cluster_id': cluster_ids[1],
                'title': 'title',
                'url': 'http://www.zzz.com',
                'description': 'description_duplicate',
                'hidden': False
            },
            # duplicate by URL but in another cluster, should
            # not be deleted
            {
                'cluster_id': cluster_ids[0],
                'title': 'title',
                'url': 'http://www.zzz.com',
                'description': 'description',
                'hidden': False
            }
        ])

    db.execute(
        meta.tables['cluster_plugins'].insert(),
        [
            {'cluster_id': cluster_ids[0], 'plugin_id': plugin_a_id},
            {'cluster_id': cluster_ids[0], 'plugin_id': plugin_b_id}
        ])

    db.execute(
        meta.tables['plugin_links'].insert(),
        [
            {
                'plugin_id': plugin_a_id,
                'title': 'title',
                'url': 'http://www.zzz.com',
                'description': 'description',
                'hidden': False
            },
            # this is duplicate, should be deleted during migration
            {
                'plugin_id': plugin_b_id,
                'title': 'title',
                'url': 'http://www.zzz.com',
                'description': 'description_duplicate',
                'hidden': False
            }
        ])

    db.execute(
        meta.tables['node_nic_interfaces'].insert(),
        [{
            'id': 1,
            'node_id': node_id,
            'name': 'test_interface',
            'mac': '00:00:00:00:00:01',
            'max_speed': 200,
            'current_speed': 100,
            'ip_addr': '10.20.0.2',
            'netmask': '255.255.255.0',
            'state': 'test_state',
            'interface_properties': jsonutils.dumps(
                {'test_property': 'test_value'}),
            'driver': 'test_driver',
            'bus_info': 'some_test_info'
        }])

    db.execute(
        meta.tables['node_bond_interfaces'].insert(),
        [{
            'node_id': node_id,
            'name': 'test_bond_interface',
            'mode': 'active-backup',
            'bond_properties': jsonutils.dumps({'test_property': 'test_value'})
        }])

    # Two running deployment tasks whose ids key into DEPLOYMENT_INFO.
    db.execute(
        meta.tables['tasks'].insert(),
        [{
            'id': 55,
            'uuid': '219eaafe-01a1-4f26-8edc-b9d9b0df06b3',
            'name': 'deployment',
            'status': 'running',
            'deployment_info': jsonutils.dumps(DEPLOYMENT_INFO[55])
        }, {
            'id': 56,
            'uuid': 'a45fbbcd-792c-4245-a619-f4fb2f094d38',
            'name': 'deployment',
            'status': 'running',
            'deployment_info': jsonutils.dumps(DEPLOYMENT_INFO[56])
        }])

    # Deployed node assigned to the first cluster.
    db.execute(
        meta.tables['nodes'].insert(),
        [{
            'id': 2,
            'uuid': 'fcd49872-3917-4a18-98f9-3f5acfe3fdec',
            'cluster_id': cluster_ids[0],
            'group_id': None,
            'status': 'ready',
            'roles': ['controller', 'ceph-osd'],
            'meta': '{}',
            'mac': 'bb:aa:aa:aa:aa:aa',
            'timestamp': datetime.datetime.utcnow(),
        }])

    TestRequiredComponentTypesField.prepare(meta)
    db.commit()