Example #1
    def test_create_port(self):
        # retrieve the neutron client
        neutron_client = client.neutron_client()

        # set an existing network_id and unique name to use
        network_name = "private"
        networks = neutron_client.list_networks(name=network_name)
        network = networks['networks'][0]
        CreatePortTests.task_store['network_id'] = network['id']
        CreatePortTests.task_store['port_name'] = "port_" + str(uuid.uuid4())

        # create flow with "CreatePort" task, given neutron client
        flow = linear_flow.Flow('create port').add(
            neutron_task.CreatePort(os_client=neutron_client,
                                    provides='neutron_port_id'))

        # execute flow with parameters required for "CreatePort" task
        engines.run(flow, store=CreatePortTests.task_store)

        # retrieve list of ports from Neutron service
        port_list = neutron_client.list_ports()

        # find our newly created port
        found = False
        for port in port_list['ports']:
            if port['network_id'] == CreatePortTests.task_store['network_id']:
                if port['name'] == CreatePortTests.task_store['port_name']:
                    found = True
                    break

        self.assertTrue(found, "New port was not found")
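
For context, the store/provides wiring used above follows the standard TaskFlow pattern: keys in the store dict satisfy each task's execute() arguments, and a task's return value is published under its provides name. A minimal, self-contained sketch of that pattern (MakePort is a hypothetical stand-in, not the real neutron_task.CreatePort):

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow

class MakePort(task.Task):
    # Hypothetical stand-in: the return value is published under the
    # name given by 'provides'.
    def execute(self, network_id, port_name):
        return "port-for-%s-on-%s" % (port_name, network_id)

flow = linear_flow.Flow('sketch').add(MakePort(provides='neutron_port_id'))
results = engines.run(flow, store={'network_id': 'net-1',
                                   'port_name': 'port-1'})
print(results['neutron_port_id'])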
Example #2
    def setUp(self):
        super(CreateVmTests, self).setUp()

        flavor_name = "m1.tiny"
        network_name = "private"

        self.nova_client = client.nova_client()
        self.neutron_client = client.neutron_client()

        self.new_vm_name = str(uuid.uuid4())
        self.new_vm_id = None

        # pick the first cirros kernel image from the image list
        image_list = self.nova_client.images.list()
        for image in image_list:
            if (image.name.startswith("cirros")) and (
                    image.name.endswith("kernel")):
                break
        self.valid_image = image

        self.valid_flavor = self.nova_client.flavors.find(name=flavor_name)

        network_list = self.neutron_client.list_networks(name=network_name)
        self.valid_network = network_list['networks'][0]

        self.flow = linear_flow.Flow("create vm flow")
        self.flow.add(
            create_vm.CreateVm(os_client=self.nova_client,
                               requires=('name', 'image', 'flavor', 'nics'),
                               provides='new_vm',
                               rebind={'name': 'vm_name'}))
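
The rebind argument in this setUp maps the task's name parameter to the store key vm_name. A self-contained sketch of that remapping, using a hypothetical BuildVm task in place of create_vm.CreateVm:

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow

class BuildVm(task.Task):
    # Hypothetical stand-in for create_vm.CreateVm.
    def execute(self, name, image, flavor, nics):
        return {'name': name, 'image': image, 'flavor': flavor, 'nics': nics}

flow = linear_flow.Flow('sketch').add(
    BuildVm(provides='new_vm', rebind={'name': 'vm_name'}))

# Because of the rebind, the task's 'name' argument is satisfied by the
# store key 'vm_name' instead of 'name'.
results = engines.run(flow, store={'vm_name': 'test-vm',
                                   'image': 'cirros',
                                   'flavor': 'm1.tiny',
                                   'nics': []})
print(results['new_vm']['name'])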
Example #3
    def test_update_cluster_status_fail(self):
        """Verifies UpdateClusterRecord task failed flow scenario.

        This test simulates a failed flow with UpdateClusterRecord task
        followed by CreatePort task.  The CreateFlow task will be configured to
        fail which will in-turn fail the overall flow.  The failed flow will
        trigger UpdateClusterRecord task's revert method which will set Cluster
        status to ERROR state.
        """

        # retrieve the neutron client
        neutron_client = client.neutron_client()

        # setup a test cluster in DB for this test
        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": str(uuid.uuid4()),
            "flavor": "1",
            "size": 3,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        # verify new cluster is in BUILDING state
        self.assertEqual(models.Status.BUILDING, new_cluster.status,
                         "Invalid status")

        # set up required input variables for the "UpdateClusterRecord" task
        UpdateClusterRecordTest.task_store['context'] = self.context.to_dict()
        UpdateClusterRecordTest.task_store['cluster_id'] = new_cluster.id

        # create flow with "UpdateClusterRecord" task
        self.flow = linear_flow.Flow(name="update cluster status").add(
            update_cluster_status.UpdateClusterRecord(
                name="get RabbitMQ status",
                inject={'cluster_values': {
                    'status': models.Status.BUILDING
                }}),
            neutron_task.CreatePort(os_client=neutron_client,
                                    provides='neutron_port_id'))

        # start engine to run task
        self.assertRaises(exceptions.NetworkNotFoundClient,
                          engines.run,
                          self.flow,
                          store=UpdateClusterRecordTest.task_store)

        # verify cluster status is now in ERROR state
        cluster_after = objects.Cluster.get_cluster_by_id(
            self.context, new_cluster.id)
        self.assertEqual(models.Status.ERROR, cluster_after.status,
                         "Invalid status")
Example #4
    def setUp(self):
        super(DeletePortTests, self).setUp()

        network_name = 'private'

        self.neutron_client = client.neutron_client()

        network_list = self.neutron_client.list_networks(name=network_name)
        self.valid_network = network_list['networks'][0]

        # Create flow with the "DeletePorts" task
        self.flow = linear_flow.Flow('delete port').add(
            neutron_task.DeletePorts(os_client=self.neutron_client))
Example #5
    def test_create_port_invalid_network(self):
        # retrieve the neutron client
        neutron_client = client.neutron_client()

        # create flow with "CreatePort" task
        flow = linear_flow.Flow('create port').add(
            neutron_task.CreatePort(os_client=neutron_client,
                                    provides='neutron_port_id'))

        # generate a new UUID for an 'invalid' network_id
        CreatePortTests.task_store['network_id'] = str(uuid.uuid4())

        self.assertRaises(exceptions.NetworkNotFoundClient,
                          engines.run,
                          flow,
                          store=CreatePortTests.task_store)
Example #6
    def setUp(self):

        super(DeleteClusterTests, self).setUp()

        flavor_name = "m1.tiny"
        network_name = "private"
        management_network_name = "cue_management_net"

        self.nova_client = client.nova_client()
        self.neutron_client = client.neutron_client()
        self.port = '5672'

        self.new_vm_name = str(uuid.uuid4())
        self.new_vm_list = []

        # pick the first cirros kernel image from the image list
        image_list = self.nova_client.images.list()
        for image in image_list:
            if (image.name.startswith("cirros")) and (
                    image.name.endswith("kernel")):
                break
        self.valid_image = image

        self.valid_flavor = self.nova_client.flavors.find(name=flavor_name)

        network_list = self.neutron_client.list_networks(name=network_name)
        self.valid_network = network_list['networks'][0]

        network_list = self.neutron_client.list_networks(
            name=management_network_name)
        self.management_network = network_list['networks'][0]

        # TODO(Dan): If testing becomes asynchronous, then there is no
        # guarantee that these urllib2 results will be returned in the proper
        # order.  Will have to update the urllib2 fixture to respond
        # appropriately for the url passed in.
        urllib2_fixture.Urllib2ResultDetails.set_urllib2_result(
            ['{"status": "ok"}',
             '[{"name": "/"}]',
             '{"status": "ok"}',
             '[{"name": "/"}]',
             '{"status": "ok"}',
             '[{"name": "/"}]']
        )
Example #7
    def setUp(self):
        super(GetVmInterfacesTests, self).setUp()

        flavor_name = "m1.tiny"

        self.nova_client = client.nova_client()
        self.neutron_client = client.neutron_client()

        self.valid_vm_name = str(uuid.uuid4())

        # pick the first cirros kernel image from the image list
        image_list = self.nova_client.images.list()
        for image in image_list:
            if (image.name.startswith("cirros")) and (
                    image.name.endswith("kernel")):
                break
        valid_image = image

        valid_flavor = self.nova_client.flavors.find(name=flavor_name)

        network_name = "private"
        networks = self.neutron_client.list_networks(name=network_name)
        network = networks['networks'][0]
        self.valid_net_id = network['id']
        nics = [{'net-id': self.valid_net_id}]

        new_vm = self.nova_client.servers.create(name=self.valid_vm_name,
                                                 image=valid_image,
                                                 flavor=valid_flavor,
                                                 nics=nics)
        self.valid_vm_id = new_vm.id

        self.flow = linear_flow.Flow("list vm interfaces flow")
        self.flow.add(
            list_vm_interfaces.ListVmInterfaces(os_client=self.nova_client,
                                                requires=('server',),
                                                provides='interface_list'))
Example #8
    def create_vm(self,
                  name,
                  image,
                  flavor,
                  nics=None,
                  security_groups=None,
                  scheduler_hints=None,
                  **kwargs):
        """Mock'd version of novaclient...create_vm().

        Create a Nova VM.

        :param body: Dictionary with vm information.
        :return: An updated copy of the 'body' that was passed in, with other
                 information populated.
        """
        if len(self._vm_list) >= self._vm_limit:
            raise nova_exc.OverLimit(413)
        try:
            flavor_id = flavor.id
        except AttributeError:
            flavor_id = flavor

        try:
            image_id = image.id
        except AttributeError:
            image_id = image

        if flavor_id not in self._flavor_list:
            raise nova_exc.BadRequest(400)

        if image_id not in self._image_list:
            raise nova_exc.BadRequest(400)

        port_list = list()
        if nics is not None:
            neutron_client = client.neutron_client()
            for nic in nics:
                if 'net-id' in nic:
                    network_list = neutron_client.list_networks(
                        id=nic['net-id'])
                    if (not network_list or 'networks' not in network_list
                            or len(network_list['networks']) == 0):
                        raise nova_exc.BadRequest(400)

                    else:
                        body_value = {
                            "port": {
                                "admin_state_up": True,
                                "name": "test port",
                                "network_id": nic['net-id'],
                            }
                        }
                        port_info = neutron_client.create_port(body=body_value)
                        port_id = port_info['port']['id']
                        port_list.append(
                            InterfaceDetails(net_id=nic['net-id'],
                                             port_id=port_id))
                if 'port-id' in nic:
                    port_list.append(InterfaceDetails(port_id=nic['port-id']))

        if security_groups is not None:
            missing = set(security_groups) - set(self._security_group_list)
            if missing:
                raise nova_exc.BadRequest(400)

        newVm = VmDetails(vm_id=uuid.uuid4(),
                          name=name,
                          flavor=flavor,
                          image=image,
                          port_list=port_list,
                          status='BUILDING')

        if scheduler_hints is not None:
            try:
                group_id = scheduler_hints['group']
            except AttributeError:
                group_id = scheduler_hints

            if group_id not in self._vm_group_list:
                raise nova_exc.BadRequest(400)

            newVm.host_id = str(uuid.uuid4())

        self._vm_list[str(newVm.id)] = newVm

        return newVm
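
The try/except AttributeError blocks above implement a common "object or id" idiom: the caller may pass either a full flavor/image object or a bare id value. A standalone illustration of the idiom (Flavor and to_id are made up for this sketch):

class Flavor(object):
    def __init__(self, flavor_id):
        self.id = flavor_id

def to_id(flavor_or_id):
    # Accept either an object exposing .id or a bare id value.
    try:
        return flavor_or_id.id
    except AttributeError:
        return flavor_or_id

assert to_id(Flavor("42")) == "42"
assert to_id("42") == "42"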
Example #9
def create_cluster(cluster_id, node_ids, user_network_id,
                   management_network_id):
    """Create Cluster flow factory function

    This factory function uses :func:`cue.taskflow.flow.create_cluster_node` to
    create a multi node cluster.

    :param cluster_id: A unique ID assigned to the cluster being created
    :type cluster_id: string
    :param node_ids: The Cue node IDs associated with each node in the cluster
    :type node_ids: list of uuid strings
    :param user_network_id: The user's network id
    :type user_network_id: string
    :param management_network_id: The management network id
    :type management_network_id: string
    :return: A flow instance that represents the workflow for creating a
             cluster
    """
    cluster_name = "cue[%s]" % cluster_id
    flow = graph_flow.Flow("creating cluster %s" % cluster_id)
    start_flow_cluster_update = {
        'cluster_id': cluster_id,
        'cluster_values': {'status': models.Status.BUILDING}}

    extract_scheduler_hints = lambda vm_group: {'group': str(vm_group['id'])}
    end_flow_cluster_update = lambda vm_group: {
        'status': models.Status.ACTIVE,
        'group_id': str(vm_group['id'])}

    create_cluster_start_task = cue_tasks.UpdateClusterRecord(
        name="update cluster status start %s" % cluster_name,
        inject=start_flow_cluster_update)
    flow.add(create_cluster_start_task)

    cluster_anti_affinity = cfg.CONF.taskflow.cluster_node_anti_affinity
    if cluster_anti_affinity:
        create_vm_group = nova.CreateVmGroup(
            name="create cluster group %s" % cluster_name,
            os_client=client.nova_client(),
            requires=('name', 'policies'),
            inject={'name': "cue_group_%s" % cluster_id,
                    'policies': ['anti-affinity']},
            provides="cluster_group")
        flow.add(create_vm_group)

        get_scheduler_hints = os_common.Lambda(
            extract_scheduler_hints,
            name="extract scheduler hints %s" % cluster_name,
            rebind={'vm_group': "cluster_group"},
            provides="scheduler_hints")
        flow.add(get_scheduler_hints)

        build_cluster_info = os_common.Lambda(
            end_flow_cluster_update,
            name="build new cluster update values %s" % cluster_name,
            rebind={'vm_group': "cluster_group"},
            provides="cluster_values")
        flow.add(build_cluster_info)

        flow.link(create_cluster_start_task, create_vm_group)
        flow.link(create_vm_group, get_scheduler_hints)
        flow.link(get_scheduler_hints, build_cluster_info)
        create_node_start_task = build_cluster_info
        create_cluster_end_task = cue_tasks.UpdateClusterRecord(
            name="update cluster status end %s" % cluster_name,
            inject={'cluster_id': cluster_id})
    else:
        create_node_start_task = create_cluster_start_task
        end_flow_cluster_update = {
            'cluster_id': cluster_id,
            'cluster_values': {'status': models.Status.ACTIVE}}
        create_cluster_end_task = cue_tasks.UpdateClusterRecord(
            name="update cluster status end %s" % cluster_name,
            inject=end_flow_cluster_update)

    flow.add(create_cluster_end_task)

    show_network = os_neutron.ShowNetwork(
        name="get tenant network information",
        os_client=client.neutron_client(),
        inject={'network': user_network_id},
        provides="tenant_network_info"
    )
    flow.add(show_network)
    flow.link(create_node_start_task, show_network)

    validate_network_info = (lambda tenant_network_info, tenant_id:
                             tenant_network_info['shared'] or
                             tenant_network_info['tenant_id'] == tenant_id)

    validate_tenant_network = os_common.Assert(
        validate_network_info,
        name="validate tenant network info",
        requires=('tenant_network_info', 'tenant_id')
    )
    flow.add(validate_tenant_network)
    flow.link(show_network, validate_tenant_network)

    node_check_timeout = cfg.CONF.taskflow.cluster_node_check_timeout
    node_check_max_count = cfg.CONF.taskflow.cluster_node_check_max_count

    check_rabbit_online = linear_flow.Flow(
        name="wait for RabbitMQ ready state %s" % cluster_name,
        retry=retry.Times(node_check_max_count, revert_all=True))
    check_rabbit_online.add(
        cue_tasks.GetRabbitClusterStatus(
            name="get RabbitMQ status %s" % cluster_name,
            rebind={'vm_ip': "vm_management_ip_0"},
            provides="clustering_status",
            inject={'proto': 'http'}),
        os_common.CheckFor(
            name="check cluster status %s" % cluster_name,
            details="waiting for RabbitMQ clustered status",
            rebind={'check_var': "clustering_status"},
            check_value='OK',
            retry_delay_seconds=node_check_timeout),
    )
    flow.add(check_rabbit_online)

    flow.link(check_rabbit_online, create_cluster_end_task)

    # TODO(dagnello): verify node_ids is a list and not a string
    for i, node_id in enumerate(node_ids):
        generate_userdata = cue_tasks.ClusterNodeUserData(
            name="generate userdata %s_%d" % (cluster_name, i),
            node_count=len(node_ids),
            node_ip_prefix="vm_management_ip_",
            inject={'node_name': "rabbit-node-%d" % i,
                    'cluster_id': cluster_id})
        flow.add(generate_userdata)

        create_cluster_node.create_cluster_node(cluster_id, i, node_id, flow,
                                                generate_userdata,
                                                validate_tenant_network,
                                                check_rabbit_online,
                                                user_network_id,
                                                management_network_id)

    return flow
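
This factory builds on graph_flow's add/link pattern: add() registers tasks, link(a, b) adds an ordering edge so b runs only after a, and tasks with no ordering between them may run in any order. A self-contained sketch of that pattern (Step and the task names are illustrative only):

from taskflow import engines
from taskflow import task
from taskflow.patterns import graph_flow

class Step(task.Task):
    def execute(self):
        print("running %s" % self.name)

flow = graph_flow.Flow("sketch")
start, left, right, end = (Step(name="start"), Step(name="left"),
                           Step(name="right"), Step(name="end"))
flow.add(start, left, right, end)
flow.link(start, left)    # left runs after start
flow.link(start, right)   # right runs after start, independent of left
flow.link(left, end)      # end waits for both branches
flow.link(right, end)
engines.run(flow)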
Example #10
def delete_cluster_node(cluster_id, node_number, node_id):
    """Delete Cluster Node factory function

    This factory function creates a flow for deleting a node of a cluster.

    :param cluster_id: Unique ID for the cluster that the node is part of.
    :type cluster_id: string
    :param node_number: Cluster node # for the node being deleted.
    :type node_number: number
    :param node_id: Unique ID for the node.
    :type node_id: string
    :return: A flow instance that represents the workflow for deleting a
             cluster node.
    """
    flow_name = "delete cluster %s node %d" % (cluster_id, node_number)
    node_name = "cluster[%s].node[%d]" % (cluster_id, node_number)

    extract_vm_id = lambda node: node['instance_id']
    extract_port_ids = lambda interfaces: [i['port_id'] for i in interfaces]

    deleted_node_values = {'status': models.Status.DELETED,
                           'deleted': True}

    deleted_endpoints_values = {'deleted': True}

    flow = linear_flow.Flow(flow_name)
    flow.add(
        cue_tasks.GetNode(
            name="Get Node %s" % node_name,
            inject={'node_id': node_id},
            provides="node_%d" % node_number),
        os_common.Lambda(
            extract_vm_id,
            name="extract vm id %s" % node_name,
            rebind={'node': "node_%d" % node_number},
            provides="vm_id_%d" % node_number),
        nova.ListVmInterfaces(
            os_client=client.nova_client(),
            name="list vm interfaces %s" % node_name,
            rebind={'server': "vm_id_%d" % node_number},
            inject={'ignore_nova_not_found_exception': True},
            provides="vm_interfaces_%d" % node_number),
        os_common.Lambda(
            extract_port_ids,
            name="extract port ids %s" % node_name,
            rebind={'interfaces': "vm_interfaces_%d" % node_number},
            provides="vm_port_list_%d" % node_number),
        nova.DeleteVm(
            os_client=client.nova_client(),
            name="delete vm %s" % node_name,
            rebind={'server': "vm_id_%d" % node_number}),
        neutron.DeletePorts(
            os_client=client.neutron_client(),
            name="delete vm %s ports" % node_name,
            rebind={'port_ids': "vm_port_list_%d" % node_number}),
        cue_tasks.UpdateNodeRecord(
            name="update node %s" % node_name,
            inject={'node_id': node_id,
                    'node_values': deleted_node_values}),
        cue_tasks.UpdateEndpointsRecord(
            name="update endpoint for node %s" % node_name,
            inject={'node_id': node_id,
                    'endpoints_values': deleted_endpoints_values}
        ))
    return flow
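
The extract helpers here are plain lambdas applied to values published by earlier tasks; extract_port_ids, for instance, turns the interface listing from ListVmInterfaces into the port-id list that DeletePorts consumes. A standalone illustration with hypothetical interface records:

extract_port_ids = lambda interfaces: [i['port_id'] for i in interfaces]

# Hypothetical records, shaped like the values the flow passes around.
sample_interfaces = [
    {'port_id': 'port-aaa'},
    {'port_id': 'port-bbb'},
]
assert extract_port_ids(sample_interfaces) == ['port-aaa', 'port-bbb']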
Example #11
def create_cluster_node(cluster_id, node_number, node_id, graph_flow,
                        generate_userdata, start_task, post_task,
                        user_network_id, management_network_id):
    """Create Cluster Node factory function

    This factory function creates a flow for creating a node of a cluster.

    :param cluster_id: Unique ID for the cluster that the node is part of.
    :type cluster_id: string
    :param node_number: Cluster node # for the node being created.
    :type node_number: number
    :param node_id: Unique ID for the node.
    :type node_id: string
    :param graph_flow: TaskFlow graph flow which contains create cluster flow
    :type graph_flow: taskflow.patterns.graph_flow
    :param generate_userdata: Generate user data task
    :type generate_userdata: cue.taskflow.task.ClusterNodeUserData
    :param start_task: Update cluster status start task
    :type start_task: cue.taskflow.task.UpdateClusterRecord
    :param post_task: Task/Subflow to run after the flow created here
    :type post_task: taskflow task or flow
    :param user_network_id: The user's network id
    :type user_network_id: string
    :param management_network_id: The management network id
    :type management_network_id: string
    :return: A flow instance that represents the workflow for creating a
             cluster node.
    """
    node_name = "cue[%s].node[%d]" % (cluster_id, node_number)

    extract_port_info = (
        lambda user_port_info, management_port_info: (
            [  # nova boot requires a list of port-id's
                {
                    'port-id': user_port_info['port']['id']
                }, {
                    'port-id': management_port_info['port']['id']
                }
            ],
            # user port ip
            user_port_info['port']['fixed_ips'][0]['ip_address'],
            # management port ip
            management_port_info['port']['fixed_ips'][0]['ip_address']))

    extract_vm_id = lambda vm_info: str(vm_info['id'])

    new_node_values = lambda nova_vm_id, vm_management_ip: {
        'status': models.Status.ACTIVE,
        'instance_id': nova_vm_id,
        'management_ip': vm_management_ip
    }

    new_endpoint_values = lambda vm_ip: {
        'node_id': node_id,
        'uri': vm_ip + ':',
        'type': 'AMQP'
    }

    create_user_port = neutron.CreatePort(name="create port %s" % node_name,
                                          os_client=client.neutron_client(),
                                          inject={
                                              'network_id': user_network_id,
                                              'port_name': 'user_' + node_name
                                          },
                                          provides="user_port_info_%d" %
                                          node_number)
    graph_flow.add(create_user_port)
    graph_flow.link(start_task, create_user_port)

    create_management_port = neutron.CreatePort(
        name="create management port %s" % node_name,
        os_client=client.neutron_client(),
        inject={
            'network_id': management_network_id,
            'port_name': 'management_' + node_name
        },
        provides="management_port_info_%d" % node_number)
    graph_flow.add(create_management_port)
    graph_flow.link(start_task, create_management_port)

    extract_port_data = os_common.Lambda(
        extract_port_info,
        name="extract port id %s" % node_name,
        rebind={
            'user_port_info': "user_port_info_%d" % node_number,
            'management_port_info': "management_port_info_%d" % node_number
        },
        provides=("port_ids_%d" % node_number, "vm_user_ip_%d" % node_number,
                  "vm_management_ip_%d" % node_number))
    graph_flow.add(extract_port_data)
    graph_flow.link(create_user_port, extract_port_data)

    create_vm = nova.CreateVm(name="create vm %s" % node_name,
                              os_client=client.nova_client(),
                              requires=('name', 'image', 'flavor', 'nics'),
                              inject={'name': node_name},
                              rebind={'nics': "port_ids_%d" % node_number},
                              provides="vm_info_%d" % node_number)
    graph_flow.add(create_vm)
    graph_flow.link(create_management_port, create_vm)
    graph_flow.link(generate_userdata, create_vm)

    get_vm_id = os_common.Lambda(
        extract_vm_id,
        name="extract vm id %s" % node_name,
        rebind={'vm_info': "vm_info_%d" % node_number},
        provides="vm_id_%d" % node_number)

    graph_flow.add(get_vm_id)
    graph_flow.link(create_vm, get_vm_id)

    retry_count = CONF.flow_options.create_cluster_node_vm_active_retry_count
    check_vm_active = linear_flow.Flow(
        name="wait for VM active state %s" % node_name,
        retry=retry.ExceptionTimes(
            revert_exception_list=[cue_exceptions.VmErrorException],
            attempts=retry_count,
            revert_all=True))

    check_vm_active.add(
        nova.GetVmStatus(os_client=client.nova_client(),
                         name="get vm %s" % node_name,
                         rebind={'nova_vm_id': "vm_id_%d" % node_number},
                         provides="vm_status_%d" % node_number),
        cue_tasks.CheckForVmStatus(
            name="check vm status %s" % node_name,
            details="waiting for ACTIVE VM status",
            rebind={'check_var': "vm_status_%d" % node_number},
            retry_delay_seconds=10),
    )

    graph_flow.add(check_vm_active)
    graph_flow.link(get_vm_id, check_vm_active)

    build_node_info = os_common.Lambda(new_node_values,
                                       name="build new node values %s" %
                                       node_name,
                                       rebind={
                                           'nova_vm_id':
                                           "vm_id_%d" % node_number,
                                           'vm_management_ip':
                                           "vm_management_ip_%d" % node_number
                                       },
                                       provides="node_values_%d" % node_number)
    graph_flow.add(build_node_info)
    graph_flow.link(get_vm_id, build_node_info)

    update_node = cue_tasks.UpdateNodeRecord(
        name="update node %s" % node_name,
        rebind={'node_values': "node_values_%d" % node_number},
        inject={'node_id': node_id})
    graph_flow.add(update_node)
    graph_flow.link(build_node_info, update_node)

    build_endpoint_info = os_common.Lambda(
        new_endpoint_values,
        name="build new endpoint values %s" % node_name,
        rebind={'vm_ip': "vm_user_ip_%d" % node_number},
        inject={'node_id': node_id},
        provides="endpoint_values_%d" % node_number)
    graph_flow.add(build_endpoint_info)
    graph_flow.link(check_vm_active, build_endpoint_info)

    create_endpoint = cue_tasks.CreateEndpoint(
        name="update endpoint for node %s" % node_name,
        rebind={'endpoint_values': "endpoint_values_%d" % node_number})
    graph_flow.add(create_endpoint)
    graph_flow.link(check_vm_active, create_endpoint)

    graph_flow.link(update_node, post_task)
    graph_flow.link(create_endpoint, post_task)
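
Most intermediate values in this flow are passed by name: one task publishes a value via provides and the next consumes it through rebind, which also gives graph_flow the dependency information it needs for ordering. A self-contained sketch of that chaining (BootVm and ExtractId are illustrative only, not the real Cue tasks):

from taskflow import engines
from taskflow import task
from taskflow.patterns import graph_flow

class BootVm(task.Task):
    def execute(self):
        return {'id': 'vm-123', 'status': 'BUILDING'}

class ExtractId(task.Task):
    def execute(self, vm_info):
        return vm_info['id']

flow = graph_flow.Flow("sketch")
flow.add(BootVm(provides="vm_info_0"),
         ExtractId(rebind={'vm_info': "vm_info_0"}, provides="vm_id_0"))

# No explicit link is needed here: the shared name "vm_info_0" creates the
# data dependency that orders the two tasks.
print(engines.run(flow)['vm_id_0'])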