Example #1
 def __init__(self, *args, **kwargs):
     self.nova_client = nova_client.Client('2', *args, **kwargs)
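
Example #1 only forwards its arguments to the real client, so anything novaclient accepts can be passed through. A minimal sketch of how such a wrapper is typically fed, assuming keystoneauth1 password auth (the URL and credentials below are placeholders):

from keystoneauth1.identity import v3
from keystoneauth1 import session as ks_session
from novaclient import client as nova_client

# Placeholder credentials; any kwargs accepted by novaclient work here.
auth = v3.Password(auth_url='http://keystone.example.com:5000/v3',
                   username='demo', password='secret',
                   project_name='demo',
                   user_domain_name='Default',
                   project_domain_name='Default')
sess = ks_session.Session(auth=auth)

# Equivalent to what the wrapper's __init__ ends up doing:
nova = nova_client.Client('2', session=sess)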
Example #2
    username=config['username'],
    password=config['password'],
    tenant_name=config['admin_project_name']
)
keystoneClient = KeystoneClient(session=KeystoneSession(auth=auth), endpoint=config['nova_api_url'])

projects = []
for tenant in keystoneClient.tenants.list():
    projects.append(tenant.name)

aliases = {}
for project in projects:
    client = novaclient.Client(
        "1.1",
        config['username'],
        config['password'],
        project,
        config['nova_api_url']
    )

    for server in client.servers.list():
        serverAddresses = {}
        try:
            private = [
                str(ip['addr']) for ip in server.addresses['public'] if ip['OS-EXT-IPS:type'] == 'fixed'
            ]
            public = [
                str(ip['addr']) for ip in server.addresses['public'] if ip['OS-EXT-IPS:type'] == 'floating'
            ]
            if public:
                # Match all possible public IPs to all possible private ones
Example #3
floating_ip_pool_name = None
floating_ip = None
image_name = "Ubuntu 16.04 LTS (Xenial Xerus) - latest"

loader = loading.get_plugin_loader('password')

auth = loader.load_from_options(auth_url=env['OS_AUTH_URL'],
                                username=env['OS_USERNAME'],
                                password=env['OS_PASSWORD'],
                                project_name=env['OS_PROJECT_NAME'],
                                project_domain_name=env['OS_PROJECT_DOMAIN_NAME'],
                                project_id=env['OS_PROJECT_ID'],
                                user_domain_name=env['OS_USER_DOMAIN_NAME'])

sess = session.Session(auth=auth)
nova = client.Client('2.1', session=sess)
print "user authorization completed."

image = nova.glance.find_image(image_name)

flavor = nova.flavors.find(name=flavor)

if private_net is not None:
    net = nova.neutron.find_network(private_net)
    nics = [{'net-id': net.id}]
else:
    sys.exit("private-net not defined.")

#print("Path at terminal when executing this file")
#print(os.getcwd() + "\n")
cfg_file_path = os.getcwd() + '/cloud-cfg.txt'
Example #4
def get_clients():

    ks_version = int(env.get('OS_IDENTITY_API_VERSION', 2))

    if ks_version == 2:
        from keystoneclient.v2_0 import client as keystone_client
        # Legacy v2 env vars:
        # OS_USERNAME OS_PASSWORD OS_TENANT_NAME OS_AUTH_URL OS_REGION_NAME
        ks_creds = get_creds_dict("username", "password", "tenant_name",
                                  "auth_url", "region_name")

        cacert = maybe_get_cacert()
        if cacert:
            ks_creds["cacert"] = cacert
        nova_creds = [2] + get_creds_list("username", "password",
                                          "tenant_name", "auth_url")
        cinder_creds = get_creds_list("username", "password", "tenant_name",
                                      "auth_url")
        keystone = keystone_client.Client(**ks_creds)
        nova = nova_client.Client(*nova_creds, cacert=cacert)
        neutron = neutron_client.Client(**ks_creds)
        cinder = cinder_client.Client(*cinder_creds, cacert=cacert)

    elif ks_version == 3:

        from keystoneauth1.identity import v3
        from keystoneauth1 import session
        from keystoneclient.v3 import client
        # A little helper for the poor human trying to figure out which env vars
        # are needed, it worked for me (jjo) having:
        #  OS_USERNAME OS_PASSWORD OS_USER_DOMAIN_NAME OS_AUTH_URL
        #  OS_PROJECT_DOMAIN_NAME OS_PROJECT_DOMAIN_ID OS_PROJECT_ID OS_DOMAIN_NAME
        # Keystone needs domain creds for e.g. project list

        # project and project_domain are needed for listing projects
        ks_creds_domain = get_creds_dict("username", "password",
                                         "user_domain_name", "auth_url",
                                         "project_domain_name", "project_name",
                                         "project_domain_id", "project_id")
        # Need non-domain creds to get full catalog

        ks_creds_admin = get_creds_dict("username", "password",
                                        "user_domain_name", "auth_url",
                                        "project_domain_name", "project_name",
                                        "project_domain_id", "project_id")

        auth_domain = v3.Password(**ks_creds_domain)
        auth_admin = v3.Password(**ks_creds_admin)

        # Need to pass in cacert separately
        verify = maybe_get_cacert()

        if verify is None:
            verify = True
        sess_domain = session.Session(auth=auth_domain, verify=verify)
        sess_admin = session.Session(auth=auth_admin, verify=verify)

        interface = env.get('OS_INTERFACE', 'admin')
        # Keystone has not switched from interface to endpoint_type yet
        keystone = client.Client(session=sess_domain, interface=interface)
        nova = nova_client.Client(2,
                                  session=sess_admin,
                                  endpoint_type=interface)
        neutron = neutron_client.Client(session=sess_admin,
                                        endpoint_type=interface)
        cinder = cinder_client.Client(session=sess_admin,
                                      endpoint_type=interface)

    else:
        raise ValueError("Invalid OS_IDENTITY_API_VERSION=%s" % ks_version)
    log.debug("Client setup done, keystone ver {}".format(ks_version))
    return (keystone, nova, neutron, cinder)
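
get_clients() depends on three helpers that are not shown above. A plausible reconstruction, assuming they simply map credential names onto the standard OS_* environment variables (the bodies below are inferred, not taken from the source):

import os

def get_creds_dict(*names):
    # "username" -> env var OS_USERNAME, skipping variables that are unset.
    return {name: os.environ['OS_' + name.upper()]
            for name in names if ('OS_' + name.upper()) in os.environ}

def get_creds_list(*names):
    # Same lookup, but positional, for clients built with *args.
    return [os.environ['OS_' + name.upper()] for name in names]

def maybe_get_cacert():
    # Optional CA bundle path for TLS verification; None when unset.
    return os.environ.get('OS_CACERT')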
Example #5
    def __init__(self, instance_id, common_config, source_config, dest_config):

        self.dnsdomain = common_config['dnsdomain']
        self.instance_id = instance_id
        self.source_config = source_config
        self.dest_config = dest_config
        self.common_config = common_config

        source_auth = KeystonePassword(
            auth_url=self.common_config['keystone_url'],
            username=self.common_config['user'],
            password=self.common_config['password'],
            user_domain_name='Default',
            project_domain_name='Default',
            project_name='admin')
        source_session = keystone_session.Session(auth=source_auth)
        self.source_novaclient = novaclient.Client(
            '2', session=source_session, region_name=source_config['region'])

        self.refresh_instance()
        self.project_id = self.source_instance.tenant_id
        self.user_id = self.source_instance.user_id

        project_auth = KeystonePassword(
            auth_url=self.common_config['keystone_url'],
            username=self.common_config['user'],
            password=self.common_config['password'],
            user_domain_name='Default',
            project_domain_name='Default',
            project_name=self.project_id)
        project_session = keystone_session.Session(auth=project_auth)
        self.designateclient = designateclient.Client(
            session=project_session, region_name=source_config['region'])

        self.novaclient_projectscope = novaclient.Client(
            '2', session=project_session, region_name=dest_config['region'])

        wmflabs_auth = KeystonePassword(
            auth_url=self.common_config['keystone_url'],
            username=self.common_config['user'],
            password=self.common_config['password'],
            user_domain_name='Default',
            project_domain_name='Default',
            project_name='wmflabsdotorg')
        wmflabs_session = keystone_session.Session(auth=wmflabs_auth)
        self.wmflabsdesignateclient = designateclient.Client(
            session=wmflabs_session, region_name=source_config['region'])

        dest_auth = KeystonePassword(
            auth_url=self.common_config['keystone_url'],
            username=self.common_config['user'],
            password=self.common_config['password'],
            user_domain_name='Default',
            project_domain_name='Default',
            project_name='admin')
        self.dest_session = keystone_session.Session(auth=dest_auth)

        self.dest_novaclient = novaclient.Client(
            '2', session=self.dest_session, region_name=dest_config['region'])
        self.dest_neutronclient = neutronclient.Client(
            session=self.dest_session, region_name=dest_config['region'])
        self.dest_keystoneclient = keystoneclient.Client(
            session=self.dest_session, region_name=dest_config['region'])
        self.proxy_endpoint = self.get_proxy_endpoint(self.dest_keystoneclient,
                                                      dest_config['region'])
Example #6
    def _initialize_tests(self):
        """Perform final initialization before tests get run."""
        # Access the sentries for inspecting service units
        self.pxc_sentry = self.d.sentry['percona-cluster'][0]
        self.keystone_sentry = self.d.sentry['keystone'][0]
        self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
        self.nova_cc_sentry = self.d.sentry['nova-cloud-controller'][0]
        self.nova_compute_sentry = self.d.sentry['nova-compute'][0]
        self.glance_sentry = self.d.sentry['glance'][0]

        u.log.debug('openstack release val: {}'.format(
            self._get_openstack_release()))
        u.log.debug('openstack release str: {}'.format(
            self._get_openstack_release_string()))

        # Authenticate admin with keystone
        self.keystone_session, self.keystone = u.get_default_keystone_session(
            self.keystone_sentry,
            openstack_release=self._get_openstack_release())

        force_v1_client = False
        if self._get_openstack_release() == self.trusty_icehouse:
            # Updating image properties (such as arch or hypervisor) using the
            # v2 api in icehouse results in:
            # https://bugs.launchpad.net/python-glanceclient/+bug/1371559
            u.log.debug('Forcing glance to use v1 api')
            force_v1_client = True

        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(
            self.keystone, force_v1_client=force_v1_client)

        # Authenticate admin with nova endpoint
        self.nova = nova_client.Client(2, session=self.keystone_session)

        keystone_ip = self.keystone_sentry.info['public-address']

        # Create a demo tenant/role/user
        self.demo_tenant = 'demoTenant'
        self.demo_role = 'demoRole'
        self.demo_user = '******'
        self.demo_project = 'demoProject'
        self.demo_domain = 'demoDomain'
        if self._get_openstack_release() >= self.xenial_queens:
            self.create_users_v3()
            self.demo_user_session, auth = u.get_keystone_session(
                keystone_ip,
                self.demo_user,
                'password',
                api_version=3,
                user_domain_name=self.demo_domain,
                project_domain_name=self.demo_domain,
                project_name=self.demo_project)
            self.keystone_demo = keystone_client_v3.Client(
                session=self.demo_user_session)
            self.nova_demo = nova_client.Client(2,
                                                session=self.demo_user_session)
        else:
            self.create_users_v2()
            # Authenticate demo user with keystone
            self.keystone_demo = \
                u.authenticate_keystone_user(
                    self.keystone, user=self.demo_user,
                    password='******',
                    tenant=self.demo_tenant)
            # Authenticate demo user with nova-api
            self.nova_demo = u.authenticate_nova_user(self.keystone,
                                                      user=self.demo_user,
                                                      password='******',
                                                      tenant=self.demo_tenant)
Example #7
def create_flavors():
    logger.info("Creating overcloud flavors...")

    flavors = [{
        "id": "1",
        "name": "m1.tiny",
        "memory": 512,
        "disk": 1,
        "cpus": 1
    }, {
        "id": "2",
        "name": "m1.small",
        "memory": 2048,
        "disk": 20,
        "cpus": 1
    }, {
        "id": "3",
        "name": "m1.medium",
        "memory": 4096,
        "disk": 40,
        "cpus": 2
    }, {
        "id": "4",
        "name": "m1.large",
        "memory": 8192,
        "disk": 80,
        "cpus": 4
    }, {
        "id": "5",
        "name": "m1.xlarge",
        "memory": 16384,
        "disk": 160,
        "cpus": 8
    }]

    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_overcloud_creds()

    kwargs = {
        'username': os_username,
        'password': os_password,
        'auth_url': os_auth_url,
        'project_name': os_tenant_name,
        'user_domain_name': os_user_domain_name,
        'project_domain_name': os_project_domain_name
    }
    n_client = nova_client.Client(2, **kwargs)

    existing_flavor_ids = []
    for existing_flavor in n_client.flavors.list(detailed=False):
        existing_flavor_ids.append(existing_flavor.id)

    for flavor in flavors:
        if flavor["id"] not in existing_flavor_ids:
            print('    Creating ' + flavor["name"])
            n_client.flavors.create(flavor["name"],
                                    flavor["memory"],
                                    flavor["cpus"],
                                    flavor["disk"],
                                    flavorid=flavor["id"])
        else:
            print('    Flavor ' + flavor["name"] + " already exists")
Example #8
def get_nova_client():
    nova = nova_client.Client(2, ADMIN_USERNAME, ADMIN_PASSWORD, ADMIN_TENANT,
                              ADMIN_AUTH_URL)
    return nova
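
Example #8 uses the legacy positional constructor. A session-based equivalent, sketched with the same ADMIN_* constants and assuming a v3 Keystone with default domains:

from keystoneauth1.identity import v3
from keystoneauth1 import session as ks_session

def get_nova_client_v3():
    # Same credentials, routed through a keystoneauth1 session instead.
    auth = v3.Password(auth_url=ADMIN_AUTH_URL,
                       username=ADMIN_USERNAME,
                       password=ADMIN_PASSWORD,
                       project_name=ADMIN_TENANT,
                       user_domain_name='Default',
                       project_domain_name='Default')
    return nova_client.Client('2', session=ks_session.Session(auth=auth))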
Example #9
#!/usr/bin/env python

from os import environ
from keystoneauth1 import loading
from keystoneauth1 import session
from cinderclient import client as cinderclient
from novaclient import client as novaclient

loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
    auth_url=environ['OS_AUTH_URL'],
    username=environ['OS_USERNAME'],
    password=environ['OS_PASSWORD'],
    project_name=environ['OS_PROJECT_NAME'],
    project_domain_name=environ['OS_PROJECT_DOMAIN_NAME'],
    user_domain_name=environ['OS_USER_DOMAIN_NAME'])
sess = session.Session(auth=auth)

# List volumes for all projects
cinder = cinderclient.Client(3, session=sess)
volumes = cinder.volumes.list(search_opts={'all_tenants': True})
for v in volumes:
    print("Volume = %s" % v)

# List instances for all projects
nova = novaclient.Client(2, session=sess)
instances = nova.servers.list(search_opts={'all_tenants': True})
for i in instances:
    print("Instance = %s" % i)
Example #10
def get_nova_client():      # pragma: no cover
    sess = get_session()
    return novaclient.Client(get_nova_client_version(), session=sess)
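
get_session() and get_nova_client_version() live elsewhere in that project; a plausible sketch, assuming env-var password auth like the script above (the helper names are the source's, the bodies are inferred):

import os
from keystoneauth1 import loading
from keystoneauth1 import session

def get_session():
    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(
        auth_url=os.environ['OS_AUTH_URL'],
        username=os.environ['OS_USERNAME'],
        password=os.environ['OS_PASSWORD'],
        project_name=os.environ['OS_PROJECT_NAME'],
        user_domain_name=os.environ.get('OS_USER_DOMAIN_NAME', 'Default'),
        project_domain_name=os.environ.get('OS_PROJECT_DOMAIN_NAME', 'Default'))
    return session.Session(auth=auth)

def get_nova_client_version():
    # Compute API microversion; '2' is a safe floor.
    return os.environ.get('OS_COMPUTE_API_VERSION', '2')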
Example #11
        p.read(path1)
    elif os.path.exists(path2):
        p.read(path2)
    elif os.path.exists(path3):
        p.read(path3)
    else:
        return None
    return p


config = nova_load_config_file()

client = nova_client.Client(version=config.get('openstack', 'version'),
                            username=config.get('openstack', 'username'),
                            api_key=config.get('openstack', 'api_key'),
                            auth_url=config.get('openstack', 'auth_url'),
                            region_name=config.get('openstack', 'region_name'),
                            project_id=config.get('openstack', 'project_id'),
                            auth_system=config.get('openstack', 'auth_system'))

if len(sys.argv) == 2 and (sys.argv[1] == '--list'):
    groups = {}

    # Cycle on servers
    for f in client.servers.list():
        # Define group (or set to empty string)
        group = f.metadata['group'] if 'group' in f.metadata else 'undefined'

        # Create group if not exist
        if group not in groups:
Example #12
import collections

from keystoneclient.session import Session as KeystoneSession
from keystoneclient.auth.identity.v3 import Password as KeystonePassword
from keystoneclient.v3 import Client as KeystoneClient
from novaclient import client as novaclient

def get_keystone_session(project):
    return KeystoneSession(auth=KeystonePassword(
        auth_url="http://cloudcontrol1003.wikimedia.org:5000/v3",
        username="******",
        password=open('novaobserver_password').read(),
        project_name=project,
        user_domain_name='default',
        project_domain_name='default'
    ))

keystone_client = KeystoneClient(
    session=get_keystone_session('bastion'),
    endpoint="http://cloudcontrol1003.wikimedia.org:5000/v3",
    interface='public'
)

for project in keystone_client.projects.list():
    if project.name not in ['admin', 'tools', 'toolsbeta']:
        session = get_keystone_session(project.name)
        for instance in novaclient.Client("2.0", session=session, region_name='eqiad').servers.list():
            print(project.name, instance.name, instance.status)
Example #13
import os
from osauth import os_session, VERSION
from novaclient import client
from pprint import pprint


nova = client.Client(VERSION, session=os_session)
pprint(nova.glance.list())
Example #14
def get_nova_client(session=None):
    if not session:
        session = login_lib.load_dumped_session()

    compute = nova_client.Client(2, session=session)
    return compute
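
A typical call site for this helper (a sketch; login_lib.load_dumped_session presumably restores a previously saved keystoneauth session):

nova = get_nova_client()
for server in nova.servers.list():
    print(server.name, server.status)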
Example #15
import configparser

from keystoneauth1 import session
from keystoneauth1.identity import v3
from novaclient import client

import Config

config = configparser.RawConfigParser()
config.read('hass.conf')
auth = v3.Password(auth_url='http://controller:5000/v3',
                   username=config.get("openstack", "openstack_admin_account"),
                   password=config.get("openstack", "openstack_admin_password"),
                   project_name=config.get("openstack", "openstack_admin_account"),
                   user_domain_name=config.get("openstack", "openstack_user_domain_id"),
                   project_domain_name=config.get("openstack", "openstack_project_domain_id"))
sess = session.Session(auth=auth)
novaClient = client.Client(2.25, session=sess)


def create_provider_instance():
    if not _getInstanceByName(Config.INSTANCE_NAME):
        novaClient.servers.create(name=Config.INSTANCE_NAME,
                                  image=Config.IMAGE_ID,
                                  flavor=Config.FLAVOR_ID,
                                  availability_zone=Config.AVAILABILITY_ZONE,
                                  block_device_mapping=Config.BLOCK_DEVICE_MAPPING,
                                  nics=[{'net-id': Config.NETWORK_PROVIDER_ID}]
                                  )
    else:
        print("Preprocess: instance %s already exists!" % Config.INSTANCE_NAME)
    if _InstanceActive(Config.INSTANCE_NAME):
        print("Preprocess: create instance %s success!" % Config.INSTANCE_NAME)
    return novaClient.floating_ips.create('external_network')


Example #16
if __name__ == "__main__":
    # Authenticate using ENV variables
    auth = v2.Password(auth_url=env['OS_AUTH_URL'],
                       username=env['OS_USERNAME'],
                       password=env['OS_PASSWORD'],
                       tenant_id=env['OS_TENANT_ID'])
    # Open auth session
    sess = session.Session(auth=auth)

    # Authenticate against required services
    keystone = ksclient.Client(session=sess)
    glance = glclient.Client(session=sess)
    nova = nvclient.Client("2", session=sess)
    swift = swclient.Connection(user=env['OS_USERNAME'],
                                key=env['OS_PASSWORD'],
                                authurl=env['OS_AUTH_URL'],
                                auth_version="2",
                                tenant_name=env['OS_TENANT_NAME'])

    # Try to download the given input case from Swift.
    obj = swift.get_object(config['container'], config['input_case'])
    temppath = tempfile.mkdtemp(prefix='ofcloud-')
    casepath = path.join(temppath, "case")

    os.makedirs(casepath)
    casefile = path.join(casepath, config['input_case'])
    with open(casefile, 'w') as f:
        f.write(obj[1])
Example #17
 def nova(self):
     return nc.Client('2', self.authinfo['OS_USERNAME'], self.authinfo['OS_PASSWORD'], self.authinfo['OS_TENANT_NAME'],
                      auth_url=self.authinfo['OS_AUTH_URL'])
Example #18
def get_client():
    n_client = nova_client.Client(version=2,
                                  session=get_session(),
                                  region_name=args.region_name)
    return n_client
Example #19
def main():

    # ############### GENERAL INITIALISATION ################

    if not os.path.exists(VIMS_DATA_DIR):
        os.makedirs(VIMS_DATA_DIR)

    ks_creds = os_utils.get_credentials("keystone")
    nv_creds = os_utils.get_credentials("nova")
    nt_creds = os_utils.get_credentials("neutron")

    logger.info("Prepare OpenStack plateform (create tenant and user)")
    keystone = ksclient.Client(**ks_creds)

    user_id = os_utils.get_user_id(keystone, ks_creds['username'])
    if user_id == '':
        step_failure("init", "Error : Failed to get id of " +
                     ks_creds['username'])

    tenant_id = os_utils.create_tenant(
        keystone, TENANT_NAME, TENANT_DESCRIPTION)
    if tenant_id == '':
        step_failure("init", "Error : Failed to create " +
                     TENANT_NAME + " tenant")

    roles_name = ["admin", "Admin"]
    role_id = ''
    for role_name in roles_name:
        if role_id == '':
            role_id = os_utils.get_role_id(keystone, role_name)

    if role_id == '':
        logger.error("Error : Failed to get id for %s role" % role_name)

    if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
        logger.error("Error : Failed to add %s on tenant" %
                     ks_creds['username'])

    user_id = os_utils.create_user(
        keystone, TENANT_NAME, TENANT_NAME, None, tenant_id)
    if user_id == '':
        logger.error("Error : Failed to create %s user" % TENANT_NAME)

    logger.info("Update OpenStack creds informations")
    ks_creds.update({
        "username": TENANT_NAME,
        "password": TENANT_NAME,
        "tenant_name": TENANT_NAME,
    })

    nt_creds.update({
        "tenant_name": TENANT_NAME,
    })

    nv_creds.update({
        "project_id": TENANT_NAME,
    })

    logger.info("Upload some OS images if it doesn't exist")
    glance_endpoint = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token)

    for img in IMAGES.keys():
        image_name = IMAGES[img]['image_name']
        image_url = IMAGES[img]['image_url']

        image_id = os_utils.get_image_id(glance, image_name)

        if image_id == '':
            logger.info("""%s image doesn't exist on glance repository. Try
            downloading this image and upload on glance !""" % image_name)
            image_id = download_and_add_image_on_glance(
                glance, image_name, image_url)

        if image_id == '':
            step_failure(
                "init",
                "Error : Failed to find or upload required OS "
                "image for this deployment")

    nova = nvclient.Client("2", **nv_creds)

    logger.info("Update security group quota for this tenant")
    neutron = ntclient.Client(**nt_creds)
    if not os_utils.update_sg_quota(neutron, tenant_id, 50, 100):
        step_failure(
            "init",
            "Failed to update security group quota for tenant " + TENANT_NAME)

    logger.info("Update cinder quota for this tenant")
    from cinderclient import client as cinderclient

    creds_cinder = os_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('1', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")
    if not os_utils.update_cinder_quota(cinder_client, tenant_id, 20, 10, 150):
        step_failure(
            "init", "Failed to update cinder quota for tenant " + TENANT_NAME)

    # ############### CLOUDIFY INITIALISATION ################

    cfy = orchestrator(VIMS_DATA_DIR, CFY_INPUTS, logger)

    cfy.set_credentials(username=ks_creds['username'], password=ks_creds[
                        'password'], tenant_name=ks_creds['tenant_name'],
                        auth_url=ks_creds['auth_url'])

    logger.info("Collect flavor id for cloudify manager server")
    nova = nvclient.Client("2", **nv_creds)

    flavor_name = "m1.medium"
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'ram_min':
            flavor_id = os_utils.get_flavor_id_by_ram_range(
                nova, CFY_MANAGER_REQUIERMENTS['ram_min'], 8196)

    if flavor_id == '':
        logger.error(
            "Failed to find %s flavor. "
            "Try with ram range default requirement !" % flavor_name)
        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)

    if flavor_id == '':
        step_failure("orchestrator",
                     "Failed to find required flavor for this deployment")

    cfy.set_flavor_id(flavor_id)

    image_name = "centos_7"
    image_id = os_utils.get_image_id(glance, image_name)
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'os_image':
            image_id = os_utils.get_image_id(
                glance, CFY_MANAGER_REQUIERMENTS['os_image'])

    if image_id == '':
        step_failure(
            "orchestrator",
            "Error : Failed to find required OS image for cloudify manager")

    cfy.set_image_id(image_id)

    ext_net = os_utils.get_external_net(neutron)
    if not ext_net:
        step_failure("orchestrator", "Failed to get external network")

    cfy.set_external_network_name(ext_net)

    ns = functest_utils.get_resolvconf_ns()
    if ns:
        cfy.set_nameservers(ns)

    logger.info("Prepare virtualenv for cloudify-cli")
    cmd = "chmod +x " + VIMS_DIR + "create_venv.sh"
    functest_utils.execute_command(cmd, logger)
    time.sleep(3)
    cmd = VIMS_DIR + "create_venv.sh " + VIMS_DATA_DIR
    functest_utils.execute_command(cmd, logger)

    cfy.download_manager_blueprint(
        CFY_MANAGER_BLUEPRINT['url'], CFY_MANAGER_BLUEPRINT['branch'])

    # ############### CLOUDIFY DEPLOYMENT ################
    start_time_ts = time.time()
    end_time_ts = start_time_ts
    logger.info("Cloudify deployment Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))

    error = cfy.deploy_manager()
    if error:
        step_failure("orchestrator", error)

    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    logger.info("Cloudify deployment duration:'%s'" % duration)
    set_result("orchestrator", duration, "")

    # ############### CLEARWATER INITIALISATION ################

    cw = clearwater(CW_INPUTS, cfy, logger)

    logger.info("Collect flavor id for all clearwater vm")
    nova = nvclient.Client("2", **nv_creds)

    flavor_name = "m1.small"
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    for requirement in CW_REQUIERMENTS:
        if requirement == 'ram_min':
            flavor_id = os_utils.get_flavor_id_by_ram_range(
                nova, CW_REQUIERMENTS['ram_min'], 8196)

    if flavor_id == '':
        logger.error(
            "Failed to find %s flavor. Try with ram range "
            "default requirement !" % flavor_name)
        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)

    if flavor_id == '':
        step_failure(
            "vIMS", "Failed to find required flavor for this deployment")

    cw.set_flavor_id(flavor_id)

    image_name = "ubuntu_14.04"
    image_id = os_utils.get_image_id(glance, image_name)
    for requirement in CW_REQUIERMENTS:
        if requirement == 'os_image':
            image_id = os_utils.get_image_id(
                glance, CW_REQUIERMENTS['os_image'])

    if image_id == '':
        step_failure(
            "vIMS",
            "Error : Failed to find required OS image for cloudify manager")

    cw.set_image_id(image_id)

    ext_net = os_utils.get_external_net(neutron)
    if not ext_net:
        step_failure("vIMS", "Failed to get external network")

    cw.set_external_network_name(ext_net)

    # ############### CLEARWATER DEPLOYMENT ################

    start_time_ts = time.time()
    end_time_ts = start_time_ts
    logger.info("vIMS VNF deployment Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))

    error = cw.deploy_vnf(CW_BLUEPRINT)
    if error:
        step_failure("vIMS", error)

    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    logger.info("vIMS VNF deployment duration:'%s'" % duration)
    set_result("vIMS", duration, "")

    # ############### CLEARWATER TEST ################

    test_clearwater()

    # ########## CLEARWATER UNDEPLOYMENT ############

    cw.undeploy_vnf()

    # ########### CLOUDIFY UNDEPLOYMENT #############

    cfy.undeploy_manager()

    # ############## GENERAL CLEANUP ################
    if args.noclean:
        exit(0)

    ks_creds = os_utils.get_credentials("keystone")

    keystone = ksclient.Client(**ks_creds)

    logger.info("Removing %s tenant .." % CFY_INPUTS['keystone_tenant_name'])
    tenant_id = os_utils.get_tenant_id(
        keystone, CFY_INPUTS['keystone_tenant_name'])
    if tenant_id == '':
        logger.error("Error : Failed to get id of %s tenant" %
                     CFY_INPUTS['keystone_tenant_name'])
    else:
        if not os_utils.delete_tenant(keystone, tenant_id):
            logger.error("Error : Failed to remove %s tenant" %
                         CFY_INPUTS['keystone_tenant_name'])

    logger.info("Removing %s user .." % CFY_INPUTS['keystone_username'])
    user_id = os_utils.get_user_id(
        keystone, CFY_INPUTS['keystone_username'])
    if user_id == '':
        logger.error("Error : Failed to get id of %s user" %
                     CFY_INPUTS['keystone_username'])
    else:
        if not os_utils.delete_user(keystone, user_id):
            logger.error("Error : Failed to remove %s user" %
                         CFY_INPUTS['keystone_username'])
Example #20
            temp1 = "{\"value\": \"" + temp + "\"},"
            response = response + temp1
        response = response[:-1] + "]"
    response = response + ",\"button\":\"" + str(button) + "\""
    response = response + "}"
    return response


auth = v3.Password(auth_url='http://192.168.0.12:5000/v3',
                   username='******',
                   password='******',
                   project_name='admin',
                   user_domain_id='default',
                   project_domain_id='default')
sess = session.Session(auth=auth)
nova = client.Client("2.1", session=sess)
#nova.servers.create("nish",flavor="m1.tiny")
neutron = neutron_client.Client(session=sess)
#response = createJSONResponse("AZ",nova.availability_zones.list(),"msg")
#print(response)
flavor_list = nova.flavors.list()

network_list = neutron.list_networks()
netlist = []
temp_list = network_list['networks']
for i in temp_list:
    for k, v in i.items():
        if str(k) == 'name':
            k1 = '<' + str(k)
            v1 = str(v) + '>'
            w = k1 + ':' + v1
Example #21
ip_info = dict()
found_ip = None

if not args.router:
    # if the user didn't specify to search only routers
    found_ip = floating_ip_search(args.ip, neutron)
    if found_ip:
        ip_info.update({
            'ip_type': 'floating ip',
            'ip_id': found_ip['id'],
            'ip_addr': found_ip['floating_ip_address']
        })
        try:
            port = neutron.show_port(found_ip['port_id'])
            server_id = port['port']['device_id']
            nova = novaclient.Client(nova_version, session=sess)
            server = nova.servers.get(server_id)
            ip_info.update({
                'device_id': server.id,
                'device_name': server.name
            })
        except NeutronNotFound:
            ip_info.update({'device_id': None, 'device_name': None})

if not args.floatingip and not found_ip:
    # if user didn't specify to search only floating IPs,
    # and the given IP is not already found
    found_ip = router_search(args.ip, neutron)
    if found_ip:
        ip_info.update({
            'ip_type': 'router',
Example #22
    def _on_project_create(self, project_id):

        LOG.warning("Beginning wmf hooks for project creation: %s" % project_id)

        roledict = self._get_role_dict()

        if CONF.wmfhooks.observer_role_name not in roledict:
            LOG.error("Failed to find id for role %s" % CONF.wmfhooks.observer_role_name)
            raise exception.NotImplemented()
        if CONF.wmfhooks.admin_role_name not in roledict:
            LOG.error("Failed to find id for role %s" % CONF.wmfhooks.admin_role_name)
            raise exception.NotImplemented()
        if CONF.wmfhooks.user_role_name not in roledict:
            LOG.error("Failed to find id for role %s" % CONF.wmfhooks.user_role_name)
            raise exception.NotImplemented()

        self.assignment_api.add_role_to_user_and_project(CONF.wmfhooks.admin_user,
                                                         project_id,
                                                         roledict[CONF.wmfhooks.admin_role_name])
        self.assignment_api.add_role_to_user_and_project(CONF.wmfhooks.admin_user,
                                                         project_id,
                                                         roledict[CONF.wmfhooks.user_role_name])
        self.assignment_api.add_role_to_user_and_project(CONF.wmfhooks.observer_user,
                                                         project_id,
                                                         roledict[CONF.wmfhooks.observer_role_name])

        # Use the nova api to set up security groups for the new project
        auth = generic.Password(
            auth_url=CONF.wmfhooks.auth_url,
            username=CONF.wmfhooks.admin_user,
            password=CONF.wmfhooks.admin_pass,
            user_domain_name='Default',
            project_domain_name='Default',
            project_name=project_id)
        session = keystone_session.Session(auth=auth)
        client = nova_client.Client('2', session=session, connect_retries=5)
        allgroups = client.security_groups.list()
        defaultgroup = [group for group in allgroups if group.name == 'default']
        if defaultgroup:
            groupid = defaultgroup[0].id
            try:
                client.security_group_rules.create(groupid,
                                                   ip_protocol='icmp',
                                                   from_port='-1',
                                                   to_port='-1',
                                                   cidr='0.0.0.0/0')
            except (exceptions.ClientException):
                LOG.warning("icmp security rule already exists.")
            try:
                client.security_group_rules.create(groupid,
                                                   ip_protocol='tcp',
                                                   from_port='22',
                                                   to_port='22',
                                                   cidr='10.0.0.0/8')
            except (exceptions.ClientException):
                LOG.warning("Port 22 security rule already exists.")
            try:
                client.security_group_rules.create(groupid,
                                                   ip_protocol='tcp',
                                                   from_port='5666',
                                                   to_port='5666',
                                                   cidr='10.0.0.0/8')
            except (exceptions.ClientException):
                LOG.warning("Port 5666 security rule already exists.")
            try:
                client.security_group_rules.create(groupid,
                                                   ip_protocol='tcp',
                                                   from_port='1',
                                                   to_port='65535',
                                                   cidr='',
                                                   group_id=groupid)
            except (exceptions.ClientException):
                LOG.warning("Project security rule for TCP already exists.")

            try:
                client.security_group_rules.create(groupid,
                                                   ip_protocol='udp',
                                                   from_port='1',
                                                   to_port='65535',
                                                   cidr='',
                                                   group_id=groupid)
            except (exceptions.ClientException):
                LOG.warning("Project security rule for UDP already exists.")

            try:
                client.security_group_rules.create(groupid,
                                                   ip_protocol='icmp',
                                                   from_port='1',
                                                   to_port='65535',
                                                   cidr='',
                                                   group_id=groupid)
            except (exceptions.ClientException):
                LOG.warning("Project security rule for ICMP already exists.")
        else:
            LOG.warning("Failed to find default security group in new project.")

        assignments = self._get_current_assignments(project_id)
        ldapgroups.sync_ldap_project_group(project_id, assignments)

        # Set up default sudoers in ldap
        ldapgroups.create_sudo_defaults(project_id)
        self._create_project_page(project_id)
Example #23
    def main(self, argv):
        # Parse args once to find version and debug settings
        parser = self.get_base_parser()
        (options, args) = parser.parse_known_args(argv)
        self.setup_debugging(options.debug)

        # build available subcommands based on version
        self.extensions = self._discover_extensions(
            options.os_compute_api_version)
        self._run_extension_hooks('__pre_parse_args__')

        # NOTE(dtroyer): Hackery to handle --endpoint_type due to argparse
        #                thinking usage-list --end is ambiguous; but it
        #                works fine with only --endpoint-type present
        #                Go figure.
        if '--endpoint_type' in argv:
            spot = argv.index('--endpoint_type')
            argv[spot] = '--endpoint-type'

        subcommand_parser = self.get_subcommand_parser(
            options.os_compute_api_version)
        self.parser = subcommand_parser

        if options.help and len(args) == 0:
            subcommand_parser.print_help()
            return 0

        args = subcommand_parser.parse_args(argv)
        self._run_extension_hooks('__post_parse_args__', args)

        # Short-circuit and deal with help right away.
        if args.func == self.do_help:
            self.do_help(args)
            return 0
        elif args.func == self.do_bash_completion:
            self.do_bash_completion(args)
            return 0

        (os_username, os_password, os_tenant_name, os_auth_url, os_region_name,
         os_auth_system, endpoint_type, insecure, service_type, service_name,
         volume_service_name, username, apikey, projectid, url, region_name,
         bypass_url,
         no_cache) = (args.os_username, args.os_password, args.os_tenant_name,
                      args.os_auth_url, args.os_region_name,
                      args.os_auth_system, args.endpoint_type, args.insecure,
                      args.service_type, args.service_name,
                      args.volume_service_name, args.username, args.apikey,
                      args.projectid, args.url, args.region_name,
                      args.bypass_url, args.no_cache)

        if not endpoint_type:
            endpoint_type = DEFAULT_NOVA_ENDPOINT_TYPE

        if not service_type:
            service_type = DEFAULT_NOVA_SERVICE_TYPE
            service_type = utils.get_service_type(args.func) or service_type

        #FIXME(usrleon): Here should be restrict for project id same as
        # for os_username or os_password but for compatibility it is not.

        if not utils.isunauthenticated(args.func):
            if not os_username:
                if not username:
                    raise exc.CommandError(
                        "You must provide a username "
                        "via either --os-username or env[OS_USERNAME]")
                else:
                    os_username = username

            if not os_password:
                if not apikey:
                    raise exc.CommandError("You must provide a password "
                                           "via either --os-password or via "
                                           "env[OS_PASSWORD]")
                else:
                    os_password = apikey

            if not os_tenant_name:
                if not projectid:
                    raise exc.CommandError("You must provide a tenant name "
                                           "via either --os-tenant-name or "
                                           "env[OS_TENANT_NAME]")
                else:
                    os_tenant_name = projectid

            if not os_auth_url:
                if not url:
                    if os_auth_system and os_auth_system != 'keystone':
                        os_auth_url = \
                            client.get_auth_system_url(os_auth_system)
                else:
                    os_auth_url = url

            if not os_auth_url:
                raise exc.CommandError(
                    "You must provide an auth url "
                    "via either --os-auth-url or env[OS_AUTH_URL] "
                    "or specify an auth_system which defines a "
                    "default url with --os-auth-system "
                    "or env[OS_AUTH_SYSTEM")

            if not os_region_name and region_name:
                os_region_name = region_name

        if (options.os_compute_api_version
                and options.os_compute_api_version != '1.0'):
            if not os_tenant_name:
                raise exc.CommandError(
                    "You must provide a tenant name "
                    "via either --os-tenant-name or env[OS_TENANT_NAME]")

            if not os_auth_url:
                raise exc.CommandError(
                    "You must provide an auth url "
                    "via either --os-auth-url or env[OS_AUTH_URL]")

        self.cs = client.Client(options.os_compute_api_version,
                                os_username,
                                os_password,
                                os_tenant_name,
                                os_auth_url,
                                insecure,
                                region_name=os_region_name,
                                endpoint_type=endpoint_type,
                                extensions=self.extensions,
                                service_type=service_type,
                                service_name=service_name,
                                auth_system=os_auth_system,
                                volume_service_name=volume_service_name,
                                timings=args.timings,
                                bypass_url=bypass_url,
                                no_cache=no_cache,
                                http_log_debug=options.debug)

        try:
            if not utils.isunauthenticated(args.func):
                self.cs.authenticate()
        except exc.Unauthorized:
            raise exc.CommandError("Invalid OpenStack Nova credentials.")
        except exc.AuthorizationFailure:
            raise exc.CommandError("Unable to authorize user")

        args.func(self.cs, args)

        if args.timings:
            self._dump_timings(self.cs.get_timings())
Example #24
def novaclient(context,
               admin_endpoint=False,
               privileged_user=False,
               timeout=None):
    """Returns a Nova client

    @param admin_endpoint: If True, use the admin endpoint template from
        configuration ('nova_endpoint_admin_template' and 'nova_catalog_info')
    @param privileged_user: If True, use the account from configuration
        (requires 'os_privileged_user_name', 'os_privileged_user_password' and
        'os_privileged_user_tenant' to be set)
    @param timeout: Number of seconds to wait for an answer before raising a
        Timeout exception (None to disable)
    """
    # FIXME: the novaclient ServiceCatalog object is mis-named.
    #        It actually contains the entire access blob.
    # Only needed parts of the service catalog are passed in, see
    # nova/context.py.
    compat_catalog = {
        'access': {
            'serviceCatalog': context.service_catalog or []
        }
    }
    sc = service_catalog.ServiceCatalog(compat_catalog)

    nova_endpoint_template = CONF.nova_endpoint_template
    nova_catalog_info = CONF.nova_catalog_info

    if admin_endpoint:
        nova_endpoint_template = CONF.nova_endpoint_admin_template
        nova_catalog_info = CONF.nova_catalog_admin_info
    service_type, service_name, endpoint_type = nova_catalog_info.split(':')

    # Extract the region if set in configuration
    if CONF.os_region_name:
        region_filter = {'attr': 'region', 'filter_value': CONF.os_region_name}
    else:
        region_filter = {}

    if privileged_user and CONF.os_privileged_user_name:
        context = ctx.RequestContext(
            CONF.os_privileged_user_name,
            None,
            auth_token=CONF.os_privileged_user_password,
            project_name=CONF.os_privileged_user_tenant,
            service_catalog=context.service_catalog)

        # When privileged_user is used, it needs to authenticate to Keystone
        # before querying Nova, so we set auth_url to the identity service
        # endpoint.
        if CONF.os_privileged_user_auth_url:
            url = CONF.os_privileged_user_auth_url
        else:
            # We then pass region_name, endpoint_type, etc. to the
            # Client() constructor so that the final endpoint is
            # chosen correctly.
            url = sc.url_for(service_type='identity',
                             endpoint_type=endpoint_type,
                             **region_filter)

        LOG.debug('Creating a Nova client using "%s" user',
                  CONF.os_privileged_user_name)
    else:
        if nova_endpoint_template:
            url = nova_endpoint_template % context.to_dict()
        else:
            url = sc.url_for(service_type=service_type,
                             service_name=service_name,
                             endpoint_type=endpoint_type,
                             **region_filter)

        LOG.debug('Nova client connection created using URL: %s', url)

    # Now that we have the correct auth_url, username, password and
    # project_name, let's build a Keystone session.
    loader = keystoneauth1.loading.get_plugin_loader('password')
    auth = loader.load_from_options(auth_url=url,
                                    username=context.user_id,
                                    password=context.auth_token,
                                    project_name=context.project_name)
    keystone_session = keystoneauth1.session.Session(auth=auth)

    c = nova_client.Client(NOVA_API_VERSION,
                           session=keystone_session,
                           insecure=CONF.nova_api_insecure,
                           timeout=timeout,
                           region_name=CONF.os_region_name,
                           endpoint_type=endpoint_type,
                           cacert=CONF.nova_ca_certificates_file,
                           extensions=nova_extensions)

    if not privileged_user:
        # noauth extracts user_id:project_id from auth_token
        c.client.auth_token = (context.auth_token or '%s:%s' %
                               (context.user_id, context.project_id))
        c.client.management_url = url
    return c
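
A sketch of how this factory is usually invoked from Cinder code, assuming context is a Cinder RequestContext (the attachment lookup is illustrative, not from the source):

# Hypothetical call site: resolve the instance attached to a volume.
nova = novaclient(context, privileged_user=True, timeout=60)
server = nova.servers.get(attachment['instance_uuid'])  # illustrative field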
Example #25
#!/usr/bin/python
__author__ = "Bassim Aly"
__EMAIL__ = "*****@*****.**"

from keystoneauth1.identity import v3
from keystoneauth1 import session
from novaclient import client as nclient
from pprint import pprint

auth = v3.Password(auth_url="http://10.10.10.150:5000/v3",
                   username="******",
                   password="******",
                   project_name="admin",
                   user_domain_name="Default",
                   project_domain_name="Default")

sess = session.Session(auth=auth, verify=False)

nova = nclient.Client(2.1, session=sess, http_log_debug=True)
instance_flavor = nova.flavors.find(name="m1.small")
print(
    "===============================Flavor Details==============================="
)
pprint(instance_flavor)
Example #26
    def _get_nodes(self):
        os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()
        auth_url = os_auth_url + "v3"

        provisioning_network = NetworkHelper.get_provisioning_network()

        kwargs = {
            'os_username': os_username,
            'os_password': os_password,
            'os_auth_url': os_auth_url,
            'os_tenant_name': os_tenant_name,
            'os_user_domain_name': os_user_domain_name,
            'os_project_domain_name': os_project_domain_name
        }
        i_client = ironic_client.get_client(1, **kwargs)

        auth = v3.Password(auth_url=auth_url,
                           username=os_username,
                           password=os_password,
                           project_name=os_tenant_name,
                           user_domain_name=os_user_domain_name,
                           project_domain_name=os_project_domain_name)

        sess = session.Session(auth=auth)
        n_client = nova_client.Client(2, session=sess)

        # Build up a dictionary that maps roles to a list of IPs for that role
        self.node_roles_to_nodes = {}

        self.logger.debug("Querying ironic and nova for nodes")
        nodes = i_client.node.list(
            fields=["uuid", "instance_uuid", "properties"])
        for node in nodes:
            uuid = node.uuid
            instance_uuid = node.instance_uuid

            # Handle the case where we have a node in ironic that's not in nova
            # (possibly due to the node being in maintenance mode in ironic or
            #  the user not assigning a role to a node, etc)
            if instance_uuid is None:
                self.logger.debug("Ironic node " + uuid + " has no "
                                  "corresponding instance in nova.  Skipping")
                continue

            capabilities = node.properties["capabilities"]
            capabilities = dict(c.split(':') for c in capabilities.split(','))

            # Role is the 'profile' capability when node placement is not
            # in use. Otherwise it's encoded in the 'node' capability.
            if 'profile' in capabilities:
                role = capabilities['profile']
            elif 'node' in capabilities:
                role = capabilities['node']
                # Trim the trailing "-N" where N is the node number
                role = role[:role.rindex('-')]
            else:
                self.logger.error(
                    "Failed to determine role of node {}".format(node))
                sys.exit(1)

            server = n_client.servers.get(instance_uuid)
            for address in server.addresses["ctlplane"]:
                ip = address["addr"]
                if IPAddress(ip) in provisioning_network:
                    break

            self.logger.debug("Got node:\n"
                              "    uuid=" + uuid + "\n"
                              "    ip=" + ip + "\n"
                              "    role=" + role + "\n"
                              "    instance_uuid=" + instance_uuid)

            if role not in self.node_roles_to_nodes:
                self.node_roles_to_nodes[role] = []

            self.node_roles_to_nodes[role].append(ip)

        self.logger.debug("node_roles_to_nodes: " +
                          str(self.node_roles_to_nodes))
Example #27
    def import_instance(self, ctxt, connection_info, target_environment,
                        instance_name, export_info):
        session = keystone.create_keystone_session(ctxt, connection_info)

        glance_api_version = connection_info.get("image_api_version",
                                                 GLANCE_API_VERSION)

        nova = nova_client.Client(NOVA_API_VERSION, session=session)
        glance = glance_client.Client(glance_api_version, session=session)
        neutron = neutron_client.Client(NEUTRON_API_VERSION, session=session)
        cinder = cinder_client.Client(CINDER_API_VERSION, session=session)

        os_type = export_info.get('os_type')
        LOG.info("os_type: %s", os_type)

        glance_upload = target_environment.get(
            "glance_upload", CONF.openstack_migration_provider.glance_upload)
        target_disk_format = target_environment.get(
            "disk_format", CONF.openstack_migration_provider.disk_format)
        container_format = target_environment.get(
            "container_format",
            CONF.openstack_migration_provider.container_format)
        hypervisor_type = target_environment.get(
            "hypervisor_type",
            CONF.openstack_migration_provider.hypervisor_type)
        fip_pool_name = target_environment.get(
            "fip_pool_name", CONF.openstack_migration_provider.fip_pool_name)
        network_map = target_environment.get("network_map", {})
        keypair_name = target_environment.get("keypair_name")

        migr_image_name = target_environment.get(
            "migr_image_name",
            target_environment.get("migr_image_name_map", {}).get(
                os_type,
                CONF.openstack_migration_provider.migr_image_name_map.get(
                    os_type)))
        migr_flavor_name = target_environment.get(
            "migr_flavor_name",
            CONF.openstack_migration_provider.migr_flavor_name)

        migr_fip_pool_name = target_environment.get(
            "migr_fip_pool_name", fip_pool_name
            or CONF.openstack_migration_provider.fip_pool_name)
        migr_network_name = target_environment.get(
            "migr_network_name",
            CONF.openstack_migration_provider.migr_network_name)

        flavor_name = target_environment.get("flavor_name", migr_flavor_name)

        if not migr_image_name:
            raise exception.MigrationToolException(
                "No matching migration image type found")

        LOG.info("Migration image name: %s", migr_image_name)

        if not migr_network_name:
            if len(network_map) != 1:
                raise exception.MigrationToolException(
                    'If "migr_network_name" is not provided, "network_map" '
                    'must contain exactly one entry')
            migr_network_name = list(network_map.values())[0]

        disks_info = export_info["devices"]["disks"]

        images = []
        volumes = []
        ports = []

        try:
            if glance_upload:
                for disk_info in disks_info:
                    disk_path = disk_info["path"]
                    disk_file_info = utils.get_disk_info(disk_path)

                    # if target_disk_format == disk_file_info["format"]:
                    #    target_disk_path = disk_path
                    # else:
                    #    target_disk_path = (
                    #        "%s.%s" % (os.path.splitext(disk_path)[0],
                    #                   target_disk_format))
                    #    utils.convert_disk_format(disk_path, target_disk_path,
                    #                              target_disk_format)

                    self._event_manager.progress_update(
                        "Uploading Glance image")

                    disk_format = disk_file_info["format"]
                    image = self._create_image(glance, self._get_unique_name(),
                                               disk_path, disk_format,
                                               container_format,
                                               hypervisor_type)
                    images.append(image)

                    self._event_manager.progress_update(
                        "Waiting for Glance image to become active")
                    self._wait_for_image(nova, image)

                    virtual_disk_size = disk_file_info["virtual-size"]
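                    # Non-raw formats carry an extra metadata header, so pad
                    # the requested volume size to fit.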
                    if disk_format != constants.DISK_FORMAT_RAW:
                        virtual_disk_size += DISK_HEADER_SIZE

                    self._event_manager.progress_update(
                        "Creating Cinder volume")

                    # Use float division so the ceiling isn't a no-op under
                    # Python 2, and pass Cinder a whole number of GiB.
                    volume_size_gb = int(
                        math.ceil(float(virtual_disk_size) / units.Gi))
                    volume = nova.volumes.create(
                        size=volume_size_gb,
                        display_name=self._get_unique_name(),
                        imageRef=image.id)
                    volumes.append(volume)

            migr_resources = self._deploy_migration_resources(
                nova, glance, neutron, os_type, migr_image_name,
                migr_flavor_name, migr_network_name, migr_fip_pool_name)

            nics_info = export_info["devices"].get("nics", [])

            try:
                for i, volume in enumerate(volumes):
                    self._wait_for_volume(nova, volume, 'available')

                    self._event_manager.progress_update(
                        "Attaching volume to worker instance")

                    self._attach_volume(nova, migr_resources.get_instance(),
                                        volume)

                    conn_info = migr_resources.get_guest_connection_info()

                osmorphing_hv_type = self._get_osmorphing_hypervisor_type(
                    hypervisor_type)

                self._event_manager.progress_update(
                    "Preparing instance for target platform")
                osmorphing_manager.morph_image(conn_info, os_type,
                                               osmorphing_hv_type,
                                               constants.PLATFORM_OPENSTACK,
                                               nics_info, self._event_manager)
            finally:
                self._event_manager.progress_update(
                    "Removing worker instance resources")
                migr_resources.delete()

            self._event_manager.progress_update("Renaming volumes")

            for i, volume in enumerate(volumes):
                new_volume_name = "%s %s" % (instance_name, i + 1)
                cinder.volumes.update(volume.id, name=new_volume_name)

            for nic_info in nics_info:
                self._event_manager.progress_update(
                    "Creating Neutron port for migrated instance")

                origin_network_name = nic_info.get("network_name")
                if not origin_network_name:
                    self._warn(
                        "Origin network name not provided for for nic: "
                        "%s, skipping", nic_info.get("name"))
                    continue

                network_name = network_map.get(origin_network_name)
                if not network_name:
                    raise exception.MigrationToolException(
                        "Network not mapped in network_map: %s" %
                        origin_network_name)

                ports.append(
                    self._create_neutron_port(neutron, network_name,
                                              nic_info.get("mac_address")))

            self._event_manager.progress_update("Creating migrated instance")

            self._create_target_instance(nova, flavor_name, instance_name,
                                         keypair_name, ports, volumes)
        except Exception:
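            # On failure, roll back the volumes and ports created so far; the
            # Glance images are cleaned up in the finally block below, since
            # they are no longer needed once the volumes exist.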
            self._event_manager.progress_update("Deleting volumes")
            for volume in volumes:

                @utils.ignore_exceptions
                @utils.retry_on_error()
                def _del_volume():
                    volume.delete()

                _del_volume()
            self._event_manager.progress_update("Deleting Neutron ports")
            for port in ports:

                @utils.ignore_exceptions
                @utils.retry_on_error()
                def _del_port():
                    neutron.delete_port(port["id"])

                _del_port()
            raise
        finally:
            self._event_manager.progress_update("Deleting Glance images")
            for image in images:

                @utils.ignore_exceptions
                @utils.retry_on_error()
                def _del_image():
                    image.delete()

                _del_image()
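
# The cleanup paths above lean on two project decorators from "utils". A
# minimal sketch of how such helpers might look; these reimplementations are
# assumptions for illustration, not the project's actual utils module:
import functools
import time


def ignore_exceptions(func):
    """Swallow any exception so cleanup can continue with the next resource."""
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            pass
    return _wrapper


def retry_on_error(max_attempts=3, sleep_seconds=1):
    """Retry a transient API call a few times before giving up."""
    def _decorator(func):
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            for attempt in range(1, max_attempts + 1):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == max_attempts:
                        raise
                    time.sleep(sleep_seconds)
        return _wrapper
    return _decorator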
Exemple #28
0
import datetime
import os
import sys
import yaml

from novaclient import client

settings = [
    'OS_API_VER', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME', 'OS_AUTH_URL',
    'INFLUX_HOST', 'INFLUX_PORT', 'INFLUX_USERNAME', 'INFLUX_PASSWORD', 'INFLUX_DB'
]

for setting in settings:
    if setting not in os.environ:
        print('Missing environment variable %s\n' % setting)
        sys.exit(1)

try:
    with client.Client(os.environ['OS_API_VER'],
                       os.environ['OS_USERNAME'],
                       os.environ['OS_PASSWORD'],
                       os.environ['OS_TENANT_NAME'],
                       os.environ['OS_AUTH_URL']) as nova:
        hypervisors = nova.hypervisors.list(detailed=True)
except Exception as e:
    sys.stderr.write('Failed getting hypervisor details: %s\n' % e)
    sys.exit(1)

now = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')

metrics = {}
metrics['vcpus'] = {}
metrics['vcpus']['TOTAL'] = 0
metrics['vcpus_used'] = {}
metrics['vcpus_used']['TOTAL'] = 0
metrics['memory_mb'] = {}
metrics['memory_mb']['TOTAL'] = 0
metrics['memory_mb_used'] = {}
metrics['memory_mb_used']['TOTAL'] = 0
Exemple #29
0
import glanceclient.v2.client as glclient
import novaclient.client as novaclient
import neutronclient.v2_0.client as netclient
import cinderclient.client as cinclient

username = '******'
password = '******'
tenant_name = 'tenant-zzh'
auth_url = 'https://identity.az1.dc1.fusionsphere.com:443/identity/v2.0'
tenant_id = ''
glance_endpoint = ''

image_id = '2756a49f-46bf-4445-9fd0-32b20ed7e22f'


def prn_obj(obj):
    print ', '.join(['%s:%s' % item for item in obj.__dict__.items()])


if __name__ == '__main__':
    nova_client = novaclient.Client(version='2',
                                    username=username,
                                    api_key=password,
                                    auth_url=auth_url,
                                    project_id=tenant_name,
                                    insecure=True)

    a = nova_client.images.delete(image_id)

    print type(a)
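
# The nova "images" proxy used above has since been deprecated in novaclient;
# a sketch of the same deletion through the glanceclient import already
# present (the endpoint and token values here are assumptions):
#
#     glance = glclient.Client(endpoint=glance_endpoint, token='<auth-token>')
#     glance.images.delete(image_id)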
Exemple #30
0
def get_flavors_vms_and_nodes(conf):
    """Returns information about flavors and VMs in the source cloud.

    Return format:
    ({
        <VM ID>: {
            "id": <VM ID>,
            "flavor": <VM flavor>,
            "host": <host VM is running on>,
        }
    },
    {
        <flavor ID>: {
            "fl_id": <flavor ID>,
            "core": <number of cores for flavor>,
            "name": <flavor name>,
            "ram": <amount of RAM required for flavor>,
            "ephemeral": <amount of ephemeral storage required for flavor>,
            "swap": <swap space needed for flavor>
        }
    },
    {
        <hostname>: {
            'core': <number of cores/CPUs>,
            'ram': <amount of RAM>,
            'core_ratio': <CPU allocation ratio>,
            'ram_ratio': <RAM allocation ratio>,
        }
    })"""
    src = conf['src']
    username = src['user']
    password = src['password']
    tenant = src['tenant']
    auth_url = src['auth_url']

    dst_comp = conf['dst_compute']
    core_ratio = dst_comp['cpu_allocation_ratio']
    ram_ratio = dst_comp['ram_allocation_ratio']

    cli = client.Client(2, username, password, tenant, auth_url)
    servers = cli.servers.list(search_opts={"all_tenants": True})
    nova_flavors = cli.flavors.list()

    flavors = {
        i.id: {
            "fl_id": i.id,
            "core": i.vcpus,
            "name": i.name,
            "ram": i.ram,
            "ephemeral": i.ephemeral,
            "swap": i.swap
        }
        for i in nova_flavors
    }

    hypervisors = {}

    down_hosts = set([
        service.host for service in cli.services.findall(binary='nova-compute',
                                                         state='down')
    ])

    def vm_host_is_up(vm):
        host_is_up = (getattr(vm, nova_compute.INSTANCE_HOST_ATTRIBUTE)
                      not in down_hosts)
        if not host_is_up:
            LOG.warning("VM '%s' is running on a down host! Skipping.", vm.id)

        return host_is_up

    def vm_is_in_valid_state(vm):
        return vm.status in nova_compute.ALLOWED_VM_STATUSES

    vms = {
        vm.id: {
            "id": vm.id,
            "flavor": vm.flavor.get("id"),
            "host": getattr(vm, nova_compute.INSTANCE_HOST_ATTRIBUTE)
        }
        for vm in servers if vm_host_is_up(vm) and vm_is_in_valid_state(vm)
    }

    for hypervisor in cli.hypervisors.list():
        host = hypervisor.hypervisor_hostname
        if host not in down_hosts:
            hypervisors[host] = {
                'core': hypervisor.vcpus,
                'ram': hypervisor.memory_mb,
                'core_ratio': core_ratio,
                'ram_ratio': ram_ratio
            }

    return flavors, vms, hypervisors
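
# A minimal, hypothetical "conf" showing the structure the function above
# expects; the key names mirror the lookups in its body, the values are
# placeholders:
conf = {
    'src': {
        'user': 'admin',
        'password': 'secret',
        'tenant': 'demo',
        'auth_url': 'http://controller:5000/v2.0',
    },
    'dst_compute': {
        'cpu_allocation_ratio': 16.0,
        'ram_allocation_ratio': 1.5,
    },
}

flavors, vms, hypervisors = get_flavors_vms_and_nodes(conf)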