Example #1
 def launch_stack(self, name, template_path, parameters):
     with open(template_path) as f:
         template = f.read()
         stack = self.heat.stacks.create(**{
             'stack_name': name,
             'template': template,
             'parameters': parameters
         })['stack']
         get_the_stack = lambda: self.heat.stacks.get(stack['id'])
         utils.wait_until(
             lambda: get_the_stack().status == 'CREATE_COMPLETE')
         stack = get_the_stack()
         return (stack, {o['output_key']: o['output_value']
                         for o in stack.outputs})
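
A minimal usage sketch of the helper above. The caller, stack name, template path, parameter values, and the server_ip output key are all placeholders, and client stands for whatever test object provides self.heat; a polling helper in the spirit of utils.wait_until is sketched after the setUpClass code further down.

stack, outputs = client.launch_stack(
    name='demo-stack',
    template_path='/path/to/template.yaml',
    parameters={'image': 'cirros', 'flavor': 'm1.small'})
# stack is the re-fetched stack object; outputs maps output_key -> output_value
server_ip = outputs.get('server_ip')
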
Example #2
 def launch_stack(self, name, template_path, parameters):
     with open(template_path) as f:
         template = f.read()
         stack = self.heat.stacks.create(**{
             'stack_name': name,
             'template': template,
             'parameters': parameters
         })['stack']
         get_the_stack = lambda: self.heat.stacks.get(stack['id'])
         utils.wait_until(
             lambda: get_the_stack().status == 'CREATE_COMPLETE')
         stack = get_the_stack()
         return (stack, {
             o['output_key']: o['output_value']
             for o in stack.outputs
         })
    def setUpClass(cls):
        # Fabric's environment variables
        env.disable_known_hosts = True
        env.abort_exception = Exception
        env.key_filename = os.path.join(WORKSPACE, 'id_rsa')
        env.user = '******'

        # Parameters
        ID = int(time.time())
        USER_DATA_YAML = 'files/2-role/user-data.yaml'
        LIBVIRT_IMGS = '/var/lib/libvirt/images'
        UBUNTU_CLOUD_IMG = os.path.join(
            WORKSPACE, 'devstack-trusty.template.openstack.org.qcow')
        TITANIUM_IMG = os.path.join(WORKSPACE, 'titanium.qcow')
        DISK_SIZE = 20
        ADMIN_NET = cls.get_free_subnet('192.168.0.0/16', 24)
        MGMT_NET = IPNetwork('192.168.254.0/24')
        cls.TITANIUM = 'titanium-{0}'.format(ID)
        cls.BRIDGE_MGMT = 'br{0}-m'.format(ID)
        cls.BRIDGE1 = 'br{0}-1'.format(ID)
        cls.BRIDGE2 = 'br{0}-2'.format(ID)
        cls.ADMIN_NAME = 'admin-{0}'.format(ID)
        cls.MGMT_NAME = 'mgmt-{0}'.format(ID)

        VirtualMachine = namedtuple('VirtualMachine',
                                    ['ip', 'mac', 'port', 'name'])
        cls.VMs = {
            'control': VirtualMachine(ip=str(ADMIN_NET[2]),
                                      mac=cls.rand_mac(),
                                      port='2/1',
                                      name='control-{0}'.format(ID),),
            'compute': VirtualMachine(ip=str(ADMIN_NET[3]),
                                      mac=cls.rand_mac(),
                                      port='2/2',
                                      name='compute-{0}'.format(ID))}

        ubuntu_img_path = os.path.join(LIBVIRT_IMGS,
                                       'ubuntu-cloud{0}.qcow'.format(ID))
        local('sudo qemu-img convert -O qcow2 {source} {dest}'.format(
            source=UBUNTU_CLOUD_IMG, dest=ubuntu_img_path))

        # Create admin network
        admin_net_xml = os.path.join(PARENT_FOLDER_PATH,
                                     'files/2-role/admin-net.xml')
        with open(admin_net_xml) as f:
            tmpl = f.read().format(
                name=cls.ADMIN_NAME, ip=ADMIN_NET[1],
                ip_start=ADMIN_NET[2], ip_end=ADMIN_NET[254],
                control_servers_mac=cls.VMs['control'].mac,
                control_servers_ip=cls.VMs['control'].ip,
                compute_servers_mac=cls.VMs['compute'].mac,
                compute_servers_ip=cls.VMs['compute'].ip)
            tmpl_path = '/tmp/admin-net{0}.xml'.format(ID)
            with open(tmpl_path, 'w') as o:
                o.write(tmpl)
            local('sudo virsh net-define {file}'.format(file=tmpl_path))
            local('sudo virsh net-autostart {0}'.format(cls.ADMIN_NAME))
            local('sudo virsh net-start {0}'.format(cls.ADMIN_NAME))

        # Create bridges
        for br in (cls.BRIDGE1, cls.BRIDGE2, cls.BRIDGE_MGMT):
            local('sudo brctl addbr {0}'.format(br))
            local('sudo ip link set dev {0} up'.format(br))

        # Create control-server
        control_server_disk = os.path.join(LIBVIRT_IMGS,
                                           'control{0}.qcow'.format(ID))
        control_conf_disk = os.path.join(LIBVIRT_IMGS,
                                         'control-config{0}.qcow'.format(ID))
        local('sudo qemu-img create -f qcow2 -b {s} {d} {size}G'.format(
            s=ubuntu_img_path, d=control_server_disk, size=DISK_SIZE))
        local('sudo cloud-localds {d} {user_data}'.format(
            d=control_conf_disk, user_data=USER_DATA_YAML))

        cntrl_server_xml = os.path.join(PARENT_FOLDER_PATH,
                                        'files/2-role/control-server.xml')
        with open(cntrl_server_xml) as f:
            tmpl = f.read().format(
                name=cls.VMs['control'].name,
                admin_net_name=cls.ADMIN_NAME,
                bridge_mgmt=cls.BRIDGE_MGMT,
                disk=control_server_disk, disk_config=control_conf_disk,
                admin_mac=cls.VMs['control'].mac, bridge=cls.BRIDGE1)
            tmpl_path = '/tmp/control-server{0}.xml'.format(ID)
            with open(tmpl_path, 'w') as o:
                o.write(tmpl)
            local('sudo virsh define {s}'.format(s=tmpl_path))
            local('sudo virsh start {0}'.format(cls.VMs['control'].name))

        # Create compute-server
        compute_server_disk = os.path.join(LIBVIRT_IMGS,
                                           'compute{0}.qcow'.format(ID))
        compute_conf_disk = os.path.join(LIBVIRT_IMGS,
                                         'compute-config{0}.qcow'.format(ID))
        local('sudo qemu-img create -f qcow2 -b {s} {d} {size}G'.format(
            s=ubuntu_img_path, d=compute_server_disk, size=DISK_SIZE))
        local('sudo cloud-localds {d} {user_data}'.format(
            d=compute_conf_disk, user_data=USER_DATA_YAML))

        compute_server_xml = os.path.join(PARENT_FOLDER_PATH,
                                          'files/2-role/compute-server.xml')
        with open(compute_server_xml) as f:
            tmpl = f.read().format(
                name=cls.VMs['compute'].name,
                admin_net_name=cls.ADMIN_NAME,
                disk=compute_server_disk, disk_config=compute_conf_disk,
                admin_mac=cls.VMs['compute'].mac, bridge=cls.BRIDGE2)
            tmpl_path = '/tmp/compute-server{0}.xml'.format(ID)
            with open(tmpl_path, 'w') as o:
                o.write(tmpl)
            local('sudo virsh define {s}'.format(s=tmpl_path))
            local('sudo virsh start {0}'.format(cls.VMs['compute'].name))

        # Create Titanium VM
        titanium_disk = os.path.join(LIBVIRT_IMGS,
                                     'titanium{0}.qcow'.format(ID))
        local('sudo cp {source} {dest}'.format(
            source=TITANIUM_IMG, dest=titanium_disk))
        titanium_xml = os.path.join(PARENT_FOLDER_PATH,
                                    'files/2-role/titanium.xml')
        with open(titanium_xml) as f:
            tmpl = f.read().format(
                name=cls.TITANIUM,
                bridge_mgmt=cls.BRIDGE_MGMT,
                disk=titanium_disk,
                bridge1=cls.BRIDGE1, bridge2=cls.BRIDGE2)
            tmpl_path = '/tmp/titanium{0}.xml'.format(ID)
            with open(tmpl_path, 'w') as o:
                o.write(tmpl)
            local('sudo virsh define {s}'.format(s=tmpl_path))
            local('sudo virsh start {0}'.format(cls.TITANIUM))

        hosts_ptrn = '{ip} {hostname}.slave.openstack.org {hostname}\n'
        hosts = hosts_ptrn.format(ip=cls.VMs['control'].ip,
                                  hostname='control-server')
        hosts += hosts_ptrn.format(ip=cls.VMs['compute'].ip,
                                   hostname='compute-server')
        for vm in cls.VMs.itervalues():
            with settings(host_string=vm.ip):
                with settings(warn_only=True):
                    vm_ready = lambda: not run('ls').failed
                    if not wait_until(vm_ready, timeout=60 * 5):
                        raise Exception('VM {0} failed'.format(vm.name))

                # hostname
                hostname = StringIO.StringIO()
                hostname.write(vm.name)
                put(hostname, '/etc/hostname', use_sudo=True)
                sudo('hostname {0}'.format(vm.name))

                # hosts
                append('/etc/hosts', hosts, use_sudo=True)

                # Configure eth1. Used for tenant networks; bridged to the
                # corresponding Titanium interface
                eth1_cfg = StringIO.StringIO()
                eth1_cfg.writelines([
                    'auto eth1\n',
                    'iface eth1 inet manual\n',
                    '\tup ifconfig $IFACE 0.0.0.0 up\n',
                    '\tup ip link set $IFACE promisc on\n',
                    '\tdown ifconfig $IFACE 0.0.0.0 down'])
                put(eth1_cfg, '/etc/network/interfaces.d/eth1.cfg',
                    use_sudo=True)
                sudo('ifup eth1')

                sudo('ip link set dev eth0 mtu 1450')

        with settings(host_string=cls.VMs['control'].ip):
            # Configure eth2. Used to connect to Titanium mgmt interface
            eth2_cfg = StringIO.StringIO()
            eth2_cfg.writelines([
                'auto eth2\n',
                'iface eth2 inet static\n',
                '\taddress {0}\n'.format(MGMT_NET[2]),
                '\tnetmask {0}\n'.format(MGMT_NET.netmask),
                '\tgateway {0}'.format(MGMT_NET[1])])
            put(eth2_cfg, '/etc/network/interfaces.d/eth2.cfg',
                use_sudo=True)
            sudo('ifup eth2')

            # Wait for Titanium VM
            with settings(warn_only=True):
                nexus_ready = lambda: \
                    not run('ping -c 1 {ip}'.format(ip=NEXUS_IP)).failed
                if not wait_until(nexus_ready, timeout=60 * 5):
                    raise Exception('Titanium VM is not online')

            # Add titanium public key to known_hosts
            run('ssh-keyscan -t rsa {ip} >> '
                '~/.ssh/known_hosts'.format(ip=NEXUS_IP))
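
The setUpClass above leans on names that are not shown here: module-level constants such as WORKSPACE, PARENT_FOLDER_PATH and NEXUS_IP, the Fabric primitives (env, local, run, sudo, put, append, settings), netaddr's IPNetwork, and the helpers wait_until, cls.rand_mac and cls.get_free_subnet. Below is a rough sketch of what those three helpers might look like, inferred only from how they are called; names, signatures and internals are assumptions, not the project's actual implementation.

import random
import time

from fabric.api import local, settings
from netaddr import IPNetwork


def wait_until(predicate, timeout=300, interval=5):
    # Poll predicate() until it returns a truthy value or the timeout expires;
    # return True on success, False if the deadline passes first.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False


class HelpersSketch(object):
    # Stand-in for the real test class; only the two class-level helpers are sketched.

    @classmethod
    def rand_mac(cls):
        # Random MAC address under the 52:54:00 prefix commonly used for KVM guests.
        return '52:54:00:{0:02x}:{1:02x}:{2:02x}'.format(
            random.randint(0, 255),
            random.randint(0, 255),
            random.randint(0, 255))

    @classmethod
    def get_free_subnet(cls, supernet, prefixlen):
        # Return the first /prefixlen subnet of supernet whose gateway address
        # does not answer a single ping, i.e. looks unused on this host.
        with settings(warn_only=True):
            for subnet in IPNetwork(supernet).subnet(prefixlen):
                if local('ping -c 1 -W 1 {0}'.format(subnet[1]),
                         capture=True).failed:
                    return subnet
        raise Exception('No free /{0} subnet inside {1}'.format(
            prefixlen, supernet))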