def setUp(self):
    """Bring up the CI environment and install fuel_agent on the test VM.

    Reads the environment description from FUEL_AGENT_CI_ENVIRONMENT_FILE,
    starts the environment, pushes the fuel_agent repo tarball to the VM,
    installs it there, and prepares provision data for the tests.
    """
    super(BaseFuelAgentCITest, self).setUp()
    # safe_load is sufficient for a plain-data YAML file and avoids the
    # arbitrary-object construction that a bare yaml.load allows.
    with open(FUEL_AGENT_CI_ENVIRONMENT_FILE) as f:
        env_data = yaml.safe_load(f)
    self.env = environment.Environment.new(**env_data)
    self.env.start()
    self.name = env_data['vm'][0]['name']
    # Pack the fuel_agent repo so it can be copied onto the VM in one go.
    repo_obj = self.env.repo_by_name(FUEL_AGENT_REPO_NAME)
    tgz_name = '%s.tar.gz' % repo_obj.name
    utils.execute('tar czf %s %s' % (
        tgz_name, os.path.join(self.env.envdir, repo_obj.path)))
    # Look the ssh object up once instead of once per call.
    ssh = self.env.ssh_by_name(self.name)
    ssh.wait()
    ssh.put_file(tgz_name, os.path.join('/tmp', tgz_name))
    ssh.run('tar xf %s' % os.path.join('/tmp', tgz_name),
            command_timeout=SSH_COMMAND_TIMEOUT)
    ssh.run('pip install setuptools --upgrade',
            command_timeout=SSH_COMMAND_TIMEOUT)
    ssh.run(
        'cd /root/var/tmp/fuel_agent_ci/fuel_agent/fuel_agent; '
        #FIXME(agordeev): ^ don't hardcode path
        'python setup.py install',
        command_timeout=SSH_COMMAND_TIMEOUT)
    self.http_obj = self.env.http_by_name(FUEL_AGENT_HTTP_NAME)
    self.dhcp_hosts = self.env.dhcp_by_name(FUEL_AGENT_DHCP_NAME).hosts
    self.net = self.env.net_by_name(FUEL_AGENT_NET_NAME)
    # Provision data for the first (and only used) dhcp host.
    p_data = get_filled_provision_data(self.dhcp_hosts[0]['ip'],
                                       self.dhcp_hosts[0]['mac'],
                                       self.net.ip,
                                       self.http_obj.port)
    ssh.put_content(json.dumps(p_data),
                    os.path.join('/tmp', 'provision.json'))
    self.mgr = fa_manager.Manager(p_data)
def _test_copyimage(self, profile):
    """Provision the node with *profile*'s image and verify its checksum."""
    #NOTE(agordeev): update provision.json with proper image specs
    template_vars = {
        'IP': self.dhcp_hosts[0]['ip'],
        'MAC': self.dhcp_hosts[0]['mac'],
        'MASTER_IP': self.net.ip,
        'MASTER_HTTP_PORT': self.http.port,
        'PROFILE': profile
    }
    rendered = self.render_template(template_data=template_vars,
                                    template_name='provision.json')
    provision_data = json.loads(rendered)
    self.ssh.put_content(json.dumps(provision_data), '/tmp/provision.json')
    #NOTE(agordeev): disks should be partitioned before applying the image
    self.ssh.run('partition')
    self.ssh.run('copyimage')
    #NOTE(agordeev): size and checksum needed for checking deployed image
    local_img_path = os.path.join(
        self.env.envdir, self.http.http_root, profile + '.img.gz')
    md5_output = str(utils.execute('gunzip -cd %s | md5sum'
                                   % local_img_path))
    expected_md5 = md5_output.split()[0]
    size_output = str(utils.execute('gzip -ql %s' % local_img_path))
    img_size = int(size_output.split()[1]) / 2 ** 20
    #NOTE(agordeev): the partition can be bigger than actual size of image
    # so calculating checksum of rewritten partition part
    # assuming that image has size in MB w/o fractional part
    dd_cmd = 'dd if=%s bs=1M count=%s | md5sum' % ('/dev/mapper/os-root',
                                                   img_size)
    actual_md5 = self.ssh.run(dd_cmd).split()[0]
    self.assertEqual(expected_md5, actual_md5)
def _test_copyimage(self, profile):
    """Deploy *profile*'s image over ssh and compare on-disk checksum."""
    #NOTE(agordeev): update provision.json with proper image specs
    p_data = base.get_filled_provision_data(self.dhcp_hosts[0]['ip'],
                                            self.dhcp_hosts[0]['mac'],
                                            self.net.ip,
                                            self.http_obj.port,
                                            profile)
    ssh = self.env.ssh_by_name(self.name)
    ssh.put_content(json.dumps(p_data),
                    os.path.join('/tmp', 'provision.json'))
    #NOTE(agordeev): disks should be partitioned before applying the image
    ssh.run('partition', command_timeout=base.SSH_COMMAND_TIMEOUT)
    ssh.run('copyimage', command_timeout=base.SSH_COMMAND_TIMEOUT)
    #NOTE(agordeev): size and checksum needed for checking deployed image
    local_img_path = os.path.join(self.env.envdir,
                                  self.http_obj.http_root,
                                  profile,
                                  profile + '.img.gz')
    checksum_result = utils.execute('gunzip -cd %s | md5sum'
                                    % local_img_path)
    size_result = utils.execute('gzip -ql %s' % local_img_path)
    img_size = int(size_result[1].split()[1]) / 2 ** 20
    expected_md5 = checksum_result[1].split()[0]
    #NOTE(agordeev): the partition can be bigger than actual size of image
    # so calculating checksum of rewritten partition part
    # assuming that image has size in MB w/o fractional part
    remote_md5_output = ssh.run(
        'dd if=%s bs=1M count=%s | md5sum' % (TARGET_DEVICE, img_size),
        command_timeout=base.SSH_COMMAND_TIMEOUT)
    actual_md5 = remote_md5_output.split()[0]
    self.assertEqual(expected_md5, actual_md5)
def _upgrade_fuel_agent(self):
    """This method is to be deprecated when artifact based build
    system is ready.
    """
    src_dir = os.path.join(self.env.envdir, self.repo.path, 'fuel_agent')
    package_name = 'fuel-agent-0.1.0.tar.gz'
    remote_pkg = os.path.join('/tmp', package_name)
    # Building fuel-agent pip package
    utils.execute('python setup.py sdist', cwd=src_dir)
    # Putting fuel-agent pip package on a node
    self.ssh.put_file(os.path.join(src_dir, 'dist', package_name),
                      remote_pkg)
    # Installing fuel_agent pip package
    self.ssh.run('pip install --upgrade %s' % remote_pkg)
    # Copying fuel_agent templates
    self.ssh.run('mkdir -p %s' % self.FUEL_AGENT_TEMPLATE_PATH)
    template_dir = os.path.join(src_dir, 'cloud-init-templates')
    for tmpl in os.listdir(template_dir):
        if not tmpl.endswith('.jinja2'):
            continue
        self.ssh.put_file(
            os.path.join(template_dir, tmpl),
            os.path.join(self.FUEL_AGENT_TEMPLATE_PATH, tmpl))
    self.ssh.put_file(
        os.path.join(src_dir, 'etc/fuel-agent/fuel-agent.conf.sample'),
        '/etc/fuel-agent/fuel-agent.conf')
def artifact_get(artifact):
    """Download the artifact from its URL and unpack it in the env dir.

    Streams the response in 1 MiB chunks to avoid holding the whole
    artifact in memory, then runs the artifact's unpack command.

    :param artifact: artifact object with ``url``, ``path``, ``unpack``
                     and an ``env`` providing ``envdir``.
    """
    response = requests.get(artifact.url, stream=True)
    # Fail fast on HTTP errors; otherwise an error page body would be
    # silently written into the artifact file and unpack would fail later
    # with a confusing message.
    response.raise_for_status()
    with open(os.path.join(artifact.env.envdir, artifact.path), 'wb') as f:
        for chunk in response.iter_content(1048576):
            f.write(chunk)
            f.flush()
    utils.execute(artifact.unpack, cwd=artifact.env.envdir)
def _test_copyimage(self, profile):
    """Deploy the image for *profile* and verify the written data's md5."""
    #NOTE(agordeev): update provision.json with proper image specs
    provision_data = json.loads(
        self.render_template(template_data={
            'IP': self.dhcp_hosts[0]['ip'],
            'MAC': self.dhcp_hosts[0]['mac'],
            'MASTER_IP': self.net.ip,
            'MASTER_HTTP_PORT': self.http.port,
            'PROFILE': profile
        }, template_name='provision.json'))
    self.ssh.put_content(json.dumps(provision_data), '/tmp/provision.json')
    #NOTE(agordeev): disks should be partitioned before applying the image
    self.ssh.run('partition')
    self.ssh.run('copyimage')
    #NOTE(agordeev): size and checksum needed for checking deployed image
    local_img_path = os.path.join(self.env.envdir, self.http.http_root,
                                  profile + '.img.gz')
    # md5 of the uncompressed reference image kept on the http root
    expected_md5 = str(
        utils.execute('gunzip -cd %s | md5sum'
                      % local_img_path)).split()[0]
    # gzip -l reports the uncompressed size in bytes; convert to whole MiB
    img_size = int(
        str(utils.execute(
            'gzip -ql %s' % local_img_path)).split()[1]) / 2**20
    #NOTE(agordeev): the partition can be bigger than actual size of image
    # so calculating checksum of rewritten partition part
    # assuming that image has size in MB w/o fractional part
    actual_md5 = self.ssh.run('dd if=%s bs=1M count=%s | md5sum' %
                              ('/dev/mapper/os-root', img_size)).split()[0]
    self.assertEqual(expected_md5, actual_md5)
def artifact_get(artifact):
    """Stream the artifact from its URL into the env dir and unpack it."""
    destination = os.path.join(artifact.env.envdir, artifact.path)
    download = requests.get(artifact.url, stream=True)
    with open(destination, 'wb') as out:
        # 1 MiB chunks keep memory usage flat for large artifacts.
        for block in download.iter_content(1048576):
            out.write(block)
            out.flush()
    utils.execute(artifact.unpack, cwd=artifact.env.envdir)
def _test_configdrive(self, profile):
    """Build a config drive for *profile* and verify its contents."""

    def _local_md5(file_path, size=-1):
        # md5 of at most `size` bytes of a local file (-1 = whole file).
        with open(file_path) as f:
            return hashlib.md5(f.read(size)).hexdigest()

    self._build_configdrive(profile)
    self.ssh.run('configdrive')
    self.ssh.get_file('/tmp/config-drive.img',
                      '/tmp/actual-config-drive.img')
    # checking configdrive file system type
    fs_type = utils.execute(
        'blkid -o value -s TYPE /tmp/actual-config-drive.img')
    self.assertEqual('iso9660', str(fs_type).strip())
    # checking configdrive label
    label_output = utils.execute(
        'blkid -o value -s LABEL /tmp/actual-config-drive.img')
    self.assertEqual('cidata', str(label_output).strip())
    # mounting configdrive to check its content
    utils.execute('mkdir -p /tmp/cfgdrv')
    utils.execute('sudo mount -o loop '
                  '/tmp/actual-config-drive.img /tmp/cfgdrv')
    #NOTE(agordeev): mime boundary should be the same in both files,
    # since boundary is always randomly generated,
    # thus magic prevents from checksum differencies
    with open('/tmp/user-data') as f:
        expected_boundary = f.read().split('\n')[0].split('"')[1]
    head_output = str(utils.execute('head -n1 /tmp/cfgdrv/user-data'))
    actual_boundary = head_output.split('"')[1]
    sed_cmd = 'sed -e s/%s/%s/ %s | md5sum' % (actual_boundary,
                                               expected_boundary,
                                               '/tmp/cfgdrv/user-data')
    actual_md5_userdata = str(utils.execute(sed_cmd)).split()[0]
    actual_md5_metadata = str(
        utils.execute('md5sum /tmp/cfgdrv/meta-data')).split()[0]
    # getting reference md5 for user-data and meta-data
    md5_userdata = _local_md5('/tmp/user-data')
    md5_metadata = _local_md5('/tmp/meta-data')
    self.assertEqual(md5_userdata, actual_md5_userdata)
    self.assertEqual(md5_metadata, actual_md5_metadata)
def _test_configdrive(self, profile):
    """Build a config drive for *profile* and verify fs type, label and
    the md5 of user-data/meta-data against locally generated references.
    """
    def _get_md5sum(file_path, size=-1):
        # md5 of up to `size` bytes of a local file (-1 reads it all).
        md5 = None
        with open(file_path) as f:
            md5 = hashlib.md5(f.read(size)).hexdigest()
        return md5
    self._build_configdrive(profile)
    self.ssh.run('configdrive')
    self.ssh.get_file('/tmp/config-drive.img',
                      '/tmp/actual-config-drive.img')
    # checking configdrive file system type
    fs_type = utils.execute(
        'blkid -o value -s TYPE /tmp/actual-config-drive.img')
    self.assertEqual('iso9660', str(fs_type).strip())
    # checking configdrive label
    label_output = utils.execute(
        'blkid -o value -s LABEL /tmp/actual-config-drive.img')
    self.assertEqual('cidata', str(label_output).strip())
    # mounting configdrive to check its content
    utils.execute('mkdir -p /tmp/cfgdrv')
    utils.execute('sudo mount -o loop '
                  '/tmp/actual-config-drive.img /tmp/cfgdrv')
    #NOTE(agordeev): mime boundary should be the same in both files,
    # since boundary is always randomly generated,
    # thus magic prevents from checksum differencies
    with open('/tmp/user-data') as f:
        expected_boundary = f.read().split('\n')[0].split('"')[1]
    actual_boundary = str(utils.execute(
        'head -n1 /tmp/cfgdrv/user-data')).split('"')[1]
    # normalize the random boundary before checksumming user-data
    actual_md5_userdata = str(utils.execute(
        'sed -e s/%s/%s/ %s | md5sum' % (
            actual_boundary, expected_boundary,
            '/tmp/cfgdrv/user-data'))).split()[0]
    actual_md5_metadata = str(utils.execute(
        'md5sum /tmp/cfgdrv/meta-data')).split()[0]
    # getting reference md5 for user-data and meta-data
    md5_userdata = _get_md5sum('/tmp/user-data')
    md5_metadata = _get_md5sum('/tmp/meta-data')
    self.assertEqual(md5_userdata, actual_md5_userdata)
    self.assertEqual(md5_metadata, actual_md5_metadata)
def artifact_clean(artifact):
    """Run the artifact's clean command inside the environment directory."""
    workdir = artifact.env.envdir
    utils.execute(artifact.clean, cwd=workdir)
def repo_clean(repo):
    """Remove the repository checkout from the environment directory."""
    checkout_path = os.path.join(repo.env.envdir, repo.path)
    utils.execute('rm -rf %s' % checkout_path)
def _build_configdrive(self, profile):
    """Render provision data and cloud-init inputs for *profile*, then
    assemble /tmp/user-data as a MIME multipart (boothook + cloud-config).

    Writes /tmp/provision.json to the node and /tmp/boothook.txt,
    /tmp/cloud_config.txt, /tmp/meta-data, /tmp/user-data locally.
    """
    data = json.loads(
        self.render_template(template_name='provision.json',
                             template_data={
                                 'IP': self.dhcp_hosts[0]['ip'],
                                 'MAC': self.dhcp_hosts[0]['mac'],
                                 'MASTER_IP': self.net.ip,
                                 'MASTER_HTTP_PORT': self.http.port,
                                 'PROFILE': profile,
                             }))
    self.ssh.put_content(json.dumps(data), '/tmp/provision.json')
    # Pick the interface whose MAC matches the kernel's admin interface.
    admin_interface = filter(
        lambda x: (x['mac_address'] == data['kernel_options'][
            'netcfg/choose_interface']),
        [
            dict(name=name, **spec)
            for name, spec in data['interfaces'].iteritems()
        ])[0]
    # Boothook template is selected by the OS family part of the profile.
    with open('/tmp/boothook.txt', 'wb') as f:
        f.write(self.render_template(
            template_name='boothook_%s.jinja2' % profile.split('_')[0],
            template_data={
                'MASTER_IP': data['ks_meta']['master_ip'],
                'ADMIN_MAC': \
                    data['kernel_options']['netcfg/choose_interface'],
                'UDEVRULES': data['kernel_options']['udevrules']
            }
        ))
    with open('/tmp/cloud_config.txt', 'wb') as f:
        f.write(self.render_template(
            template_name='cloud_config_%s.jinja2' % profile.split('_')[0],
            template_data={
                'SSH_AUTH_KEY': data['ks_meta']['auth_key'],
                'TIMEZONE': data['ks_meta']['timezone'],
                'HOSTNAME': data['hostname'],
                'FQDN': data['hostname'],
                'NAME_SERVERS': data['name_servers'],
                'SEARCH_DOMAIN': data['name_servers_search'],
                'MASTER_IP': data['ks_meta']['master_ip'],
                'MASTER_URL': \
                    'http://%s:8000/api' % data['ks_meta']['master_ip'],
                # FIXME(kozhukalov):
                # 'KS_REPOS': IS NOT SET YET,
                'MCO_PSKEY': data['ks_meta']['mco_pskey'],
                'MCO_CONNECTOR': data['ks_meta']['mco_connector'],
                'MCO_VHOST': data['ks_meta']['mco_vhost'],
                'MCO_HOST': data['ks_meta']['mco_host'],
                # 'MCO_PORT': IS NOT SET, DEFAULT IS USED
                'MCO_USER': data['ks_meta']['mco_user'],
                'MCO_PASSWORD': data['ks_meta']['mco_password'],
                'PUPPET_MASTER': data['ks_meta']['puppet_master']
            }
        ))
    with open('/tmp/meta-data', 'wb') as f:
        f.write(
            self.render_template(template_name='meta-data_%s.jinja2'
                                 % profile.split('_')[0],
                                 template_data={
                                     'ADMIN_IFACE_NAME':
                                         admin_interface['name'],
                                     'ADMIN_IP':
                                         admin_interface['ip_address'],
                                     'ADMIN_MASK':
                                         admin_interface['netmask'],
                                     'HOSTNAME': data['hostname']
                                 }))
    # write-mime-multipart is provided by cloud-utils package
    utils.execute('write-mime-multipart --output=/tmp/user-data '
                  '/tmp/boothook.txt:text/cloud-boothook '
                  '/tmp/cloud_config.txt:text/cloud-config')
def tearDown(self):
    """Unmount the config drive and remove files produced by the test."""
    # -f forces the unmount even if the loop mount is busy.
    utils.execute('sudo umount -f /tmp/cfgdrv')
    # Drop every temp artifact created by _build_configdrive and the test.
    utils.execute('rm /tmp/actual-config-drive.img '
                  '/tmp/user-data /tmp/meta-data '
                  '/tmp/cloud_config.txt /tmp/boothook.txt')
    super(TestConfigDrive, self).tearDown()
def _build_configdrive(self, profile):
    """Render provision data and cloud-init inputs for *profile*, then
    assemble /tmp/user-data as a MIME multipart (boothook + cloud-config).

    Writes /tmp/provision.json to the node and /tmp/boothook.txt,
    /tmp/cloud_config.txt, /tmp/meta-data, /tmp/user-data locally.
    """
    data = json.loads(self.render_template(
        template_name='provision.json',
        template_data={
            'IP': self.dhcp_hosts[0]['ip'],
            'MAC': self.dhcp_hosts[0]['mac'],
            'MASTER_IP': self.net.ip,
            'MASTER_HTTP_PORT': self.http.port,
            'PROFILE': profile,
        }
    ))
    self.ssh.put_content(json.dumps(data), '/tmp/provision.json')
    # Pick the interface whose MAC matches the kernel's admin interface.
    admin_interface = filter(
        lambda x: (x['mac_address'] ==
                   data['kernel_options']['netcfg/choose_interface']),
        [dict(name=name, **spec)
         for name, spec in data['interfaces'].iteritems()])[0]
    # Boothook template is selected by the OS family part of the profile.
    with open('/tmp/boothook.txt', 'wb') as f:
        f.write(self.render_template(
            template_name='boothook_%s.jinja2' % profile.split('_')[0],
            template_data={
                'MASTER_IP': data['ks_meta']['master_ip'],
                'ADMIN_MAC': \
                    data['kernel_options']['netcfg/choose_interface'],
                'UDEVRULES': data['kernel_options']['udevrules']
            }
        ))
    with open('/tmp/cloud_config.txt', 'wb') as f:
        f.write(self.render_template(
            template_name='cloud_config_%s.jinja2' % profile.split('_')[0],
            template_data={
                'SSH_AUTH_KEY': data['ks_meta']['auth_key'],
                'TIMEZONE': data['ks_meta']['timezone'],
                'HOSTNAME': data['hostname'],
                'FQDN': data['hostname'],
                'NAME_SERVERS': data['name_servers'],
                'SEARCH_DOMAIN': data['name_servers_search'],
                'MASTER_IP': data['ks_meta']['master_ip'],
                'MASTER_URL': \
                    'http://%s:8000/api' % data['ks_meta']['master_ip'],
                # FIXME(kozhukalov):
                # 'KS_REPOS': IS NOT SET YET,
                'MCO_PSKEY': data['ks_meta']['mco_pskey'],
                'MCO_CONNECTOR': data['ks_meta']['mco_connector'],
                'MCO_VHOST': data['ks_meta']['mco_vhost'],
                'MCO_HOST': data['ks_meta']['mco_host'],
                # 'MCO_PORT': IS NOT SET, DEFAULT IS USED
                'MCO_USER': data['ks_meta']['mco_user'],
                'MCO_PASSWORD': data['ks_meta']['mco_password'],
                'PUPPET_MASTER': data['ks_meta']['puppet_master']
            }
        ))
    with open('/tmp/meta-data', 'wb') as f:
        f.write(self.render_template(
            template_name='meta-data_%s.jinja2' % profile.split('_')[0],
            template_data={
                'ADMIN_IFACE_NAME': admin_interface['name'],
                'ADMIN_IP': admin_interface['ip_address'],
                'ADMIN_MASK': admin_interface['netmask'],
                'HOSTNAME': data['hostname']
            }
        ))
    # write-mime-multipart is provided by cloud-utils package
    utils.execute('write-mime-multipart --output=/tmp/user-data '
                  '/tmp/boothook.txt:text/cloud-boothook '
                  '/tmp/cloud_config.txt:text/cloud-config')