def create_if_vm(ctx, machine_name, _downburst=None):
    """
    Use downburst (or a cloud provisioner, for cloud-backed machine
    types) to create a virtual machine.

    :param _downburst: Only used for unit testing.
    :returns: False when the target is not a VM; otherwise the result
              of the provisioner's ``create()`` call.
    """
    status = _downburst.status if _downburst \
        else teuthology.lock.query.get_status(machine_name)
    short = decanonicalize_hostname(machine_name)
    mtype = status['machine_type']
    distro = get_distro(ctx)
    distro_version = get_distro_version(ctx)
    # Nothing to create for bare-metal targets.
    if not teuthology.lock.query.is_vm(status=status):
        return False
    # Cloud machine types have their own provisioner implementations.
    if mtype in cloud.get_types():
        provisioner = cloud.get_provisioner(
            mtype,
            short,
            distro,
            distro_version,
            conf=getattr(ctx, 'config', dict()),
        )
        return provisioner.create()
    cfg = getattr(ctx, 'config', None)
    if cfg is not None and 'downburst' in cfg:
        log.warning('Usage of a custom downburst config has been deprecated.')
    if _downburst:
        dbrst = _downburst
    else:
        dbrst = downburst.Downburst(
            name=machine_name,
            os_type=distro,
            os_version=distro_version,
            status=status,
            logfile=_logfile(ctx, short),
        )
    return dbrst.create()
def need_to_install(ctx, role, version):
    """
    Check to see if we need to install a kernel.

    Get the version of the currently running kernel, and compare it
    against the value passed in.

    :param ctx: Context
    :param role: Role
    :param version: value to compare against (used in checking), can
                    be either a utsrelease string (e.g.
                    '3.13.0-rc3-ceph-00049-ge2817b3') or a sha1.
    :returns: True when an install is needed, False when the running
              kernel already matches ``version``.
    """
    # Assume an install is needed until a match is proven below.
    ret = True
    log.info('Checking kernel version of {role}, want "{ver}"...'.format(
        role=role, ver=version))
    uname_fp = StringIO()
    # Ask the remote host for its running kernel release string.
    ctx.cluster.only(role).run(
        args=[
            'uname', '-r',
        ],
        stdout=uname_fp,
    )
    cur_version = uname_fp.getvalue().rstrip('\n')
    log.debug('current kernel version is {ver} vs {want}'.format(
        ver=cur_version, want=version))
    # A dot in the wanted version means it is a utsrelease string,
    # not a bare sha1.
    if '.' in str(version):
        if cur_version == version:
            log.debug('utsrelease strings match, do not need to install')
            ret = False
        os_type = teuthology.get_distro(ctx)
        log.debug("Distro of this test job: {}".format(os_type))
        # SUSE kernels append a "-default" flavor suffix to uname -r,
        # so an exact comparison would never match; strip it and test
        # for a substring match instead.
        if os_type in ['sle', 'opensuse']:
            cur_version_match = re.search('(.*)-default$', cur_version)
            if cur_version_match:
                cur_version_rp = cur_version_match.group(1)
                if cur_version_rp in version:
                    log.debug('"{}" is a substring of "{}" - the latest {} kernel is running'
                              .format(cur_version_rp, version, os_type))
                    ret = False
            else:
                log.debug('failed to parse current kernel version {} (os_type is "{}")'
                          .format(cur_version, os_type))
    else:
        # version is sha1, need to try to extract sha1 from cur_version
        match = re.search('[-_]g([0-9a-f]{6,40})', cur_version)
        if match:
            cur_sha1 = match.group(1)
            log.debug('extracting sha1, {ver} -> {sha1}'.format(
                ver=cur_version, sha1=cur_sha1))
            # Compare only the common prefix, since uname may embed an
            # abbreviated sha1; require at least 6 hex digits to avoid
            # spurious matches.
            m = min(len(cur_sha1), len(version))
            assert m >= 6, "cur_sha1 and/or version is too short, m = %d" % m
            if cur_sha1[0:m] == version[0:m]:
                log.debug('extracted sha1 matches, do not need to install')
                ret = False
        else:
            log.debug('failed to parse current kernel version')
    uname_fp.close()
    return ret
def create_if_vm(ctx, machine_name):
    """
    When ``machine_name`` is backed by a physical VPS host, write a
    downburst meta-data file and run downburst there to create the
    guest.

    :returns: False when the machine is not a VM or no downburst
              executable is available, True otherwise.
    """
    info = ls.get_status(ctx, machine_name)
    host = info["vpshost"]
    if not host:
        # Bare metal; nothing to create.
        return False
    from teuthology.misc import get_distro
    os_type = get_distro(ctx)
    # Per-distro fallback versions used when the job config supplies none.
    fallback_versions = {
        "ubuntu": "12.04",
        "fedora": "18",
        "centos": "6.4",
        "opensuse": "12.2",
        "sles": "11-sp2",
        "rhel": "6.3",
        "debian": "6.0",
    }
    guest = decanonicalize_hostname(machine_name)
    with tempfile.NamedTemporaryFile() as meta:
        try:
            overrides = ctx.config["downburst"]
        except (KeyError, AttributeError):
            overrides = {}
        distro = overrides.get("distro", os_type.lower())
        try:
            distroversion = ctx.config.get("os_version",
                                           fallback_versions[distro])
        except AttributeError:
            distroversion = fallback_versions[distro]
        spec = {
            "disk-size": overrides.get("disk-size", "30G"),
            "ram": overrides.get("ram", "1.9G"),
            "cpus": overrides.get("cpus", 1),
            "networks": overrides.get(
                "networks", [{"source": "front", "mac": info["mac"]}]),
            "distro": distro,
            "distroversion": distroversion,
            "additional-disks": overrides.get("additional-disks", 3),
            "additional-disks-size": overrides.get(
                "additional-disks-size", "200G"),
            "arch": overrides.get("arch", "x86_64"),
        }
        yaml.safe_dump({"downburst": spec}, meta)
        metadata = "--meta-data=%s" % meta.name
        executable = _get_downburst_exec()
        if not executable:
            log.error("No downburst executable found.")
            return False
        proc = subprocess.Popen(
            [executable, "-c", host, "create", metadata, guest],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        owt, err = proc.communicate()
        if err:
            log.info("Downburst completed on %s: %s" % (machine_name, err))
        else:
            log.info("%s created: %s" % (machine_name, owt))
        # If the guest already exists first destroy then re-create:
        if "exists" in err:
            log.info("Guest files exist. Re-creating guest: %s" % (machine_name))
            destroy_if_vm(ctx, machine_name)
            create_if_vm(ctx, machine_name)
    return True
def create_if_vm(ctx, machine_name):
    """
    Create a downburst guest for ``machine_name`` when its lock status
    records a physical VPS host.

    :returns: False when the machine is not a VM or downburst is
              missing, True otherwise.
    """
    info = ls.get_status(ctx, machine_name)
    vps_host = info['vpshost']
    if not vps_host:
        return False
    os_type = get_distro(ctx)
    os_version = get_distro_version(ctx)
    guest_name = decanonicalize_hostname(machine_name)
    with tempfile.NamedTemporaryFile() as meta_file:
        try:
            overrides = ctx.config['downburst']
        except (KeyError, AttributeError):
            overrides = {}
        # Guest description consumed by downburst via --meta-data.
        spec = {
            'disk-size': overrides.get('disk-size', '30G'),
            'ram': overrides.get('ram', '1.9G'),
            'cpus': overrides.get('cpus', 1),
            'networks': overrides.get(
                'networks', [{'source': 'front', 'mac': info['mac']}]),
            'distro': overrides.get('distro', os_type.lower()),
            'distroversion': overrides.get('distroversion', os_version),
            'additional-disks': overrides.get('additional-disks', 3),
            'additional-disks-size': overrides.get(
                'additional-disks-size', '200G'),
            'arch': overrides.get('arch', 'x86_64'),
        }
        yaml.safe_dump({'downburst': spec}, meta_file)
        meta_arg = "--meta-data=%s" % meta_file.name
        downburst_exe = _get_downburst_exec()
        if not downburst_exe:
            log.error("No downburst executable found.")
            return False
        proc = subprocess.Popen(
            [downburst_exe, '-c', vps_host, 'create', meta_arg, guest_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        owt, err = proc.communicate()
        if err:
            log.info("Downburst completed on %s: %s" % (machine_name, err))
        else:
            log.info("%s created: %s" % (machine_name, owt))
        # If the guest already exists first destroy then re-create:
        if 'exists' in err:
            log.info("Guest files exist. Re-creating guest: %s" % (machine_name))
            destroy_if_vm(ctx, machine_name)
            create_if_vm(ctx, machine_name)
    return True
def create_if_vm(ctx, machine_name):
    """
    Create a downburst guest for ``machine_name`` when its lock status
    records a physical VPS host; no-op for bare-metal machines.

    :returns: False when not a VM or no downburst executable is found,
              True otherwise.
    """
    info = ls.get_status(ctx, machine_name)
    vps_host = info['vpshost']
    if not vps_host:
        return False
    os_type = get_distro(ctx)
    os_version = get_distro_version(ctx)
    guest_name = decanonicalize_hostname(machine_name)
    with tempfile.NamedTemporaryFile() as meta_file:
        # Job-config overrides for the guest, when a config is present.
        cfg = getattr(ctx, 'config', None)
        overrides = cfg.get('downburst', dict()) if cfg is not None else {}
        spec = {
            'disk-size': overrides.get('disk-size', '100G'),
            'ram': overrides.get('ram', '1.9G'),
            'cpus': overrides.get('cpus', 1),
            'networks': overrides.get(
                'networks', [{'source': 'front', 'mac': info['mac']}]),
            'distro': overrides.get('distro', os_type.lower()),
            'distroversion': overrides.get('distroversion', os_version),
            'additional-disks': overrides.get('additional-disks', 3),
            'additional-disks-size': overrides.get(
                'additional-disks-size', '200G'),
            'arch': overrides.get('arch', 'x86_64'),
        }
        yaml.safe_dump({'downburst': spec}, meta_file)
        meta_arg = "--meta-data=%s" % meta_file.name
        downburst_exe = _get_downburst_exec()
        if not downburst_exe:
            log.error("No downburst executable found.")
            return False
        proc = subprocess.Popen(
            [downburst_exe, '-c', vps_host, 'create', meta_arg, guest_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        owt, err = proc.communicate()
        if err:
            log.info("Downburst completed on %s: %s" % (machine_name, err))
        else:
            log.info("%s created: %s" % (machine_name, owt))
        # If the guest already exists first destroy then re-create:
        if 'exists' in err:
            log.info("Guest files exist. Re-creating guest: %s" % (machine_name))
            destroy_if_vm(ctx, machine_name)
            create_if_vm(ctx, machine_name)
    return True
def reimage(ctx, machine_name, machine_type):
    """
    Reimage ``machine_name`` with the job's distro, delegating to
    whichever provisioner (Pelagos or FOG) claims ``machine_type``.

    :raises Exception: when the machine type is claimed by both
                       provisioners, or by neither.
    """
    distro = get_distro(ctx)
    distro_version = get_distro_version(ctx)
    in_pelagos = machine_type in pelagos.get_types()
    in_fog = machine_type in fog.get_types()
    if in_pelagos and in_fog:
        # Ambiguous configuration; refuse to guess.
        raise Exception('machine_type can be used with one provisioner only')
    if in_pelagos:
        provisioner = pelagos.Pelagos(machine_name, distro, distro_version)
    elif in_fog:
        provisioner = fog.FOG(machine_name, distro, distro_version)
    else:
        raise Exception("The machine_type '%s' is not known to any "
                        "of configured provisioners" % machine_type)
    return provisioner.create()
def kubeadm_install(ctx, config):
    """
    Task generator: install cri-o and kubeadm/kubelet/kubectl on every
    remote in the cluster, yield while the workload runs, then uninstall
    on the way out (unless ``config['uninstall']`` is False).

    Fixes applied:
    - apt uninstall list had a typo: 'kkubeadm' -> 'kubeadm'.
    - local variable ``os`` renamed to ``os_id`` so it no longer
      shadows the stdlib ``os`` module.

    :param config: dict; honors 'version' (cri-o version, default
                   '1.21') and 'uninstall' (default True).
    :raises RuntimeError: on unsupported distros.
    """
    version = config.get('version', '1.21')
    os_type = teuthology.get_distro(ctx)
    os_version = teuthology.get_distro_version(ctx)
    try:
        if os_type in ['centos', 'rhel']:
            # OBS repo path component, e.g. "CentOS_8".
            os_id = f"CentOS_{os_version.split('.')[0]}"
            log.info('Installing cri-o')
            run.wait(
                ctx.cluster.run(
                    args=[
                        'sudo', 'curl', '-L', '-o',
                        '/etc/yum.repos.d/devel:kubic:libcontainers:stable.repo',
                        f'https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{os_id}/devel:kubic:libcontainers:stable.repo',
                        run.Raw('&&'),
                        'sudo', 'curl', '-L', '-o',
                        f'/etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:{version}.repo',
                        f'https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/{version}/{os_id}/devel:kubic:libcontainers:stable:cri-o:{version}.repo',
                        run.Raw('&&'),
                        'sudo', 'dnf', 'install', '-y', 'cri-o',
                    ],
                    wait=False,
                )
            )
            log.info('Installing kube{adm,ctl,let}')
            repo = """[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
"""
            for remote in ctx.cluster.remotes.keys():
                remote.write_file(
                    '/etc/yum.repos.d/kubernetes.repo',
                    repo,
                    sudo=True,
                )
            run.wait(
                ctx.cluster.run(
                    args=[
                        'sudo', 'dnf', 'install', '-y',
                        'kubelet', 'kubeadm', 'kubectl',
                        'iproute-tc', 'bridge-utils',
                    ],
                    wait=False,
                )
            )
            # fix cni config: use flannel via cri-o and drop competing
            # podman/crio bridge configs that would conflict with it.
            for remote in ctx.cluster.remotes.keys():
                conf = """# from https://github.com/cri-o/cri-o/blob/master/tutorials/kubernetes.md#flannel-network
{
    "name": "crio",
    "type": "flannel"
}
"""
                remote.write_file('/etc/cni/net.d/10-crio-flannel.conf',
                                  conf, sudo=True)
                remote.run(args=[
                    'sudo', 'rm', '-f',
                    '/etc/cni/net.d/87-podman-bridge.conflist',
                    '/etc/cni/net.d/100-crio-bridge.conf',
                ])
            # start crio
            run.wait(
                ctx.cluster.run(
                    args=[
                        'sudo', 'systemctl', 'daemon-reload',
                        run.Raw('&&'),
                        'sudo', 'systemctl', 'enable', 'crio', '--now',
                    ],
                    wait=False,
                )
            )
        elif os_type == 'ubuntu':
            # NOTE(review): os_id is computed but unused on the ubuntu
            # path in the original as well; kept for parity.
            os_id = f"xUbuntu_{os_version}"
            log.info('Installing kube{adm,ctl,let}')
            run.wait(
                ctx.cluster.run(
                    args=[
                        'sudo', 'apt', 'update',
                        run.Raw('&&'),
                        'sudo', 'apt', 'install', '-y',
                        'apt-transport-https', 'ca-certificates', 'curl',
                        run.Raw('&&'),
                        'sudo', 'curl', '-fsSLo',
                        '/usr/share/keyrings/kubernetes-archive-keyring.gpg',
                        'https://packages.cloud.google.com/apt/doc/apt-key.gpg',
                        run.Raw('&&'),
                        'echo', 'deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main',
                        run.Raw('|'),
                        'sudo', 'tee', '/etc/apt/sources.list.d/kubernetes.list',
                        run.Raw('&&'),
                        'sudo', 'apt', 'update',
                        run.Raw('&&'),
                        'sudo', 'apt', 'install', '-y',
                        'kubelet', 'kubeadm', 'kubectl', 'bridge-utils',
                    ],
                    wait=False,
                )
            )
        else:
            raise RuntimeError(f'unsupported distro {os_type} for cri-o')
        # Enable kubelet and pre-pull control-plane images on all nodes.
        run.wait(
            ctx.cluster.run(
                args=[
                    'sudo', 'systemctl', 'enable', '--now', 'kubelet',
                    run.Raw('&&'),
                    'sudo', 'kubeadm', 'config', 'images', 'pull',
                ],
                wait=False,
            )
        )
        yield
    finally:
        if config.get('uninstall', True):
            log.info('Uninstalling kube{adm,let,ctl}')
            if os_type in ['centos', 'rhel']:
                run.wait(
                    ctx.cluster.run(
                        args=[
                            'sudo', 'rm', '-f',
                            '/etc/yum.repos.d/kubernetes.repo',
                            run.Raw('&&'),
                            'sudo', 'dnf', 'remove', '-y',
                            'kubeadm', 'kubelet', 'kubectl', 'cri-o',
                        ],
                        wait=False
                    )
                )
            # 'and False' deliberately disables the ubuntu uninstall path
            # in the original; preserved here.
            elif os_type == 'ubuntu' and False:
                run.wait(
                    ctx.cluster.run(
                        args=[
                            'sudo', 'rm', '-f',
                            '/etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list',
                            f'/etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:{version}.list',
                            '/etc/apt/trusted.gpg.d/libcontainers-cri-o.gpg',
                            run.Raw('&&'),
                            'sudo', 'apt', 'remove', '-y',
                            # fixed typo: was 'kkubeadm'
                            'kubeadm', 'kubelet', 'kubectl',
                            'cri-o', 'cri-o-runc',
                        ],
                        wait=False,
                    )
                )
def rook_operator(ctx, config):
    """
    Task generator: clone the rook repository, deploy the rook-ceph
    operator into the k8s cluster, wait for it to run, yield, and tear
    it down afterwards.

    :param config: dict with 'cluster' (required) plus optional
                   'rook_branch', 'rook_git_url' and 'rook_image'.
    """
    cluster_name = config['cluster']
    rook_branch = config.get('rook_branch', 'master')
    rook_git_url = config.get('rook_git_url', 'https://github.com/rook/rook')
    log.info(f'Cloning {rook_git_url} branch {rook_branch}')
    # Fresh clone each run; remove any leftover checkout first.
    ctx.rook[cluster_name].remote.run(args=[
        'rm', '-rf', 'rook',
        run.Raw('&&'),
        'git', 'clone', '--single-branch', '--branch', rook_branch,
        rook_git_url, 'rook',
    ])
    # operator.yaml
    operator_yaml = ctx.rook[cluster_name].remote.read_file(
        'rook/cluster/examples/kubernetes/ceph/operator.yaml')
    rook_image = config.get('rook_image')
    if rook_image:
        log.info(f'Patching operator to use image {rook_image}')
        crs = list(yaml.load_all(operator_yaml, Loader=yaml.FullLoader))
        # operator.yaml is expected to hold exactly two documents; the
        # second one is the operator Deployment whose image we patch.
        assert len(crs) == 2
        crs[1]['spec']['template']['spec']['containers'][0][
            'image'] = rook_image
        operator_yaml = yaml.dump_all(crs)
    ctx.rook[cluster_name].remote.write_file('operator.yaml', operator_yaml)
    op_job = None
    try:
        log.info('Deploying operator')
        _kubectl(ctx, config, [
            'create',
            '-f', 'rook/cluster/examples/kubernetes/ceph/crds.yaml',
            '-f', 'rook/cluster/examples/kubernetes/ceph/common.yaml',
            '-f', 'operator.yaml',
        ])
        # on centos:
        if teuthology.get_distro(ctx) == 'centos':
            _kubectl(ctx, config, [
                '-n', 'rook-ceph',
                'set', 'env', 'deploy/rook-ceph-operator',
                'ROOK_HOSTPATH_REQUIRES_PRIVILEGED=true'
            ])
        # wait for operator: poll pod listings until one operator pod
        # reports Running (up to 90 tries, 10s apart).
        op_name = None
        with safe_while(sleep=10, tries=90,
                        action="wait for operator") as proceed:
            while not op_name and proceed():
                p = _kubectl(
                    ctx, config,
                    [
                        '-n', 'rook-ceph', 'get', 'pods',
                        '-l', 'app=rook-ceph-operator'
                    ],
                    stdout=BytesIO(),
                )
                for line in p.stdout.getvalue().decode(
                        'utf-8').strip().splitlines():
                    name, ready, status, _ = line.split(None, 3)
                    if status == 'Running':
                        op_name = name
                        break
        # log operator output: stream its logs in the background for
        # the lifetime of the task.
        op_job = _kubectl(
            ctx, config,
            ['-n', 'rook-ceph', 'logs', '-f', op_name],
            wait=False,
            logger=log.getChild('operator'),
        )
        yield
    except Exception as e:
        log.exception(e)
        raise
    finally:
        log.info('Cleaning up rook operator')
        _kubectl(ctx, config, [
            'delete', '-f', 'operator.yaml',
        ])
        if False:
            # don't bother since we'll tear down k8s anyway (and this
            # mysteriously fails sometimes when deleting some of the
            # CRDs... not sure why!)
            _kubectl(ctx, config, [
                'delete',
                '-f', 'rook/cluster/examples/kubernetes/ceph/common.yaml',
            ])
            _kubectl(ctx, config, [
                'delete',
                '-f', 'rook/cluster/examples/kubernetes/ceph/crds.yaml',
            ])
        ctx.rook[cluster_name].remote.run(
            args=['rm', '-rf', 'rook', 'operator.yaml'])
        if op_job:
            op_job.wait()
        run.wait(ctx.cluster.run(args=['sudo', 'rm', '-rf',
                                       '/var/lib/rook']))
def task(ctx, config):
    """
    Build Ceph packages. This task will automagically be run
    before the task that need to install packages (this is taken
    care of by the internal teuthology task).

    The config should be as follows:

    buildpackages:
      good_machine:
        disk: 40 # GB
        ram: 48000 # MB
        cpus: 16
      min_machine:
        disk: 40 # GB
        ram: 8000 # MB
        cpus: 1

    example:

    tasks:
    - buildpackages:
        good_machine:
          disk: 40 # GB
          ram: 15000 # MB
          cpus: 16
        min_machine:
          disk: 40 # GB
          ram: 8000 # MB
          cpus: 1
    - install:

    When a buildpackages task is already included, the values it contains can be
    overriden with:

    overrides:
      buildpackages:
        good_machine:
          disk: 20 # GB
          ram: 2000 # MB
          cpus: 2
        min_machine:
          disk: 10 # GB
          ram: 1000 # MB
          cpus: 1
    """
    log.info('Beginning buildpackages...')
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for config not ' + str(config)
    overrides = ctx.config.get('overrides', {})
    misc.deep_merge(config, overrides.get('buildpackages', {}))
    d = os.path.join(os.path.dirname(__file__), 'buildpackages')
    os_type = misc.get_distro(ctx)
    os_version = misc.get_distro_version(ctx)
    arch = ctx.config.get('arch', OpenStack().get_default_arch())
    dist = LocalGitbuilderProject()._get_distro(distro=os_type,
                                                version=os_version)
    pkg_type = get_pkg_type(os_type)
    # Prime the ssh agent once, serialized across concurrent jobs via flock.
    misc.sh(
        "flock --close /tmp/buildpackages " +
        "make -C " + d + " " + os.environ['HOME'] + "/.ssh_agent")
    for (flavor, tag, branch, sha1) in lookup_configs(ctx, ctx.config):
        # A tag or branch overrides any sha1 from the config.
        if tag:
            sha1 = get_sha1(tag)
        elif branch:
            sha1 = get_sha1(branch)
        log.info("building flavor = " + flavor + "," +
                 " tag = " + tag + "," +
                 " branch = " + branch + "," +
                 " sha1 = " + sha1)
        target = ('ceph-' +
                  pkg_type + '-' +
                  dist + '-' +
                  arch + '-' +
                  flavor + '-' +
                  sha1)
        openstack = OpenStack()
        openstack.set_provider()
        # On OVH, restrict flavor selection to ssd-backed vps/hg flavors.
        if openstack.provider == 'ovh':
            select = '^(vps|hg)-.*ssd'
        else:
            select = ''
        network = openstack.net()
        if network != "":
            network = " OPENSTACK_NETWORK='" + network + "' "
        openstack.image(os_type, os_version, arch)  # create if it does not exist
        build_flavor = openstack.flavor_range(
            config['min_machine'], config['good_machine'], arch, select)
        default_arch = openstack.get_default_arch()
        http_flavor = openstack.flavor({
            'disk': 30,  # GB
            'ram': 1024,  # MB
            'cpus': 1,
        }, default_arch, select)
        # Serialize builds of the same sha1/distro across jobs.
        lock = "/tmp/buildpackages-" + sha1 + "-" + os_type + "-" + os_version
        cmd = (". " + os.environ['HOME'] + "/.ssh_agent ; " +
               " flock --close " + lock +
               " make -C " + d +
               network +
               " CEPH_GIT_URL=" + teuth_config.get_ceph_git_url() +
               " CEPH_PKG_TYPE=" + pkg_type +
               " CEPH_OS_TYPE=" + os_type +
               " CEPH_OS_VERSION=" + os_version +
               " CEPH_DIST=" + dist +
               " CEPH_ARCH=" + arch +
               " CEPH_SHA1=" + sha1 +
               " CEPH_TAG=" + tag +
               " CEPH_BRANCH=" + branch +
               " CEPH_FLAVOR=" + flavor +
               " BUILD_FLAVOR=" + build_flavor +
               " HTTP_FLAVOR=" + http_flavor +
               " HTTP_ARCH=" + default_arch +
               " " + target +
               " ")
        log.info("buildpackages: " + cmd)
        misc.sh(cmd)
    # Point later install tasks at the repository machine just built.
    teuth_config.gitbuilder_host = openstack.get_ip('packages-repository', '')
    log.info('Finished buildpackages')
def task(ctx, config):
    """
    Build Ceph packages. This task will automagically be run
    before the task that need to install packages (this is taken
    care of by the internal teuthology task).

    The config should be as follows:

    buildpackages:
      good_machine:
        disk: 40 # GB
        ram: 48000 # MB
        cpus: 16
      min_machine:
        disk: 40 # GB
        ram: 8000 # MB
        cpus: 1

    example:

    tasks:
    - buildpackages:
        good_machine:
          disk: 40 # GB
          ram: 15000 # MB
          cpus: 16
        min_machine:
          disk: 40 # GB
          ram: 8000 # MB
          cpus: 1
    - install:

    When a buildpackages task is already included, the values it contains can be
    overriden with:

    overrides:
      buildpackages:
        good_machine:
          disk: 20 # GB
          ram: 2000 # MB
          cpus: 2
        min_machine:
          disk: 10 # GB
          ram: 1000 # MB
          cpus: 1
    """
    log.info('Beginning buildpackages...')
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for config not ' + str(config)
    overrides = ctx.config.get('overrides', {})
    misc.deep_merge(config, overrides.get('buildpackages', {}))
    d = os.path.join(os.path.dirname(__file__), 'buildpackages')
    os_type = misc.get_distro(ctx)
    os_version = misc.get_distro_version(ctx)
    arch = ctx.config.get('arch', OpenStack().get_default_arch())
    dist = LocalGitbuilderProject()._get_distro(distro=os_type,
                                                version=os_version)
    pkg_type = get_pkg_type(os_type)
    # Prime the ssh agent once, serialized across concurrent jobs.
    misc.sh("flock --close /tmp/buildpackages " +
            "make -C " + d + " " + os.environ['HOME'] + "/.ssh_agent")
    for (flavor, tag, branch, sha1) in lookup_configs(ctx, ctx.config):
        # A tag or branch overrides any sha1 from the config.
        if tag:
            sha1 = get_sha1(tag)
        elif branch:
            sha1 = get_sha1(branch)
        log.info("building flavor = " + flavor + "," +
                 " tag = " + tag + "," +
                 " branch = " + branch + "," +
                 " sha1 = " + sha1)
        # Bug fix: dict.has_key() was removed in Python 3; use
        # dict.get(key, default), which is equivalent to the original
        # "default then overwrite if present" pattern.
        self_name = teuth_config.openstack.get('selfname', 'teuthology')
        key_name = teuth_config.openstack.get('keypair', 'teuthology')
        pkg_repo = teuth_config.openstack.get('package_repo',
                                              'packages-repository')
        security_group = teuth_config.openstack.get('server_group',
                                                    'teuthology')
        target = (self_name + '-ceph-' +
                  pkg_type + '-' +
                  dist + '-' +
                  arch + '-' +
                  flavor + '-' +
                  sha1)
        openstack = OpenStack()
        openstack.set_provider()
        network = openstack.net()
        if network != "":
            network = " OPENSTACK_NETWORK='" + network + "' "
        openstack.image(os_type, os_version, arch)  # create if it does not exist
        build_flavor = openstack.flavor_range(
            config['min_machine'], config['good_machine'], arch)
        default_arch = openstack.get_default_arch()
        http_flavor = openstack.flavor({
            'disk': 30,  # GB
            'ram': 1024,  # MB
            'cpus': 1,
        }, default_arch)
        # Serialize builds of the same sha1/distro across jobs.
        lock = "/tmp/buildpackages-" + sha1 + "-" + os_type + "-" + os_version
        cmd = (". " + os.environ['HOME'] + "/.ssh_agent ; " +
               " flock --close " + lock +
               " make -C " + d +
               network +
               " SELFNAME=" + self_name +
               " KEY_NAME=" + key_name +
               " PKG_REPO=" + pkg_repo +
               " SEC_GROUP=" + security_group +
               " CEPH_GIT_URL=" + teuth_config.get_ceph_git_url() +
               " CEPH_PKG_TYPE=" + pkg_type +
               " CEPH_OS_TYPE=" + os_type +
               " CEPH_OS_VERSION=" + os_version +
               " CEPH_DIST=" + dist +
               " CEPH_ARCH=" + arch +
               " CEPH_SHA1=" + sha1 +
               " CEPH_TAG=" + tag +
               " CEPH_BRANCH=" + branch +
               " CEPH_FLAVOR=" + flavor +
               " BUILD_FLAVOR=" + build_flavor +
               " HTTP_FLAVOR=" + http_flavor +
               " HTTP_ARCH=" + default_arch +
               " BUILDPACKAGES_CANONICAL_TAGS=" +
               ("true" if teuth_config.canonical_tags else "false") +
               " " + target +
               " ")
        log.info("Executing the following make command to build {} packages. "
                 "Note that some values in the command, like CEPH_GIT_URL "
                 "and BUILDPACKAGES_CANONICAL_TAGS, may differ from similar "
                 "command-line parameter values. This is because "
                 "the values used by this task are taken from the teuthology "
                 "configuration file. If in doubt, tear down your teuthology "
                 "instance and start again from scratch.".format(pkg_type))
        log.info("buildpackages make command: " + cmd)
        misc.sh(cmd)
    # Point later install tasks at the repository machine just built.
    teuth_config.gitbuilder_host = openstack.get_ip(pkg_repo, '')
    log.info('Finished buildpackages')
def test_argument(self):
    """A ctx-level os_type attribute is honored when no config exists."""
    # we don't want fake_ctx to have a config
    self.fake_ctx = Mock()
    self.fake_ctx.os_type = 'centos'
    assert get_distro(self.fake_ctx) == 'centos'
def reimage(ctx, machine_name):
    """Reimage ``machine_name`` via FOG using the job's distro settings."""
    fog_provisioner = fog.FOG(
        machine_name,
        get_distro(ctx),
        get_distro_version(ctx),
    )
    return fog_provisioner.create()
def test_teuth_config(self):
    """os_type from the job config is used when present."""
    self.fake_ctx.config = {'os_type': 'fedora'}
    assert get_distro(self.fake_ctx) == 'fedora'
def test_argument_takes_precedence(self):
    """A ctx-level os_type wins over the config's os_type."""
    self.fake_ctx.config = {'os_type': 'fedora'}
    self.fake_ctx.os_type = "centos"
    assert get_distro(self.fake_ctx) == 'centos'
def test_default_distro(self):
    """With no os_type anywhere, the default distro is ubuntu."""
    assert get_distro(self.fake_ctx) == 'ubuntu'
def task(ctx, config):
    """
    Build Ceph packages. This task will automagically be run
    before the task that need to install packages (this is taken
    care of by the internal teuthology task).

    The config should be as follows:

    buildpackages:
      machine:
        disk: 40 # GB
        ram: 15000 # MB
        cpus: 16

    example:

    tasks:
    - buildpackages:
        machine:
          disk: 40 # GB
          ram: 15000 # MB
          cpus: 16
    - install:
    """
    log.info('Beginning buildpackages...')
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'task only accepts a dict for config not ' + str(config)
    d = os.path.join(os.path.dirname(__file__), 'buildpackages')
    os_type = misc.get_distro(ctx)
    os_version = misc.get_distro_version(ctx)
    arch = ctx.config.get('arch', 'x86_64')
    dist = LocalGitbuilderProject()._get_distro(distro=os_type,
                                                version=os_version)
    pkg_type = get_pkg_type(os_type)
    # Prime the ssh agent once, serialized across concurrent jobs.
    misc.sh(
        "flock --close /tmp/buildpackages " +
        "make -C " + d + " " + os.environ['HOME'] + "/.ssh_agent")
    for (flavor, tag, branch, sha1) in lookup_configs(ctx, ctx.config):
        # A tag or branch overrides any sha1 from the config.
        if tag:
            sha1 = get_sha1(tag)
        elif branch:
            sha1 = get_sha1(branch)
        log.info("building flavor = " + flavor + "," +
                 " tag = " + tag + "," +
                 " branch = " + branch + "," +
                 " sha1 = " + sha1)
        target = ('ceph-' +
                  pkg_type + '-' +
                  dist + '-' +
                  arch + '-' +
                  flavor + '-' +
                  sha1)
        openstack = OpenStack()
        openstack.set_provider()
        # On OVH, restrict flavor selection to vps/eg flavors.
        if openstack.provider == 'ovh':
            select = '^(vps|eg)-'
        else:
            select = ''
        openstack.image(os_type, os_version)  # create if it does not exist
        build_flavor = openstack.flavor(config['machine'], select)
        http_flavor = openstack.flavor({
            'disk': 40,  # GB
            'ram': 1024,  # MB
            'cpus': 1,
        }, select)
        # Serialize builds of the same sha1 across concurrent jobs.
        cmd = (". " + os.environ['HOME'] + "/.ssh_agent ; " +
               " flock --close /tmp/buildpackages-" + sha1 +
               " make -C " + d +
               " CEPH_GIT_URL=" + teuth_config.get_ceph_git_url() +
               " CEPH_PKG_TYPE=" + pkg_type +
               " CEPH_OS_TYPE=" + os_type +
               " CEPH_OS_VERSION=" + os_version +
               " CEPH_DIST=" + dist +
               " CEPH_ARCH=" + arch +
               " CEPH_SHA1=" + sha1 +
               " CEPH_TAG=" + tag +
               " CEPH_BRANCH=" + branch +
               " CEPH_FLAVOR=" + flavor +
               " BUILD_FLAVOR=" + build_flavor +
               " HTTP_FLAVOR=" + http_flavor +
               " " + target +
               " ")
        log.info("buildpackages: " + cmd)
        misc.sh(cmd)
    # Point later install tasks at the repository machine just built.
    teuth_config.gitbuilder_host = openstack.get_ip('packages-repository', '')
    log.info('Finished buildpackages')
def main():
    """
    Teuthology entry point: parse arguments, assemble the internal
    init tasks plus the job's tasks, run them, and record/report the
    summary.

    NOTE(review): this is Python 2-era code (``file(...)``,
    ``StringIO.StringIO``) — do not run under Python 3 without porting.
    """
    # Monkey-patch before anything else touches sockets/DNS.
    from gevent import monkey
    monkey.patch_all(dns=False)
    from .orchestra import monkey
    monkey.patch_all()
    import logging

    ctx = parse_args()
    set_up_logging(ctx)
    log = logging.getLogger(__name__)

    if ctx.owner is None:
        from teuthology.misc import get_user
        ctx.owner = get_user()

    write_initial_metadata(ctx)

    # Sanity check: enough explicit targets for the requested roles.
    if 'targets' in ctx.config and 'roles' in ctx.config:
        targets = len(ctx.config['targets'])
        roles = len(ctx.config['roles'])
        assert targets >= roles, \
            '%d targets are needed for all roles but found %d listed.' % (roles, targets)

    # machine type: CLI flag, then 'machine-type', then 'machine_type',
    # then the hard-coded 'plana' default.
    machine_type = ctx.machine_type
    if machine_type is None:
        fallback_default = ctx.config.get('machine_type', 'plana')
        machine_type = ctx.config.get('machine-type', fallback_default)

    if ctx.block:
        assert ctx.lock, \
            'the --block option is only supported with the --lock option'

    from teuthology.misc import read_config
    read_config(ctx)

    log.debug('\n '.join([
        'Config:',
    ] + yaml.safe_dump(ctx.config, default_flow_style=False).splitlines()))

    ctx.summary = dict(success=True)
    ctx.summary['owner'] = ctx.owner
    if ctx.description is not None:
        ctx.summary['description'] = ctx.description

    # Kernel installs are handled via init tasks below, never inline.
    for task in ctx.config['tasks']:
        assert 'kernel' not in task, \
            'kernel installation shouldn be a base-level item, not part of the tasks list'

    init_tasks = []
    if ctx.lock:
        assert 'targets' not in ctx.config, \
            'You cannot specify targets in a config file when using the --lock option'
        init_tasks.append({
            'internal.lock_machines': (len(ctx.config['roles']), machine_type)
        })

    init_tasks.extend([
        {'internal.save_config': None},
        {'internal.check_lock': None},
        {'internal.connect': None},
        {'internal.check_conflict': None},
        {'internal.check_ceph_data': None},
        {'internal.vm_setup': None},
    ])
    # Kernel task only supported on ubuntu at this point.
    if 'kernel' in ctx.config:
        from teuthology.misc import get_distro
        distro = get_distro(ctx)
        if distro == 'ubuntu':
            init_tasks.append({'kernel': ctx.config['kernel']})
    init_tasks.extend([
        {'internal.base': None},
        {'internal.archive': None},
        {'internal.coredump': None},
        {'internal.sudo': None},
        {'internal.syslog': None},
        {'internal.timer': None},
    ])

    # Prepend the internal tasks to the job's task list.
    ctx.config['tasks'][:0] = init_tasks

    from teuthology.run_tasks import run_tasks
    try:
        run_tasks(tasks=ctx.config['tasks'], ctx=ctx)
    finally:
        if not ctx.summary.get('success') and ctx.config.get('nuke-on-error'):
            from teuthology.nuke import nuke
            # only unlock if we locked them in the first place
            nuke(ctx, log, ctx.lock)
        if ctx.archive is not None:
            with file(os.path.join(ctx.archive, 'summary.yaml'), 'w') as f:
                yaml.safe_dump(ctx.summary, f, default_flow_style=False)
        with contextlib.closing(StringIO.StringIO()) as f:
            yaml.safe_dump(ctx.summary, f)
            log.info('Summary data:\n%s' % f.getvalue())
        with contextlib.closing(StringIO.StringIO()) as f:
            if 'email-on-error' in ctx.config and not ctx.summary.get(
                    'success', False):
                yaml.safe_dump(ctx.summary, f)
                yaml.safe_dump(ctx.config, f)
                emsg = f.getvalue()
                subject = "Teuthology error -- %s" % ctx.summary[
                    'failure_reason']
                from teuthology.suite import email_results
                email_results(subject, "Teuthology",
                              ctx.config['email-on-error'], emsg)
        if ctx.summary.get('success', True):
            log.info('pass')
        else:
            log.info('FAIL')
            import sys
            sys.exit(1)
def create_if_vm(ctx, machine_name):
    """
    If ``machine_name`` has a physical VPS host recorded in its lock
    status, drive downburst on that host to create the guest.

    :returns: False when the machine is not a VM or downburst cannot
              be found, True otherwise.
    """
    status = ls.get_status(ctx, machine_name)
    phys_host = status['vpshost']
    if not phys_host:
        return False
    from teuthology.misc import get_distro
    os_type = get_distro(ctx)
    # Fallback OS versions, used when the job config supplies none.
    version_defaults = {
        'ubuntu': "12.04",
        'fedora': "18",
        'centos': "6.4",
        'opensuse': "12.2",
        'sles': "11-sp2",
        'rhel': "6.3",
        'debian': '6.0',
    }
    target = decanonicalize_hostname(machine_name)
    with tempfile.NamedTemporaryFile() as meta_fp:
        try:
            cfg = ctx.config['downburst']
        except (KeyError, AttributeError):
            cfg = {}
        distro = cfg.get('distro', os_type.lower())
        try:
            distroversion = ctx.config.get('os_version',
                                           version_defaults[distro])
        except AttributeError:
            distroversion = version_defaults[distro]
        guest_spec = {
            'disk-size': cfg.get('disk-size', '30G'),
            'ram': cfg.get('ram', '1.9G'),
            'cpus': cfg.get('cpus', 1),
            'networks': cfg.get('networks', [{
                'source': 'front',
                'mac': status['mac']
            }]),
            'distro': distro,
            'distroversion': distroversion,
            'additional-disks': cfg.get('additional-disks', 3),
            'additional-disks-size': cfg.get('additional-disks-size', '200G'),
            'arch': cfg.get('arch', 'x86_64'),
        }
        yaml.safe_dump({'downburst': guest_spec}, meta_fp)
        metadata = "--meta-data=%s" % meta_fp.name
        exe = _get_downburst_exec()
        if not exe:
            log.error("No downburst executable found.")
            return False
        child = subprocess.Popen(
            [exe, '-c', phys_host, 'create', metadata, target],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        owt, err = child.communicate()
        if err:
            log.info("Downburst completed on %s: %s" % (machine_name, err))
        else:
            log.info("%s created: %s" % (machine_name, owt))
        #If the guest already exists first destroy then re-create:
        if 'exists' in err:
            log.info("Guest files exist. Re-creating guest: %s" % (machine_name))
            destroy_if_vm(ctx, machine_name)
            create_if_vm(ctx, machine_name)
    return True
def main():
    """
    Teuthology entry point: parse args, build the internal init task
    list ahead of the job's tasks, execute everything, then persist and
    report the run summary.

    NOTE(review): Python 2-era code (``file(...)``,
    ``StringIO.StringIO``) — porting required before running on
    Python 3.
    """
    # Monkey-patch before anything else creates sockets.
    from gevent import monkey
    monkey.patch_all(dns=False)
    from .orchestra import monkey
    monkey.patch_all()
    import logging

    ctx = parse_args()
    set_up_logging(ctx)
    log = logging.getLogger(__name__)

    if ctx.owner is None:
        from teuthology.misc import get_user
        ctx.owner = get_user()

    write_initial_metadata(ctx)

    # Sanity check: enough explicit targets for the requested roles.
    if 'targets' in ctx.config and 'roles' in ctx.config:
        targets = len(ctx.config['targets'])
        roles = len(ctx.config['roles'])
        assert targets >= roles, \
            '%d targets are needed for all roles but found %d listed.' % (roles, targets)

    # machine type resolution: CLI flag > 'machine-type' > 'machine_type'
    # > hard-coded 'plana'.
    machine_type = ctx.machine_type
    if machine_type is None:
        fallback_default = ctx.config.get('machine_type', 'plana')
        machine_type = ctx.config.get('machine-type', fallback_default)

    if ctx.block:
        assert ctx.lock, \
            'the --block option is only supported with the --lock option'

    from teuthology.misc import read_config
    read_config(ctx)

    log.debug('\n '.join(['Config:', ] + yaml.safe_dump(
        ctx.config, default_flow_style=False).splitlines()))

    ctx.summary = dict(success=True)
    ctx.summary['owner'] = ctx.owner
    if ctx.description is not None:
        ctx.summary['description'] = ctx.description

    # Kernel installs must go through the init tasks, never inline.
    for task in ctx.config['tasks']:
        assert 'kernel' not in task, \
            'kernel installation shouldn be a base-level item, not part of the tasks list'

    init_tasks = []
    if ctx.lock:
        assert 'targets' not in ctx.config, \
            'You cannot specify targets in a config file when using the --lock option'
        init_tasks.append({'internal.lock_machines':
                           (len(ctx.config['roles']), machine_type)})

    init_tasks.extend([
        {'internal.save_config': None},
        {'internal.check_lock': None},
        {'internal.connect': None},
        {'internal.check_conflict': None},
        {'internal.check_ceph_data': None},
        {'internal.vm_setup': None},
    ])
    # Kernel task only supported on ubuntu at this point.
    if 'kernel' in ctx.config:
        from teuthology.misc import get_distro
        distro = get_distro(ctx)
        if distro == 'ubuntu':
            init_tasks.append({'kernel': ctx.config['kernel']})
    init_tasks.extend([
        {'internal.base': None},
        {'internal.archive': None},
        {'internal.coredump': None},
        {'internal.sudo': None},
        {'internal.syslog': None},
        {'internal.timer': None},
    ])

    # Prepend the internal tasks to the job's task list.
    ctx.config['tasks'][:0] = init_tasks

    from teuthology.run_tasks import run_tasks
    try:
        run_tasks(tasks=ctx.config['tasks'], ctx=ctx)
    finally:
        if not ctx.summary.get('success') and ctx.config.get('nuke-on-error'):
            from teuthology.nuke import nuke
            # only unlock if we locked them in the first place
            nuke(ctx, log, ctx.lock)
        if ctx.archive is not None:
            with file(os.path.join(ctx.archive, 'summary.yaml'), 'w') as f:
                yaml.safe_dump(ctx.summary, f, default_flow_style=False)
        with contextlib.closing(StringIO.StringIO()) as f:
            yaml.safe_dump(ctx.summary, f)
            log.info('Summary data:\n%s' % f.getvalue())
        with contextlib.closing(StringIO.StringIO()) as f:
            if 'email-on-error' in ctx.config and not ctx.summary.get('success', False):
                yaml.safe_dump(ctx.summary, f)
                yaml.safe_dump(ctx.config, f)
                emsg = f.getvalue()
                subject = "Teuthology error -- %s" % ctx.summary['failure_reason']
                from teuthology.suite import email_results
                email_results(subject, "Teuthology", ctx.config['email-on-error'], emsg)
        if ctx.summary.get('success', True):
            log.info('pass')
        else:
            log.info('FAIL')
            import sys
            sys.exit(1)
def test_config_os_type_is_none(self):
    """An explicit None os_type in the config falls back to ubuntu."""
    self.fake_ctx.config["os_type"] = None
    assert get_distro(self.fake_ctx) == 'ubuntu'
def test_no_config_or_os_type(self):
    """With neither a config nor an os_type, the default is ubuntu."""
    self.fake_ctx = Mock()
    self.fake_ctx.os_type = None
    assert get_distro(self.fake_ctx) == 'ubuntu'