Example #1
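This snippet (apparently from the OPNFV Doctor test suite) builds a per-test logger: the console handler prints INFO and above, or DEBUG when the CI_DEBUG environment variable is set to "true", while a file handler always records DEBUG messages to <logger_name>.log under the test root directory. It assumes import logging and import os at module level, plus the get_doctor_test_root_dir() helper in scope.
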
    def __init__(self, logger_name):

        CI_DEBUG = os.getenv('CI_DEBUG')

        self.logger = logging.getLogger(logger_name)
        # keep records out of the root logger; the handlers below decide output
        self.logger.propagate = False
        self.logger.setLevel(logging.DEBUG)

        formatter = logging.Formatter('%(asctime)s %(filename)s %(lineno)d '
                                      '%(levelname)-6s %(message)s')

        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        if CI_DEBUG is not None and CI_DEBUG.lower() == "true":
            ch.setLevel(logging.DEBUG)
        else:
            ch.setLevel(logging.INFO)
        self.logger.addHandler(ch)

        test_dir = get_doctor_test_root_dir()
        self.filename = '{0}/{1}.log'.format(test_dir, logger_name)
        file_handler = logging.FileHandler(self.filename, mode='w')
        file_handler.setFormatter(formatter)
        file_handler.setLevel(logging.DEBUG)
        self.logger.addHandler(file_handler)
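
A minimal usage sketch, assuming the snippet above is the __init__ of a wrapper class named Logger (the class name is hypothetical; only the attributes shown above are known):

    log = Logger('sample').logger  # 'Logger' is an assumed class name
    log.info('printed to the console and written to sample.log')
    log.debug('always written to sample.log; on console only when CI_DEBUG=true')
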
Example #2
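This helper copies the installer's SSH private key into the test root directory over SCP, then fixes its ownership and tightens its permissions so SSH will accept it. It assumes import getpass, import grp, import os, import pwd and import stat, plus a client object exposing an scp() method.
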
    def _get_ssh_key(self, client, key_path):
        self.log.info('Get SSH keys from %s installer......'
                      % self.conf.installer.type)

        if self.key_file is not None:
            self.log.info('Already have SSH keys from %s installer......'
                          % self.conf.installer.type)
            return self.key_file

        ssh_key = '{0}/{1}'.format(get_doctor_test_root_dir(), 'instack_key')
        client.scp(key_path, ssh_key, method='get')
        user = getpass.getuser()
        uid = pwd.getpwnam(user).pw_uid
        gid = grp.getgrnam(user).gr_gid
        os.chown(ssh_key, uid, gid)
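        # owner read-only (0400): SSH refuses private keys with looser permissions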
        os.chmod(ssh_key, stat.S_IREAD)
        return ssh_key
Example #3
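This method pushes the patch scripts to every controller node over SSH and restarts the affected services; when Fenix is the admin tool it also copies the Fenix Dockerfile and run script to each controller, and outside the fault_management test case it repeats the patching on the compute nodes.
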
    def set_apply_patches(self):
        self.log.info('Set apply patches start......')
        fenix_files = None

        set_scripts = [self.cm_set_script]

        restart_cmd = 'sudo systemctl restart' \
                      ' ceilometer-agent-notification.service'

        if self.conf.test_case != 'fault_management':
            if is_fenix(self.conf):
                set_scripts.append(self.fe_set_script)
                testdir = get_doctor_test_root_dir()
                fenix_files = ["Dockerfile", "run"]
            restart_cmd += ' nova-scheduler.service'
            set_scripts.append(self.nc_set_compute_script)

        for node_ip in self.controllers:
            client = SSHClient(node_ip,
                               self.node_user_name,
                               key_filename=self.key_file)
            if fenix_files is not None:
                for fenix_file in fenix_files:
                    src_file = '{0}/{1}/{2}'.format(testdir,
                                                    'admin_tool/fenix',
                                                    fenix_file)
                    client.scp(src_file, fenix_file)
            self._run_apply_patches(client,
                                    restart_cmd,
                                    set_scripts,
                                    python=self.python)
        time.sleep(5)

        self.log.info('Set apply patches to compute nodes......')

        if self.conf.test_case != 'fault_management':
            restart_cmd = 'sudo systemctl restart nova-compute.service'
            for node_ip in self.computes:
                client = SSHClient(node_ip,
                                   self.node_user_name,
                                   key_filename=self.key_file)
                self._run_apply_patches(client,
                                        restart_cmd,
                                        [self.nc_set_compute_script],
                                        python=self.python)
            time.sleep(5)
Example #4
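This constructor wires up the scenario's dependencies: an authenticated Nova client for the doctor project, plus the network, instance, alarm, inspector, monitor and consumer helpers the test drives.
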
    def __init__(self, conf, installer, user, log, transport_url):
        self.conf = conf
        self.log = log
        self.user = user
        self.installer = installer
        auth = get_identity_auth(project=self.conf.doctor_project)
        self.nova = nova_client(self.conf.nova_version, get_session(auth=auth))
        self.test_dir = get_doctor_test_root_dir()
        self.down_host = None
        self.GetLog = False
        self.disable_network_log = None
        self.network = Network(self.conf, log)
        self.instance = Instance(self.conf, log)
        self.alarm = Alarm(self.conf, log)
        self.inspector = get_inspector(self.conf, log, transport_url)
        self.monitor = get_monitor(self.conf,
                                   self.inspector.get_inspector_url(), log)
        self.consumer = get_consumer(self.conf, log)
Example #5
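This method prepares the maintenance test: it checks that every hypervisor reports the same number of vCPUs and that none are in use, sizes a flavor so that two instances fill one compute node, raises the project quota to fit all test instances, and creates a Heat stack from maintenance_hot_tpl.yaml before starting the admin tool, app manager and inspector.
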
    def setup_maintenance(self, user):
        # each hypervisor needs to have same amount of vcpus and they
        # need to be free before test
        hvisors = self.nova.hypervisors.list(detailed=True)
        prev_vcpus = 0
        prev_hostname = ''
        self.log.info('checking hypervisors.......')
        for hvisor in hvisors:
            vcpus = hvisor.vcpus
            vcpus_used = hvisor.vcpus_used
            hostname = hvisor.hypervisor_hostname
            if vcpus < 2:
                raise Exception('not enough vcpus (%d) on %s' %
                                (vcpus, hostname))
            if vcpus_used > 0:
                if self.conf.test_case == 'all':
                    # VCPU might not yet be free after fault_management test
                    self.log.info('%d vcpus used on %s, retry...' %
                                  (vcpus_used, hostname))
                    time.sleep(15)
                    hvisor = self.nova.hypervisors.get(hvisor.id)
                    vcpus_used = hvisor.vcpus_used
                if vcpus_used > 0:
                    raise Exception('%d vcpus used on %s' %
                                    (vcpus_used, hostname))
            if prev_vcpus != 0 and prev_vcpus != vcpus:
                raise Exception('%d vcpus on %s does not match '
                                '%d on %s' %
                                (vcpus, hostname, prev_vcpus, prev_hostname))
            prev_vcpus = vcpus
            prev_hostname = hostname

        # the maintenance flavor is sized so that two instances fill a node
        flavor_vcpus = vcpus // 2
        compute_nodes = len(hvisors)
        amount_actstdby_instances = 2
        amount_noredundancy_instances = 2 * compute_nodes - 2
        self.log.info('testing %d computes with %d vcpus each' %
                      (compute_nodes, vcpus))
        self.log.info(
            'testing %d actstdby and %d noredundancy instances' %
            (amount_actstdby_instances, amount_noredundancy_instances))
        max_instances = (amount_actstdby_instances +
                         amount_noredundancy_instances)
        max_cores = compute_nodes * vcpus

        user.update_quota(max_instances, max_cores)

        test_dir = get_doctor_test_root_dir()
        template_file = '{0}/{1}'.format(test_dir, 'maintenance_hot_tpl.yaml')
        files, template = self.stack.get_hot_tpl(template_file)

        ext_net = self.get_external_network()

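        # parameter keys must match the parameter names declared in the
        # HOT template (note the 'intances' spelling, kept to match it)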
        parameters = {
            'ext_net': ext_net,
            'flavor_vcpus': flavor_vcpus,
            'maint_image': self.conf.image_name,
            'nonha_intances': amount_noredundancy_instances,
            'ha_intances': amount_actstdby_instances
        }

        self.log.info('creating maintenance stack.......')
        self.log.info('parameters: %s' % parameters)

        self.stack.create('doctor_test_maintenance',
                          template,
                          parameters=parameters,
                          files=files)

        if self.conf.admin_tool.type == 'sample':
            self.admin_tool.start()
        else:
            # TBD Now we expect Fenix is running in self.conf.admin_tool.port
            pass
        self.app_manager.start()
        self.inspector.start()