Example #1
0
def delete_host(host_id, cluster_id_list, username=None):
    """Delete a host together with all clusterhosts living on it.

    :param host_id: id of the host.
    :type host_id: int
    :param cluster_id_list: ids of the clusters the host belongs to.
    :param username: name of the user requesting the deletion.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete host')

        user = user_db.get_user_object(username)
        for current_cluster_id in cluster_id_list:
            cluster_info = util.ActionHelper.get_cluster_info(
                current_cluster_id, user)
            adapter_id = cluster_info[const.ADAPTER_ID]
            adapter_info = util.ActionHelper.get_adapter_info(
                adapter_id, current_cluster_id, user)
            hosts_info = util.ActionHelper.get_hosts_info(
                current_cluster_id, [host_id], user)

            # Remove only the package layer per cluster; the host record
            # itself is deleted once, after the loop.
            manager = DeployManager(adapter_info, cluster_info, hosts_info)
            manager.remove_hosts(package_only=True, delete_cluster=False)

        util.ActionHelper.delete_host(host_id, user)
    def setUp(self):
        """Build sqlite-backed fixtures and resolve adapter/os/flavor ids."""
        super(MetadataTestCase, self).setUp()
        reload(setting)
        # Point the config loader at the fixture data next to this file.
        setting.CONFIG_DIR = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'data'
        )
        database.init('sqlite://')
        database.create_db()
        adapter.load_adapters()
        metadata.load_metadatas()

        # Get a os_id and adapter_id
        self.user_object = (
            user_api.get_user_object(
                setting.COMPASS_ADMIN_EMAIL
            )
        )
        self.adapter_object = adapter.list_adapters(self.user_object)
        test_adapter = None
        for adapter_obj in self.adapter_object:
            if adapter_obj['name'] == 'openstack_icehouse':
                self.adapter_id = adapter_obj['id']
                test_adapter = adapter_obj
                break
        self.os_id = None
        # NOTE(review): if no 'openstack_icehouse' adapter is listed,
        # test_adapter stays None and the subscript below raises TypeError.
        if test_adapter['flavors']:
            # Only the first supported os is used (loop breaks immediately).
            for supported_os in test_adapter['supported_oses']:
                self.os_id = supported_os['os_id']
                break
            for flavor in test_adapter['flavors']:
                if flavor['name'] == 'HA-multinodes':
                    self.flavor_id = flavor['id']
                    break
Example #3
0
def set_switch_machines():
    """Load switches and machines from --switch_machines_file.

    .. note::
       --switch_machines_file is the filename which stores all switches
       and machines information.
       each line in fake_switches_files presents one machine.
       the format of each line machine,<switch_ip>,<switch_port>,<vlan>,<mac>
       or switch,<switch_ip>,<switch_vendor>,<switch_version>,
       <switch_community>,<switch_state>
    """
    if not flags.OPTIONS.switch_machines_file:
        print('flag --switch_machines_file is missing')
        return
    database.init()
    switches, switch_machines = util.get_switch_machines_from_file(
        flags.OPTIONS.switch_machines_file)
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)

    # Register every switch first and remember its id by ip.
    ip_to_switch_id = {}
    for switch in switches:
        created = switch_api.add_switch(False, user=user, **switch)
        ip_to_switch_id[switch['ip']] = created['id']

    # Attach each machine to the switch it was listed under.
    for switch_ip, machines in switch_machines.items():
        if switch_ip not in ip_to_switch_id:
            print('switch ip %s not found' % switch_ip)
            sys.exit(1)
        switch_id = ip_to_switch_id[switch_ip]
        for machine in machines:
            switch_api.add_switch_machine(
                switch_id, False, user=user, **machine)
Example #4
0
    def setUp(self):
        """Create sqlite fixtures and resolve adapter/os/flavor ids."""
        super(MetadataTestCase, self).setUp()
        os.environ['COMPASS_IGNORE_SETTING'] = 'true'
        # Point the config loader at the fixture data next to this file.
        os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'data')
        reload(setting)
        database.init('sqlite://')
        database.create_db()
        adapter.load_adapters(force_reload=True)
        metadata.load_metadatas(force_reload=True)
        adapter.load_flavors(force_reload=True)

        # Get a os_id and adapter_id
        self.user_object = (user_api.get_user_object(
            setting.COMPASS_ADMIN_EMAIL))
        self.adapter_object = adapter.list_adapters(self.user_object)
        test_adapter = None
        for adapter_obj in self.adapter_object:
            if adapter_obj['name'] == 'openstack_icehouse':
                self.adapter_id = adapter_obj['id']
                test_adapter = adapter_obj
                break
        self.os_id = None
        # NOTE(review): test_adapter stays None when no openstack_icehouse
        # adapter exists, which makes the next line raise TypeError.
        if test_adapter['flavors']:
            # Only the first supported os is used (loop breaks immediately).
            for supported_os in test_adapter['supported_oses']:
                self.os_id = supported_os['os_id']
                break
            for flavor in test_adapter['flavors']:
                if flavor['name'] == 'HA-multinodes':
                    self.flavor_id = flavor['id']
                    break
Example #5
0
 def test_delete_cluster_not_editable(self):
     """Deleting a cluster that is INSTALLING must be rejected with 403."""
     self.user_object = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
     cluster_api.update_cluster_state(
         1, state="INSTALLING", user=self.user_object
     )
     response = self.delete("/clusters/1")
     self.assertEqual(response.status_code, 403)
def cluster_installed(cluster_id, clusterhosts_ready, username=None):
    """Callback when cluster is installed.

    :param cluster_id: cluster id
    :param clusterhosts_ready: clusterhosts that should trigger ready;
        maps host id to a ready flag.
    :param username: name of the acting user; when empty the helpers
        below receive user=None.

    .. note::
        The function should be called out of database session.
    """
    with util.lock("serialized_action") as lock:
        if not lock:
            raise Exception("failed to acquire lock to " "do the post action after cluster installation")
        logging.info("package installed on cluster %s with clusterhosts ready %s", cluster_id, clusterhosts_ready)
        if username:
            user = user_db.get_user_object(username)
        else:
            user = None
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = util.ActionHelper.get_adapter_info(adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(cluster_id, clusterhosts_ready.keys(), user)

        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)

        # Run the installer's post-install hook, then mark the cluster ready.
        deploy_manager.cluster_installed()
        util.ActionHelper.cluster_ready(cluster_id, True, user)
        # Mark each clusterhost whose flag is set as ready as well.
        for host_id, clusterhost_ready in clusterhosts_ready.items():
            if clusterhost_ready:
                util.ActionHelper.cluster_host_ready(cluster_id, host_id, False, user)
Example #7
0
def authenticate_user(email, password, **kwargs):
    """Look up the user for *email* and verify *password*.

    Returns the authenticated user object; authentication failures are
    raised by user.authenticate().
    """
    candidate = user_api.get_user_object(email, **kwargs)
    candidate.authenticate(password)
    return candidate
Example #8
0
def delete_cluster_host(
    cluster_id, host_id,
    username=None, delete_underlying_host=False
):
    """Delete one clusterhost from a cluster.

    :param cluster_id: id of the cluster.
    :param host_id: id of the host whose clusterhost is removed.
    :param username: name of the user requesting the deletion.
    :param delete_underlying_host: when True the underlying host is
        removed as well (remove_hosts is then not package_only).
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete clusterhost')

        user = user_db.get_user_object(username)
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, [host_id], user)

        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
        logging.debug('Created deploy manager with %s %s %s'
                      % (adapter_info, cluster_info, hosts_info))

        deploy_manager.remove_hosts(
            package_only=not delete_underlying_host,
            delete_cluster=False
        )
        util.ActionHelper.delete_cluster_host(
            cluster_id, host_id, user,
            delete_underlying_host
        )
Example #9
0
def delete_host(
    host_id, cluster_id_list, username=None
):
    """Delete a host and its clusterhosts from every listed cluster.

    :param host_id: id of the host.
    :param cluster_id_list: ids of the clusters the host belongs to.
    :param username: name of the user requesting the deletion.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete host')

        user = user_db.get_user_object(username)
        for cluster_id in cluster_id_list:
            cluster_info = util.ActionHelper.get_cluster_info(
                cluster_id, user)
            adapter_id = cluster_info[const.ADAPTER_ID]

            adapter_info = util.ActionHelper.get_adapter_info(
                adapter_id, cluster_id, user)
            hosts_info = util.ActionHelper.get_hosts_info(
                cluster_id, [host_id], user)

            deploy_manager = DeployManager(
                adapter_info, cluster_info, hosts_info)

            # Only the package layer is removed per cluster; the host
            # record itself is deleted once, after the loop.
            deploy_manager.remove_hosts(
                package_only=True,
                delete_cluster=False
            )

        util.ActionHelper.delete_host(
            host_id, user
        )
Example #10
0
def health_check(cluster_id, report_uri, username):
    """Run a health check on a cluster and record the outcome.

    :param cluster_id: id of the cluster to check.
    :param report_uri: URI the remote check reports back to.
    :param username: name of the user requesting the check.
    """
    with util.lock('cluster_health_check') as lock:
        if not lock:
            raise Exception('failed to acquire lock to check health')

        user = user_db.get_user_object(username)
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user
        )

        # hosts_info is not needed for a health check, hence None.
        deploy_manager = DeployManager(adapter_info, cluster_info, None)
        try:
            deploy_manager.check_cluster_health(report_uri)
        except Exception as exc:
            logging.error("health_check exception: ============= %s" % exc)
            data = {'state': 'error', 'error_message': str(exc), 'report': {}}
            reports = health_check_db.list_health_reports(
                cluster_id, user=user)
            if not reports:
                # Exception before executing command remotely for health check.
                # No reports names sending back yet. Create a report
                name = 'pre_remote_health_check'
                health_check_db.add_report_record(
                    cluster_id, name, user=user, **data
                )

            # Flag all existing reports for this cluster as errored.
            health_check_db.update_multi_reports(cluster_id, user=user, **data)
Example #11
0
def set_switch_machines():
    """Set switches and machines.

    .. note::
       --switch_machines_file is the filename which stores all switches
       and machines information.
       each line in fake_switches_files presents one machine.
       the format of each line machine,<switch_ip>,<switch_port>,<vlan>,<mac>
       or switch,<switch_ip>,<switch_vendor>,<switch_version>,
       <switch_community>,<switch_state>
    """
    if not flags.OPTIONS.switch_machines_file:
        print 'flag --switch_machines_file is missing'
        return
    database.init()
    switches, switch_machines = util.get_switch_machines_from_file(
        flags.OPTIONS.switch_machines_file)
    user = user_api.get_user_object(
        setting.COMPASS_ADMIN_EMAIL
    )
    # Create every switch first and remember its id by ip.
    switch_mapping = {}
    for switch in switches:
        added_switch = switch_api.add_switch(
            user, False, **switch
        )
        switch_mapping[switch['ip']] = added_switch['id']
    # Attach each machine to the switch it was listed under; a machine
    # referencing an unknown switch ip aborts the whole run.
    for switch_ip, machines in switch_machines.items():
        if switch_ip not in switch_mapping:
            print 'switch ip %s not found' % switch_ip
            sys.exit(1)
        switch_id = switch_mapping[switch_ip]
        for machine in machines:
            switch_api.add_switch_machine(
                user, switch_id, False, **machine
            )
Example #12
0
def deploy(cluster_id, hosts_id_list, username=None):
    """Deploy a cluster.

    :param cluster_id: id of the cluster to deploy.
    :type cluster_id: int
    :param hosts_id_list: ids of the hosts to deploy within the cluster.
    :type hosts_id_list: list of int
    :param username: name of the user triggering the deployment.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=1000) as lock:
        if not lock:
            raise Exception('failed to acquire lock to deploy')

        user = user_db.get_user_object(username)

        cluster_info = ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = ActionHelper.get_adapter_info(adapter_id, cluster_id,
                                                     user)
        hosts_info = ActionHelper.get_hosts_info(cluster_id, hosts_id_list,
                                                 user)

        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
        #deploy_manager.prepare_for_deploy()
        logging.debug('Created deploy manager with %s %s %s'
                      % (adapter_info, cluster_info, hosts_info))

        # Persist the config the deployment produced, then update states.
        deployed_config = deploy_manager.deploy()
        ActionHelper.save_deployed_config(deployed_config, user)
        ActionHelper.update_state(cluster_id, hosts_id_list, user)
Example #13
0
def redeploy(cluster_id, hosts_id_list, username=None):
    """Redeploy an existing cluster.

    :param cluster_id: id of the cluster to redeploy.
    :param hosts_id_list: ids of the hosts in the cluster to redeploy.
    :param username: name of the user triggering the redeploy.
    """
    with util.lock('serialized_action') as lock:
        if not lock:
            raise Exception('failed to acquire lock to deploy')

        user = user_db.get_user_object(username)
        cluster_info = ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = ActionHelper.get_adapter_info(adapter_id,
                                                     cluster_id,
                                                     user)
        hosts_info = ActionHelper.get_hosts_info(cluster_id,
                                                 hosts_id_list,
                                                 user)

        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
        # deploy_manager.prepare_for_deploy()
        deploy_manager.redeploy()
        ActionHelper.update_state(cluster_id, hosts_id_list, user)
Example #14
0
    def setUp(self):
        """Prepare sqlite fixtures with a mocked adapter config list."""
        super(AdapterTestCase, self).setUp()
        reload(setting)
        setting.CONFIG_DIR = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'data'
        )
        database.init('sqlite://')
        database.create_db()
        self.user_object = (
            user_api.get_user_object(
                setting.COMPASS_ADMIN_EMAIL
            )
        )

        # Patch util.load_configs with a mock; the original is saved so it
        # can be restored elsewhere (tearDown is outside this view).
        mock_config = mock.Mock()
        self.backup_adapter_configs = util.load_configs
        util.load_configs = mock_config
        # NOTE(review): 'DISLAY_NAME' looks like a typo of 'DISPLAY_NAME' --
        # confirm against the adapter config schema before changing it.
        configs = [{
            'NAME': 'openstack_test',
            'DISLAY_NAME': 'Test OpenStack Icehouse',
            'PACKAGE_INSTALLER': 'chef_installer',
            'OS_INSTALLER': 'cobbler',
            'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'],
            'DEPLOYABLE': True
        }]
        util.load_configs.return_value = configs
        with database.session() as session:
            adapter_api.add_adapters_internal(session)
        adapter.load_adapters()
        self.adapter_object = adapter.list_adapters(user=self.user_object)
        for adapter_obj in self.adapter_object:
            if adapter_obj['name'] == 'openstack_icehouse':
                self.adapter_id = adapter_obj['id']
                break
    def setUp(self):
        """Load adapters/flavors with mocked configs and pick test ids."""
        super(AdapterTestCase, self).setUp()
        os.environ['COMPASS_IGNORE_SETTING'] = 'true'
        # Point the config loader at the fixture data next to this file.
        os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'data'
        )
        reload(setting)
        database.init('sqlite://')
        database.create_db()
        self.user_object = (
            user_api.get_user_object(
                setting.COMPASS_ADMIN_EMAIL
            )
        )

        # Patch util.load_configs; the original is saved so it can be
        # restored elsewhere (tearDown is outside this view).
        mock_config = mock.Mock(side_effect=self._mock_load_configs)
        self.backup_adapter_configs = util.load_configs
        util.load_configs = mock_config
        adapter.load_adapters(force_reload=True)
        adapter.load_flavors(force_reload=True)
        self.adapter_object = adapter.list_adapters(user=self.user_object)
        self.adapter_obj = None
        self.adapter_id = None
        self.flavor_id = None
        for adapter_obj in self.adapter_object:
            if adapter_obj['name'] == 'openstack_icehouse':
                self.adapter_obj = adapter_obj
                self.adapter_id = adapter_obj['id']
                break

        # NOTE(review): if no openstack_icehouse adapter was found,
        # self.adapter_obj is None and the loop below raises TypeError.
        for flavor in self.adapter_obj['flavors']:
            if flavor['name'] == 'HA-multinodes':
                self.flavor_id = flavor['id']
                break
Example #16
0
def health_check(cluster_id, report_uri, username):
    """Check cluster health and persist the resulting reports.

    :param cluster_id: id of the cluster to check.
    :param report_uri: URI the remote check reports back to.
    :param username: name of the user requesting the check.
    """
    with util.lock('cluster_health_check') as lock:
        if not lock:
            raise Exception('failed to acquire lock to check health')

        user = user_db.get_user_object(username)
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user
        )

        # hosts_info is not needed for a health check, hence None.
        deploy_manager = DeployManager(adapter_info, cluster_info, None)
        try:
            deploy_manager.check_cluster_health(report_uri)
        except Exception as exc:
            logging.error("health_check exception: ============= %s" % exc)
            data = {'state': 'error', 'error_message': str(exc), 'report': {}}
            reports = health_check_db.list_health_reports(
                cluster_id, user=user)
            if not reports:
                # Exception before executing command remotely for health check.
                # No reports names sending back yet. Create a report
                name = 'pre_remote_health_check'
                health_check_db.add_report_record(
                    cluster_id, name, user=user, **data
                )

            # Flag all existing reports for this cluster as errored.
            health_check_db.update_multi_reports(cluster_id, user=user, **data)
Example #17
0
    def setUp(self):
        """Prepare mocked adapter fixtures and resolve adapter/flavor ids."""
        super(AdapterTestCase, self).setUp()
        os.environ['COMPASS_IGNORE_SETTING'] = 'true'
        # Point the config loader at the fixture data next to this file.
        os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'data')
        reload(setting)
        database.init('sqlite://')
        database.create_db()
        self.user_object = (user_api.get_user_object(
            setting.COMPASS_ADMIN_EMAIL))

        # Patch util.load_configs; the original is saved so it can be
        # restored elsewhere (tearDown is outside this view).
        mock_config = mock.Mock(side_effect=self._mock_load_configs)
        self.backup_adapter_configs = util.load_configs
        util.load_configs = mock_config
        adapter.load_adapters(force_reload=True)
        adapter.load_flavors(force_reload=True)
        self.adapter_object = adapter.list_adapters(user=self.user_object)
        self.adapter_obj = None
        self.adapter_id = None
        self.flavor_id = None
        for adapter_obj in self.adapter_object:
            if adapter_obj['name'] == 'openstack_icehouse':
                self.adapter_obj = adapter_obj
                self.adapter_id = adapter_obj['id']
                break

        # NOTE(review): if no openstack_icehouse adapter was found,
        # self.adapter_obj is None and the loop below raises TypeError.
        for flavor in self.adapter_obj['flavors']:
            if flavor['name'] == 'HA-multinodes':
                self.flavor_id = flavor['id']
                break
Example #18
0
def os_installed(
    host_id, clusterhosts_ready, clusters_os_ready,
    username=None
):
    """Callback when os is installed.

    :param host_id: host that os is installed.
    :type host_id: integer
    :param clusterhosts_ready: maps cluster id to a flag telling whether
        the clusterhost should be marked ready.
    :param clusters_os_ready: only reported in the log message below;
        not otherwise consumed by this function.
    :param username: name of the acting user; when empty, user is None.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action') as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to '
                'do the post action after os installation'
            )
        logging.info(
            'os installed on host %s '
            'with cluster host ready %s cluster os ready %s',
            host_id, clusterhosts_ready, clusters_os_ready
        )
        if username:
            user = user_db.get_user_object(username)
        else:
            user = None
        # The host-level os_installed hook must fire exactly once even
        # though the host may belong to several clusters.
        os_installed_triggered = False
        for cluster_id, clusterhost_ready in clusterhosts_ready.items():
            if not clusterhost_ready and os_installed_triggered:
                continue

            cluster_info = util.ActionHelper.get_cluster_info(
                cluster_id, user)
            adapter_id = cluster_info[const.ADAPTER_ID]

            adapter_info = util.ActionHelper.get_adapter_info(
                adapter_id, cluster_id, user)
            hosts_info = util.ActionHelper.get_hosts_info(
                cluster_id, [host_id], user)

            deploy_manager = DeployManager(
                adapter_info, cluster_info, hosts_info)

            if not os_installed_triggered:
                deploy_manager.os_installed()
                util.ActionHelper.host_ready(host_id, True, user)
                os_installed_triggered = True

            if clusterhost_ready:
                #deploy_manager.cluster_os_installed()
                util.ActionHelper.cluster_host_ready(
                    cluster_id, host_id, False, user
                )


            # Once every host in the cluster has its os ready, run the
            # cluster-level os_installed hook.
            if util.ActionHelper.is_cluster_os_ready(cluster_id, user):
                logging.info("deploy_manager begin cluster_os_installed")
                deploy_manager.cluster_os_installed()
Example #19
0
def poll_switch(poller_email,
                ip_addr,
                credentials,
                req_obj='mac',
                oper="SCAN"):
    """Query switch and update switch machines.

    .. note::
       When polling switch succeeds, for each mac it got from polling switch,
       A Machine record associated with the switch is added to the database.

    :param poller_email: email identifying the polling user.
    :param ip_addr: switch ip address.
    :type ip_addr: str
    :param credentials: switch crednetials.
    :type credentials: dict
    :param req_obj: the object requested to query from switch.
    :type req_obj: str
    :param oper: the operation to query the switch.
    :type oper: str, should be one of ['SCAN', 'GET', 'SET']

    .. note::
       The function should be called out of database session scope.
    """
    poller = user_api.get_user_object(poller_email)
    ip_int = long(netaddr.IPAddress(ip_addr))
    with util.lock('poll switch %s' % ip_addr, timeout=120) as lock:
        if not lock:
            raise Exception('failed to acquire lock to poll switch %s' %
                            ip_addr)

        # TODO(grace): before repoll the switch, set the state to repolling.
        # and when the poll switch is timeout, set the state to error.
        # the frontend should only consider some main state like INTIALIZED,
        # ERROR and SUCCESSFUL, REPOLLING is as an intermediate state to
        # indicate the switch is in learning the mac of the machines connected
        # to it.
        logging.debug('poll switch: %s', ip_addr)
        switch_dict, machine_dicts = _poll_switch(ip_addr,
                                                  credentials,
                                                  req_obj=req_obj,
                                                  oper=oper)
        switches = switch_api.list_switches(ip_int=ip_int, user=poller)
        if not switches:
            logging.error('no switch found for %s', ip_addr)
            return

        for switch in switches:
            for machine_dict in machine_dicts:
                logging.info('add machine: %s', machine_dict)
                machine_dict['owner_id'] = poller.id
                switch_api.add_switch_machine(switch['id'],
                                              False,
                                              user=poller,
                                              **machine_dict)
                # NOTE(review): update_switch sits inside the machine loop,
                # so it runs once per machine; it looks like it was meant to
                # run once per switch -- confirm before moving it.
                switch_api.update_switch(switch['id'],
                                         user=poller,
                                         **switch_dict)
Example #20
0
def add_subnet():
    """Create a subnet described by the --subnet flag (name, cidr)."""
    if not flags.OPTIONS.subnet:
        print('flag --subnet is missing')
        return
    database.init()
    subnet_tuple = flags.OPTIONS.subnet
    name, cidr = subnet_tuple[0], subnet_tuple[1]
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    network_api.add_subnet(user=user, name=name, subnet=cidr)
Example #21
0
def os_installed(host_id,
                 clusterhosts_ready,
                 clusters_os_ready,
                 username=None):
    """Callback when os is installed.

    :param host_id: host that os is installed.
    :type host_id: integer
    :param clusterhosts_ready: the clusterhosts that should trigger ready;
        maps cluster id to a ready flag.
    :param clusters_os_ready: the cluster that should trigger os ready;
        only reported in the log message below.
    :param username: name of the acting user; when empty, user is None.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action') as lock:
        if not lock:
            raise Exception('failed to acquire lock to '
                            'do the post action after os installation')
        logging.info(
            'os installed on host %s '
            'with cluster host ready %s cluster os ready %s', host_id,
            clusterhosts_ready, clusters_os_ready)
        if username:
            user = user_db.get_user_object(username)
        else:
            user = None
        # The host-level os_installed hook must fire exactly once even
        # though the host may belong to several clusters.
        os_installed_triggered = False
        for cluster_id, clusterhost_ready in clusterhosts_ready.items():
            if not clusterhost_ready and os_installed_triggered:
                continue

            cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
            adapter_id = cluster_info[const.ADAPTER_ID]

            adapter_info = util.ActionHelper.get_adapter_info(
                adapter_id, cluster_id, user)
            hosts_info = util.ActionHelper.get_hosts_info(
                cluster_id, [host_id], user)

            deploy_manager = DeployManager(adapter_info, cluster_info,
                                           hosts_info)

            if not os_installed_triggered:
                deploy_manager.os_installed()
                util.ActionHelper.host_ready(host_id, True, user)
                os_installed_triggered = True

            if clusterhost_ready:
                # deploy_manager.cluster_os_installed()
                util.ActionHelper.cluster_host_ready(cluster_id, host_id,
                                                     False, user)

            # Once every host in the cluster has its os ready, run the
            # cluster-level os_installed hook.
            if util.ActionHelper.is_cluster_os_ready(cluster_id, user):
                logging.info("deploy_manager begin cluster_os_installed")
                deploy_manager.cluster_os_installed()
Example #22
0
def delete_cluster(
    cluster_id, host_id_list,
    username=None, delete_underlying_host=False
):
    """Delete cluster.

    :param cluster_id: id of the cluster.
    :type cluster_id: int
    :param host_id_list: ids of the hosts in the cluster.
    :param username: name of the user requesting the deletion.
    :param delete_underlying_host: when True the hosts are wiped too
        (remove_hosts is then not package_only).

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete cluster')

        user = user_db.get_user_object(username)

        # Flag the cluster and its hosts as ERROR and mark them for
        # reinstall before tearing anything down.
        for host_id in host_id_list:
            cluster_api.update_cluster_host_state(
                user, cluster_id, host_id, state='ERROR'
            )
        cluster_api.update_cluster_state(
            user, cluster_id, state='ERROR'
        )

        cluster_api.update_cluster(
            user, cluster_id, reinstall_distributed_system=True
        )
        for host_id in host_id_list:
            cluster_api.update_cluster_host(
                user, cluster_id, host_id, reinstall_os=True
            )

        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, host_id_list, user)

        logging.debug('adapter info: %s', adapter_info)
        logging.debug('cluster info: %s', cluster_info)
        logging.debug('hosts info: %s', hosts_info)
        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)

        deploy_manager.remove_hosts(
            package_only=not delete_underlying_host,
            delete_cluster=True
        )
        util.ActionHelper.delete_cluster(
            cluster_id, host_id_list, user,
            delete_underlying_host
        )
Example #23
0
def poll_switch(poller_email, ip_addr, credentials,
                req_obj='mac', oper="SCAN"):
    """Query switch and update switch machines.

    .. note::
       When polling switch succeeds, for each mac it got from polling switch,
       A Machine record associated with the switch is added to the database.

    :param poller_email: email identifying the polling user.
    :param ip_addr: switch ip address.
    :type ip_addr: str
    :param credentials: switch crednetials.
    :type credentials: dict
    :param req_obj: the object requested to query from switch.
    :type req_obj: str
    :param oper: the operation to query the switch.
    :type oper: str, should be one of ['SCAN', 'GET', 'SET']

    .. note::
       The function should be called out of database session scope.
    """
    poller = user_api.get_user_object(poller_email)
    ip_int = long(netaddr.IPAddress(ip_addr))
    with util.lock('poll switch %s' % ip_addr, timeout=120) as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to poll switch %s' % ip_addr
            )

        # TODO(grace): before repoll the switch, set the state to repolling.
        # and when the poll switch is timeout, set the state to error.
        # the frontend should only consider some main state like INTIALIZED,
        # ERROR and SUCCESSFUL, REPOLLING is as an intermediate state to
        # indicate the switch is in learning the mac of the machines connected
        # to it.
        logging.debug('poll switch: %s', ip_addr)
        switch_dict, machine_dicts = _poll_switch(
            ip_addr, credentials, req_obj=req_obj, oper=oper
        )
        switches = switch_api.list_switches(ip_int=ip_int, user=poller)
        if not switches:
            logging.error('no switch found for %s', ip_addr)
            return

        for switch in switches:
            for machine_dict in machine_dicts:
                logging.debug('add machine: %s', machine_dict)
                switch_api.add_switch_machine(
                    switch['id'], False, user=poller, **machine_dict
                )
                # NOTE(review): update_switch sits inside the machine loop,
                # so it runs once per machine; it looks like it was meant to
                # run once per switch -- confirm before moving it.
                switch_api.update_switch(
                    switch['id'],
                    user=poller,
                    **switch_dict
                )
Example #24
0
 def test_not_admin(self):
     """A non-admin user must not be allowed to update another user."""
     user_api.add_user(
         user=self.user_object,
         email='*****@*****.**',
         password='******',
         is_admin=False
     )
     non_admin = user_api.get_user_object('*****@*****.**')
     self.assertRaises(
         exception.Forbidden,
         user_api.update_user,
         2,
         user=non_admin,
         is_admin=False
     )
Example #25
0
 def test_delete_cluster_not_editable(self):
     """A cluster in INSTALLING state rejects deletion with HTTP 403."""
     self.user_object = user_api.get_user_object(
         setting.COMPASS_ADMIN_EMAIL
     )
     cluster_api.update_cluster_state(
         1,
         state='INSTALLING',
         user=self.user_object,
     )
     response = self.delete('/clusters/1')
     self.assertEqual(response.status_code, 403)
Example #26
0
 def setUp(self):
     """Build an isolated sqlite-backed environment for each test."""
     super(BaseTest, self).setUp()
     os.environ['COMPASS_IGNORE_SETTING'] = 'true'
     # Point the config loader at the fixture data next to this file.
     os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
         os.path.dirname(os.path.abspath(__file__)), 'data')
     reload(setting)
     database.init('sqlite://')
     database.create_db()
     adapter_api.load_adapters(force_reload=True)
     metadata_api.load_metadatas(force_reload=True)
     adapter_api.load_flavors(force_reload=True)
     self.user_object = (user_api.get_user_object(
         setting.COMPASS_ADMIN_EMAIL))
Example #27
0
 def test_not_admin(self):
     """Updating another account as a non-admin raises Forbidden."""
     user_api.add_user(user=self.user_object,
                       email='*****@*****.**',
                       password='******',
                       is_admin=False)
     plain_user = user_api.get_user_object('*****@*****.**')
     self.assertRaises(exception.Forbidden, user_api.update_user, 2,
                       user=plain_user, is_admin=False)
Example #28
0
def poll_switch(poller_email, ip_addr, credentials,
                req_obj='mac', oper="SCAN"):
    """Query switch and update switch machines.

    .. note::
       When polling switch succeeds, for each mac it got from polling switch,
       A Machine record associated with the switch is added to the database.

    :param poller_email: email of the user doing the polling.
    :type poller_email: str
    :param ip_addr: switch ip address.
    :type ip_addr: str
    :param credentials: switch crednetials.
    :type credentials: dict
    :param req_obj: the object requested to query from switch.
    :type req_obj: str
    :param oper: the operation to query the switch.
    :type oper: str, should be one of ['SCAN', 'GET', 'SET']

    .. note::
       The function should be called out of database session scope.
    """
    poller = user_api.get_user_object(poller_email)
    ip_int = long(netaddr.IPAddress(ip_addr))
    with util.lock('poll switch %s' % ip_addr, timeout=120) as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to poll switch %s' % ip_addr
            )

        logging.debug('poll switch: %s', ip_addr)
        switch_dict, machine_dicts = _poll_switch(
            ip_addr, credentials, req_obj=req_obj, oper=oper
        )
        switches = switch_api.list_switches(ip_int=ip_int, user=poller)
        if not switches:
            logging.error('no switch found for %s', ip_addr)
            return

        for switch in switches:
            for machine_dict in machine_dicts:
                logging.debug('add machine: %s', machine_dict)
                switch_api.add_switch_machine(
                    switch['id'], False, user=poller, **machine_dict
                )
            # switch_dict is produced once per poll and does not depend on
            # the machine, so update the switch once per switch instead of
            # once per discovered machine (the original issued a redundant
            # update inside the machine loop).
            switch_api.update_switch(
                switch['id'],
                user=poller,
                **switch_dict
            )
Example #29
0
 def setUp(self):
     """Set up a clean in-memory database and load adapter metadata."""
     super(BaseTest, self).setUp()
     # Re-import settings so the CONFIG_DIR override below starts from a
     # pristine module state.
     reload(setting)
     setting.CONFIG_DIR = os.path.join(
         os.path.dirname(os.path.abspath(__file__)),
         'data'
     )
     # Fresh in-memory database per test.
     database.init('sqlite://')
     database.create_db()
     adapter_api.load_adapters()
     metadata_api.load_metadatas()
     self.user_object = (
         user_api.get_user_object(
             setting.COMPASS_ADMIN_EMAIL
         )
     )
Example #30
0
def package_installed(
    cluster_id, host_id, cluster_ready,
    host_ready, username=None
):
    """Callback when package is installed.

    :param cluster_id: cluster id.
    :param host_id: host id.
    :param cluster_ready: if the cluster should trigger ready.
    :param host_ready: if the host should trigger ready.
    :param username: user triggering the action, or None for anonymous.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action') as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to '
                'do the post action after package installation'
            )
        logging.info(
            'package installed on cluster %s host %s '
            'with cluster ready %s host ready %s',
            cluster_id, host_id, cluster_ready, host_ready
        )

        # Resolve the acting user; anonymous when no username was given.
        user = user_db.get_user_object(username) if username else None

        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_info = util.ActionHelper.get_adapter_info(
            cluster_info[const.ADAPTER_ID], cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, [host_id], user)

        manager = DeployManager(adapter_info, cluster_info, hosts_info)
        manager.package_installed()

        # Mark the clusterhost ready, then optionally promote the whole
        # cluster and/or the underlying host.
        util.ActionHelper.cluster_host_ready(cluster_id, host_id, True, user)
        if cluster_ready:
            util.ActionHelper.cluster_ready(cluster_id, False, user)
        if host_ready:
            util.ActionHelper.host_ready(host_id, False, user)
Example #31
0
def deploy(cluster_id, hosts_id_list, username=None):
    """Deploy a cluster on the given hosts.

    :param cluster_id: id of the cluster to deploy.
    :param hosts_id_list: ids of the hosts to deploy in the cluster.
    :param username: user triggering the deployment.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=1000) as lock:
        if not lock:
            raise Exception('failed to acquire lock to deploy')

        user = user_db.get_user_object(username)

        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]
        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, hosts_id_list, user)

        try:
            manager = DeployManager(adapter_info, cluster_info, hosts_info)
            logging.debug('Created deploy manager with %s %s %s'
                          % (adapter_info, cluster_info, hosts_info))
            deployed_config = manager.deploy()
        except Exception as error:
            # Starting the deployment failed: record the error state.
            logging.exception(error)
            util.ActionHelper.update_state(
                cluster_id, hosts_id_list, user, state='ERROR',
                message='failed to start deployment', severity='ERROR'
            )
        else:
            util.ActionHelper.save_deployed_config(deployed_config, user)
            util.ActionHelper.update_state(
                cluster_id, hosts_id_list, user, state='INSTALLING'
            )
Example #32
0
def deploy(cluster_id, hosts_id_list, username=None):
    """Deploy a cluster on the given hosts.

    :param cluster_id: id of the cluster to deploy.
    :type cluster_id: int
    :param hosts_id_list: ids of the hosts in the cluster to deploy.
    :type hosts_id_list: list of int
    :param username: name of the user triggering the deployment.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=1000) as lock:
        if not lock:
            raise Exception('failed to acquire lock to deploy')

        user = user_db.get_user_object(username)

        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, hosts_id_list, user)

        deploy_successful = True
        try:
            deploy_manager = DeployManager(
                adapter_info, cluster_info, hosts_info)
            # deploy_manager.prepare_for_deploy()
            logging.debug('Created deploy manager with %s %s %s'
                          % (adapter_info, cluster_info, hosts_info))
            deployed_config = deploy_manager.deploy()
        except Exception as error:
            # Any failure while starting the deployment is recorded as an
            # ERROR state below instead of propagating to the caller.
            logging.exception(error)
            deploy_successful = False

        if deploy_successful:
            util.ActionHelper.save_deployed_config(deployed_config, user)
            util.ActionHelper.update_state(
                cluster_id, hosts_id_list, user, state='INSTALLING'
            )
        else:
            util.ActionHelper.update_state(
                cluster_id, hosts_id_list, user, state='ERROR',
                message='failed to start deployment', severity='ERROR'
            )
Example #33
0
def delete_clusters():
    """Delete clusters named by --clusternames (all clusters when empty)."""
    names = [
        name for name in flags.OPTIONS.clusternames.split(',') if name
    ]
    admin = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    # Only filter by name when at least one name was requested.
    filters = {'name': names} if names else {}
    delete_underlying_host = flags.OPTIONS.delete_hosts
    for cluster in cluster_api.list_clusters(user=admin, **filters):
        cluster_api.del_cluster(
            cluster['id'], True, False, delete_underlying_host, user=admin
        )
Example #34
0
def set_machine():
    """Load machines from --machine_file and register them in the DB."""
    if not flags.OPTIONS.machine_file:
        print 'flag --machine_file is missing'
        return
    database.init()
    machine_file = flags.OPTIONS.machine_file
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    with open(machine_file) as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary python objects; the machine file is presumably
        # operator-trusted, but yaml.safe_load would be safer -- confirm.
        machine_data = yaml.load(f)
        for machine in machine_data:
            # Power-management credentials; missing keys default to ''.
            power_manage = {}
            power_manage.update({"ip": machine.get("power_ip", "")})
            power_manage.update({"username": machine.get("power_user", "")})
            power_manage.update({"password": machine.get("power_pass", "")})
            machine_api.add_machine(user=user,
                                    mac=machine["mac"],
                                    power_type=machine["power_type"],
                                    power_manage=power_manage)
Example #35
0
 def setUp(self):
     """Build a fresh in-memory environment for every test."""
     super(BaseTest, self).setUp()
     # Point the settings module at the test fixtures, then re-import it.
     data_dir = os.path.join(
         os.path.dirname(os.path.abspath(__file__)), 'data')
     os.environ['COMPASS_IGNORE_SETTING'] = 'true'
     os.environ['COMPASS_CONFIG_DIR'] = data_dir
     reload(setting)
     database.init('sqlite://')
     database.create_db()
     # Reload cached adapter/metadata/flavor state from scratch.
     for loader in (adapter_api.load_adapters,
                    metadata_api.load_metadatas,
                    adapter_api.load_flavors):
         loader(force_reload=True)
     self.user_object = user_api.get_user_object(
         setting.COMPASS_ADMIN_EMAIL)
def delete_clusters():
    """Delete clusters selected by --clusternames (all when unset).

    For each matched cluster the deletion is either queued on celery
    (--async) or executed inline via delete.delete_cluster.
    """
    clusternames = [
        clustername
        for clustername in flags.OPTIONS.clusternames.split(',')
        if clustername
    ]
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    list_cluster_args = {}
    if clusternames:
        list_cluster_args['name'] = clusternames
    clusters = cluster_api.list_clusters(
        user, **list_cluster_args
    )
    delete_underlying_host = flags.OPTIONS.delete_hosts
    for cluster in clusters:
        cluster_id = cluster['id']
        hosts = cluster_api.list_cluster_hosts(user, cluster_id)
        host_id_list = [host['id'] for host in hosts]
        logging.info(
            'delete cluster %s and cluster hosts %s',
            cluster_id, host_id_list
        )
        logging.info('delete underlying host? %s', delete_underlying_host)
        # NOTE(review): 'async' is a reserved word in python 3.7+; this
        # attribute access only parses on python 2 / pre-3.7.
        if flags.OPTIONS.async:
            celery.send_task(
                'compass.tasks.delete_cluster',
                (
                    setting.COMPASS_ADMIN_EMAIL,
                    cluster_id,
                    host_id_list,
                    delete_underlying_host
                )
            )
        else:
            try:
                delete.delete_cluster(
                    cluster_id,
                    host_id_list,
                    setting.COMPASS_ADMIN_EMAIL,
                    delete_underlying_host
                )
            except Exception as error:
                # Best-effort: log and keep deleting the remaining clusters.
                logging.error('failed to delete cluster %s', cluster)
                logging.exception(error)
Example #37
0
def patch(cluster_id, username=None):
    """Patch cluster.

    :param cluster_id: id of the cluster
    :type cluster_id: int
    :param username: user triggering the patch.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=1000) as lock:
        if not lock:
            raise Exception('failed to acquire lock to deploy')

        user = user_db.get_user_object(username)
        cluster_hosts = cluster_db.list_cluster_hosts(cluster_id, user)
        hosts_id_list = [host['id'] for host in cluster_hosts]
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_info = util.ActionHelper.get_adapter_info(
            cluster_info[const.ADAPTER_ID], cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, hosts_id_list, user)

        try:
            patcher = Patcher(
                adapter_info, cluster_info, hosts_info, cluster_hosts)
            patched_config = patcher.patch()
        except Exception as error:
            logging.exception(error)
        else:
            # Clear the patched_roles list on every host once the patch
            # has been applied.
            clean_payload = json.loads('{"patched_roles": []}')
            for cluster_host in cluster_hosts:
                cluster_db.update_cluster_host(
                    cluster_id, cluster_host['id'], user, **clean_payload)
                logging.info(
                    "cleaning up patched roles for host id: %s",
                    cluster_host['id']
                )
            logging.info("Patch successful: %s", patched_config)
Example #38
0
def delete_clusters():
    """Delete the clusters selected by --clusternames (or all of them)."""
    requested = [
        name
        for name in flags.OPTIONS.clusternames.split(',')
        if name
    ]
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    # Filter by name only when specific clusters were requested.
    filters = {}
    if requested:
        filters['name'] = requested
    delete_underlying_host = flags.OPTIONS.delete_hosts
    for cluster in cluster_api.list_clusters(user=user, **filters):
        cluster_api.del_cluster(
            cluster['id'], True, False, delete_underlying_host, user=user
        )
Example #39
0
def patch(cluster_id, username=None):
    """Patch cluster.

    :param cluster_id: id of the cluster
    :type cluster_id: int
    :param username: user triggering the patch.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=1000) as lock:
        if not lock:
            raise Exception('failed to acquire lock to deploy')

        user = user_db.get_user_object(username)
        cluster_hosts = cluster_db.list_cluster_hosts(cluster_id, user)
        hosts_id_list = [host['id'] for host in cluster_hosts]
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(cluster_id,
                                                      hosts_id_list, user)
        patch_successful = True
        try:
            patcher = Patcher(adapter_info, cluster_info, hosts_info,
                              cluster_hosts)
            patched_config = patcher.patch()
        except Exception as error:
            # A failed patch is logged and leaves the hosts untouched.
            logging.exception(error)
            patch_successful = False

        if patch_successful:
            # Clear the patched_roles list on every host once the patch
            # has been applied.
            clean_payload = '{"patched_roles": []}'
            clean_payload = json.loads(clean_payload)
            for cluster_host in cluster_hosts:
                cluster_db.update_cluster_host(cluster_id, cluster_host['id'],
                                               user, **clean_payload)
                logging.info("cleaning up patched roles for host id: %s",
                             cluster_host['id'])
            logging.info("Patch successful: %s", patched_config)
Example #40
0
def redeploy(cluster_id, username=None):
    """Redeploy a cluster on all of its hosts.

    :param cluster_id: id of the cluster to redeploy.
    :param username: user triggering the redeployment.
    """
    with util.lock('serialized_action') as lock:
        if not lock:
            raise Exception('failed to acquire lock to deploy')

        user = user_db.get_user_object(username)
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_info = util.ActionHelper.get_adapter_info(
            cluster_info[const.ADAPTER_ID], cluster_id, user)

        # Redeploy covers every host currently in the cluster.
        cluster_hosts = cluster_db.list_cluster_hosts(cluster_id, user)
        hosts_id_list = [host['id'] for host in cluster_hosts]
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, hosts_id_list, user)

        try:
            manager = DeployManager(adapter_info, cluster_info, hosts_info)
            manager.redeploy()
        except Exception as error:
            logging.exception(error)
            util.ActionHelper.update_state(
                cluster_id, hosts_id_list, user, state='ERROR',
                message='failed to start redeployment', severity='ERROR'
            )
        else:
            util.ActionHelper.update_state(
                cluster_id, hosts_id_list, user, state='INSTALLING',
            )
Example #41
0
def delete_cluster(
    cluster_id, host_id_list,
    username=None, delete_underlying_host=False
):
    """Delete cluster and all clusterhosts on it.

    :param cluster_id: id of the cluster.
    :type cluster_id: int
    :param host_id_list: list of host id.
    :type host_id_list: list of int.

    If delete_underlying_host is set, all underlying hosts will
    be deleted.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete cluster')

        user = user_db.get_user_object(username)
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_info = util.ActionHelper.get_adapter_info(
            cluster_info[const.ADAPTER_ID], cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, host_id_list, user)

        manager = DeployManager(adapter_info, cluster_info, hosts_info)
        # package_only keeps the underlying OS hosts unless the caller
        # explicitly asked to wipe them as well.
        manager.remove_hosts(
            package_only=not delete_underlying_host,
            delete_cluster=True
        )
        util.ActionHelper.delete_cluster(
            cluster_id, host_id_list, user,
            delete_underlying_host
        )
Example #42
0
def pollswitches(switch_ips):
    """Poll the given switches (all known switches when the list is empty).

    :param switch_ips: ip addresses of the switches to poll.
    """
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    poll_switches = []
    # All known switches: ip -> credentials.
    all_switches = dict([
        (switch['ip'], switch['credentials'])
        for switch in switch_api.list_switches(user=user)
    ])
    if switch_ips:
        # Restrict polling to the requested ips that actually exist.
        poll_switches = dict([
            (switch_ip, all_switches[switch_ip])
            for switch_ip in switch_ips
            if switch_ip in all_switches
        ])
    else:
        poll_switches = all_switches

    # NOTE(review): 'async' is a reserved word in python 3.7+; this code
    # only parses on python 2 / pre-3.7.
    if flags.OPTIONS.async:
        for switch_ip, switch_credentials in poll_switches.items():
            celery.send_task(
                'compass.tasks.pollswitch',
                (user.email, switch_ip, switch_credentials)
            )

    else:
        try:
            # Poll the switches concurrently in a local worker pool.
            pool = Pool(processes=flags.OPTIONS.thread_pool_size)
            for switch_ip, switch_credentials in poll_switches.items():
                pool.apply_async(
                    poll_switch.poll_switch,
                    (user.email, switch_ip, switch_credentials)
                )
            pool.close()
            pool.join()
        except Exception as error:
            logging.error('failed to poll switches %s',
                          poll_switches)
            logging.exception(error)
Example #43
0
def pollswitches(switch_ips):
    """Poll the given switches (all known switches when the list is empty).

    :param switch_ips: ip addresses of the switches to poll.
    """
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    poll_switches = []
    # All known switches: ip -> credentials.
    all_switches = dict([
        (switch['ip'], switch['credentials'])
        for switch in switch_api.list_switches(user)
    ])
    if switch_ips:
        # Restrict polling to the requested ips that actually exist.
        poll_switches = dict([
            (switch_ip, all_switches[switch_ip])
            for switch_ip in switch_ips
            if switch_ip in all_switches
        ])
    else:
        poll_switches = all_switches

    # NOTE(review): 'async' is a reserved word in python 3.7+; this code
    # only parses on python 2 / pre-3.7.
    if flags.OPTIONS.async:
        for switch_ip, switch_credentials in poll_switches.items():
            celery.send_task(
                'compass.tasks.pollswitch',
                (user.email, switch_ip, switch_credentials)
            )

    else:
        try:
            # Poll the switches concurrently in a local worker pool.
            pool = Pool(processes=flags.OPTIONS.thread_pool_size)
            for switch_ip, switch_credentials in poll_switches.items():
                pool.apply_async(
                    poll_switch.poll_switch,
                    (user.email, switch_ip, switch_credentials)
                )
            pool.close()
            pool.join()
        except Exception as error:
            logging.error('failed to poll switches %s',
                          poll_switches)
            logging.exception(error)
Example #44
0
def delete_cluster(cluster_id,
                   host_id_list,
                   username=None,
                   delete_underlying_host=False):
    """Delete cluster and all clusterhosts on it.

    :param cluster_id: id of the cluster.
    :type cluster_id: int
    :param host_id_list: list of host id.
    :type host_id_list: list of int.
    :param username: user requesting the deletion.
    :param delete_underlying_host: also delete the underlying OS hosts.

    If delete_underlying_host is set, all underlying hosts will
    be deleted.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete cluster')

        user = user_db.get_user_object(username)

        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]

        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(cluster_id, host_id_list,
                                                      user)

        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)

        # package_only keeps the underlying OS hosts unless the caller
        # explicitly asked to delete them as well.
        deploy_manager.remove_hosts(package_only=not delete_underlying_host,
                                    delete_cluster=True)
        util.ActionHelper.delete_cluster(cluster_id, host_id_list, user,
                                         delete_underlying_host)
Example #45
0
def delete_host(
    host_id, cluster_id_list, username=None
):
    """Delete host and all clusterhosts on it.

    :param host_id: id of the host.
    :type host_id: int
    :param cluster_id_list: ids of the clusters the host belongs to.
    :param username: user requesting the deletion.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete host')

        user = user_db.get_user_object(username)
        # Detach the host from every cluster it belongs to before the
        # host record itself is removed.
        for cluster_id in cluster_id_list:
            cluster_info = util.ActionHelper.get_cluster_info(
                cluster_id, user)
            adapter_info = util.ActionHelper.get_adapter_info(
                cluster_info[const.ADAPTER_ID], cluster_id, user)
            hosts_info = util.ActionHelper.get_hosts_info(
                cluster_id, [host_id], user)

            manager = DeployManager(adapter_info, cluster_info, hosts_info)
            manager.remove_hosts(package_only=True, delete_cluster=False)

        util.ActionHelper.delete_host(host_id, user)
Example #46
0
def update_progress():
    """Update status and installing progress of the deploying clusters.

    .. note::
       The function should be called out of the database session scope.
       In the function, it will update the database cluster_state and
       host_state table for the deploying cluster and hosts.

       The function will also query log_progressing_history table to get
       the lastest installing progress and the position of log it has
       processed in the last run. The function uses these information to
       avoid recalculate the progress from the beginning of the log file.
       After the progress got updated, these information will be stored back
       to the log_progressing_history for next time run.
    """
    # blocking=False: skip this round instead of waiting when another
    # progress update is already running.
    with util.lock('log_progressing', timeout=60, blocking=False) as lock:
        if not lock:
            logging.error(
                'failed to acquire lock to calculate installation progress'
            )
            return

        logging.info('update installing progress')

        # All queries below run as the admin user.
        user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
        # Phase 1: collect INSTALLING hosts, keyed as
        # host_id -> (host, host_state, {filename: log_history}).
        hosts = host_api.list_hosts(user=user)
        host_mapping = {}
        for host in hosts:
            if 'id' not in host:
                logging.error('id is not in host %s', host)
                continue
            host_id = host['id']
            if 'os_name' not in host:
                logging.error('os_name is not in host %s', host)
                continue
            if 'os_installer' not in host:
                logging.error('os_installer is not in host %s', host)
                continue
            host_dirname = setting.HOST_INSTALLATION_LOGDIR_NAME
            if host_dirname not in host:
                logging.error(
                    '%s is not in host %s', host_dirname, host
                )
                continue
            host_state = host_api.get_host_state(host_id, user=user)
            if 'state' not in host_state:
                logging.error('state is not in host state %s', host_state)
                continue
            if host_state['state'] == 'INSTALLING':
                host_log_histories = host_api.get_host_log_histories(
                    host_id, user=user
                )
                host_log_history_mapping = {}
                for host_log_history in host_log_histories:
                    if 'filename' not in host_log_history:
                        logging.error(
                            'filename is not in host log history %s',
                            host_log_history
                        )
                        continue
                    host_log_history_mapping[
                        host_log_history['filename']
                    ] = host_log_history
                host_mapping[host_id] = (
                    host, host_state, host_log_history_mapping
                )
            else:
                logging.info(
                    'ignore host state %s since it is not in installing',
                    host_state
                )
        # Phase 2: index adapters that define a package installer.
        adapters = adapter_api.list_adapters(user=user)
        adapter_mapping = {}
        for adapter in adapters:
            if 'id' not in adapter:
                logging.error(
                    'id not in adapter %s', adapter
                )
                continue
            if 'package_installer' not in adapter:
                logging.info(
                    'package_installer not in adapter %s', adapter
                )
                continue
            adapter_id = adapter['id']
            adapter_mapping[adapter_id] = adapter
        # Phase 3: index clusters with a usable state and adapter_id.
        clusters = cluster_api.list_clusters(user=user)
        cluster_mapping = {}
        for cluster in clusters:
            if 'id' not in cluster:
                logging.error('id not in cluster %s', cluster)
                continue
            cluster_id = cluster['id']
            if 'adapter_id' not in cluster:
                logging.error(
                    'adapter_id not in cluster %s',
                    cluster
                )
                continue
            cluster_state = cluster_api.get_cluster_state(
                cluster_id,
                user=user
            )
            if 'state' not in cluster_state:
                logging.error('state not in cluster state %s', cluster_state)
                continue
            cluster_mapping[cluster_id] = (cluster, cluster_state)
        # Phase 4: collect INSTALLING clusterhosts whose cluster and
        # adapter passed the checks above.
        clusterhosts = cluster_api.list_clusterhosts(user=user)
        clusterhost_mapping = {}
        for clusterhost in clusterhosts:
            if 'clusterhost_id' not in clusterhost:
                logging.error(
                    'clusterhost_id not in clusterhost %s',
                    clusterhost
                )
                continue
            clusterhost_id = clusterhost['clusterhost_id']
            if 'cluster_id' not in clusterhost:
                logging.error(
                    'cluster_id not in clusterhost %s',
                    clusterhost
                )
                continue
            cluster_id = clusterhost['cluster_id']
            if cluster_id not in cluster_mapping:
                logging.info(
                    'ignore clusterhost %s '
                    'since the cluster_id '
                    'is not in cluster_mapping %s',
                    clusterhost, cluster_mapping
                )
                continue
            cluster, _ = cluster_mapping[cluster_id]
            if 'flavor_name' not in cluster:
                logging.error(
                    'flavor_name is not in clusterhost %s related cluster',
                    clusterhost
                )
                continue
            clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME
            if clusterhost_dirname not in clusterhost:
                logging.error(
                    '%s is not in clusterhost %s',
                    clusterhost_dirname, clusterhost
                )
                continue
            adapter_id = cluster['adapter_id']
            if adapter_id not in adapter_mapping:
                logging.info(
                    'ignore clusterhost %s '
                    'since the adapter_id %s '
                    'is not in adaper_mapping %s',
                    clusterhost, adapter_id, adapter_mapping
                )
                continue
            adapter = adapter_mapping[adapter_id]
            if 'package_installer' not in adapter:
                logging.info(
                    'ignore clusterhost %s '
                    'since the package_installer is not define '
                    'in adapter %s',
                    clusterhost, adapter
                )
                continue
            package_installer = adapter['package_installer']
            clusterhost['package_installer'] = package_installer
            clusterhost['adapter_name'] = adapter['name']
            clusterhost_state = cluster_api.get_clusterhost_self_state(
                clusterhost_id, user=user
            )
            if 'state' not in clusterhost_state:
                logging.error(
                    'state not in clusterhost_state %s',
                    clusterhost_state
                )
                continue
            if clusterhost_state['state'] == 'INSTALLING':
                clusterhost_log_histories = (
                    cluster_api.get_clusterhost_log_histories(
                        clusterhost_id, user=user
                    )
                )
                clusterhost_log_history_mapping = {}
                for clusterhost_log_history in clusterhost_log_histories:
                    if 'filename' not in clusterhost_log_history:
                        logging.error(
                            'filename not in clusterhost_log_history %s',
                            clusterhost_log_history
                        )
                        continue
                    clusterhost_log_history_mapping[
                        clusterhost_log_history['filename']
                    ] = clusterhost_log_history
                clusterhost_mapping[clusterhost_id] = (
                    clusterhost, clusterhost_state,
                    clusterhost_log_history_mapping
                )
            else:
                logging.info(
                    'ignore clusterhost state %s '
                    'since it is not in installing',
                    clusterhost_state
                )

        # Phase 5: recompute progress and write the updated states and
        # per-logfile positions back to the database.
        progress_calculator.update_host_progress(
            host_mapping)
        for host_id, (host, host_state, host_log_history_mapping) in (
            host_mapping.items()
        ):
            host_api.update_host_state(
                host_id, user=user,
                percentage=host_state.get('percentage', 0),
                message=host_state.get('message', ''),
                severity=host_state.get('severity', 'INFO')
            )
            for filename, host_log_history in (
                host_log_history_mapping.items()
            ):
                host_api.add_host_log_history(
                    host_id, filename=filename, user=user,
                    position=host_log_history.get('position', 0),
                    percentage=host_log_history.get('percentage', 0),
                    partial_line=host_log_history.get('partial_line', ''),
                    message=host_log_history.get('message', ''),
                    severity=host_log_history.get('severity', 'INFO'),
                    line_matcher_name=host_log_history.get(
                        'line_matcher_name', 'start'
                    )
                )
        progress_calculator.update_clusterhost_progress(
            clusterhost_mapping)
        for (
            clusterhost_id,
            (clusterhost, clusterhost_state, clusterhost_log_history_mapping)
        ) in (
            clusterhost_mapping.items()
        ):
            cluster_api.update_clusterhost_state(
                clusterhost_id, user=user,
                percentage=clusterhost_state.get('percentage', 0),
                message=clusterhost_state.get('message', ''),
                severity=clusterhost_state.get('severity', 'INFO')
            )
            for filename, clusterhost_log_history in (
                clusterhost_log_history_mapping.items()
            ):
                cluster_api.add_clusterhost_log_history(
                    clusterhost_id, user=user, filename=filename,
                    position=clusterhost_log_history.get('position', 0),
                    percentage=clusterhost_log_history.get('percentage', 0),
                    partial_line=clusterhost_log_history.get(
                        'partial_line', ''),
                    message=clusterhost_log_history.get('message', ''),
                    severity=clusterhost_log_history.get('severity', 'INFO'),
                    line_matcher_name=(
                        clusterhost_log_history.get(
                            'line_matcher_name', 'start'
                        )
                    )
                )
        progress_calculator.update_cluster_progress(
            cluster_mapping)
        for cluster_id, (cluster, cluster_state) in cluster_mapping.items():
            cluster_api.update_cluster_state(
                cluster_id, user=user
            )
Example #47
0
 def test_get_user_object(self):
     """The admin email must resolve to an existing user object."""
     admin = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
     self.assertIsNotNone(admin)
Example #48
0
def clean_installers():
    """Clean os and package installers for all adapters known to compass.

    The installers to clean are filtered by the comma-separated
    ``--os_installers`` and ``--package_installers`` command line flags;
    an empty flag means every installer found in the adapters is cleaned.
    When the ``async`` flag is set, cleaning is dispatched to celery tasks;
    otherwise it runs inline with per-installer error logging so one
    failing installer does not abort the others.
    """
    # Split the flag values and drop empty entries (e.g. trailing commas).
    os_installers = [
        os_installer for os_installer in flags.OPTIONS.os_installers.split(',')
        if os_installer
    ]
    package_installers = [
        package_installer
        for package_installer in flags.OPTIONS.package_installers.split(',')
        if package_installer
    ]
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    adapters = adapter_api.list_adapters(user=user)
    # Collect installers keyed by their alias, filtered by the flags above.
    filtered_os_installers = {}
    filtered_package_installers = {}
    for adapter in adapters:
        logging.info('got adapter: %s', adapter)
        if 'os_installer' in adapter:
            os_installer = adapter['os_installer']
            os_installer_name = os_installer['alias']
            if not os_installers or os_installer_name in os_installers:
                filtered_os_installers[os_installer_name] = os_installer
            else:
                logging.info('ignore os installer %s', os_installer_name)
        else:
            logging.info('cannot find os installer in adapter %s',
                         adapter['name'])
        if 'package_installer' in adapter:
            package_installer = adapter['package_installer']
            package_installer_name = package_installer['alias']
            if (not package_installers
                    or package_installer_name in package_installers):
                filtered_package_installers[package_installer_name] = (
                    package_installer)
            else:
                logging.info('ignore package installer %s',
                             package_installer_name)
        else:
            logging.info('cannot find package installer in adapter %s',
                         adapter['name'])
    logging.info('clean os installers: %s', filtered_os_installers.keys())
    logging.info('clean package installers: %s',
                 filtered_package_installers.keys())
    # 'async' became a reserved keyword in python 3.7, so the flag must be
    # read via getattr: the former `flags.OPTIONS. async` attribute access
    # is a syntax error on python 3.
    if getattr(flags.OPTIONS, 'async'):
        for os_installer_name, os_installer in filtered_os_installers.items():
            celery.send_task('compass.tasks.clean_os_installer',
                             (os_installer['name'], os_installer['settings']))
        for package_installer_name, package_installer in (
                filtered_package_installers.items()):
            celery.send_task(
                'compass.tasks.clean_package_installer',
                (package_installer['name'], package_installer['settings']))
    else:
        for os_installer_name, os_installer in (
                filtered_os_installers.items()):
            try:
                clean.clean_os_installer(os_installer['name'],
                                         os_installer['settings'])
            except Exception as error:
                # Best-effort: log and continue with the next installer.
                logging.error('failed to clean os installer %s',
                              os_installer_name)
                logging.exception(error)
        for package_installer_name, package_installer in (
                filtered_package_installers.items()):
            try:
                clean.clean_package_installer(package_installer['name'],
                                              package_installer['settings'])
            except Exception as error:
                # Best-effort: log and continue with the next installer.
                logging.error('failed to clean package installer %s',
                              package_installer_name)
                logging.exception(error)
Example #49
0
def update_progress():
    """Update status and installing progress of the given cluster.

    :param cluster_hosts: clusters and hosts in each cluster to update.
    :type cluster_hosts: dict of int or str to list of int or str

    .. note::
       The function should be called out of the database session scope.
       In the function, it will update the database cluster_state and
       host_state table for the deploying cluster and hosts.

       The function will also query log_progressing_history table to get
       the lastest installing progress and the position of log it has
       processed in the last run. The function uses these information to
       avoid recalculate the progress from the beginning of the log file.
       After the progress got updated, these information will be stored back
       to the log_progressing_history for next time run.
    """
    # Non-blocking lock: if another run is already calculating progress,
    # log and return instead of waiting for it to finish.
    with util.lock('log_progressing', timeout=60, blocking=False) as lock:
        if not lock:
            logging.error(
                'failed to acquire lock to calculate installation progress'
            )
            return

        logging.info('update installing progress')

        user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
        # Phase 1: collect hosts currently in INSTALLING state into
        # host_mapping: host_id -> (host, host_state,
        # {log filename -> log history}). Hosts missing any required
        # field are skipped with an error log.
        hosts = host_api.list_hosts(user=user)
        host_mapping = {}
        for host in hosts:
            if 'id' not in host:
                logging.error('id is not in host %s', host)
                continue
            host_id = host['id']
            if 'os_name' not in host:
                logging.error('os_name is not in host %s', host)
                continue
            if 'os_installer' not in host:
                logging.error('os_installer is not in host %s', host)
                continue
            host_dirname = setting.HOST_INSTALLATION_LOGDIR_NAME
            if host_dirname not in host:
                logging.error(
                    '%s is not in host %s', host_dirname, host
                )
                continue
            host_state = host_api.get_host_state(host_id, user=user)
            if 'state' not in host_state:
                logging.error('state is not in host state %s', host_state)
                continue
            if host_state['state'] == 'INSTALLING':
                host_log_histories = host_api.get_host_log_histories(
                    host_id, user=user
                )
                # Index the host's log histories by filename so the
                # write-back phase can update each log file's record.
                host_log_history_mapping = {}
                for host_log_history in host_log_histories:
                    if 'filename' not in host_log_history:
                        logging.error(
                            'filename is not in host log history %s',
                            host_log_history
                        )
                        continue
                    host_log_history_mapping[
                        host_log_history['filename']
                    ] = host_log_history
                host_mapping[host_id] = (
                    host, host_state, host_log_history_mapping
                )
            else:
                logging.info(
                    'ignore host state %s since it is not in installing',
                    host_state
                )
        # Phase 2: index adapters that define a package installer by
        # adapter id; clusterhosts are later matched against this map.
        adapters = adapter_api.list_adapters(user=user)
        adapter_mapping = {}
        for adapter in adapters:
            if 'id' not in adapter:
                logging.error(
                    'id not in adapter %s', adapter
                )
                continue
            if 'package_installer' not in adapter:
                logging.info(
                    'package_installer not in adapter %s', adapter
                )
                continue
            adapter_id = adapter['id']
            adapter_mapping[adapter_id] = adapter
        # Phase 3: collect clusters with a known state into
        # cluster_mapping: cluster_id -> (cluster, cluster_state).
        clusters = cluster_api.list_clusters(user=user)
        cluster_mapping = {}
        for cluster in clusters:
            if 'id' not in cluster:
                logging.error('id not in cluster %s', cluster)
                continue
            cluster_id = cluster['id']
            if 'adapter_id' not in cluster:
                logging.error(
                    'adapter_id not in cluster %s',
                    cluster
                )
                continue
            cluster_state = cluster_api.get_cluster_state(
                cluster_id,
                user=user
            )
            if 'state' not in cluster_state:
                logging.error('state not in cluster state %s', cluster_state)
                continue
            cluster_mapping[cluster_id] = (cluster, cluster_state)
        # Phase 4: collect INSTALLING clusterhosts whose cluster and
        # adapter are known, into clusterhost_mapping:
        # clusterhost_id -> (clusterhost, state, {filename -> log history}).
        clusterhosts = cluster_api.list_clusterhosts(user=user)
        clusterhost_mapping = {}
        for clusterhost in clusterhosts:
            if 'clusterhost_id' not in clusterhost:
                logging.error(
                    'clusterhost_id not in clusterhost %s',
                    clusterhost
                )
                continue
            clusterhost_id = clusterhost['clusterhost_id']
            if 'distributed_system_name' not in clusterhost:
                logging.error(
                    'distributed_system_name is not in clusterhost %s',
                    clusterhost
                )
                continue
            # NOTE: 'INATALLATION' (sic) matches the constant's spelling
            # as defined in the settings module; do not "correct" it here.
            clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME
            if clusterhost_dirname not in clusterhost:
                logging.error(
                    '%s is not in clusterhost %s',
                    clusterhost_dirname, clusterhost
                )
                continue
            if 'cluster_id' not in clusterhost:
                logging.error(
                    'cluster_id not in clusterhost %s',
                    clusterhost
                )
                continue
            cluster_id = clusterhost['cluster_id']
            if cluster_id not in cluster_mapping:
                logging.info(
                    'ignore clusterhost %s '
                    'since the cluster_id '
                    'is not in cluster_mapping %s',
                    clusterhost, cluster_mapping
                )
                continue
            cluster, _ = cluster_mapping[cluster_id]
            adapter_id = cluster['adapter_id']
            if adapter_id not in adapter_mapping:
                logging.info(
                    'ignore clusterhost %s '
                    'since the adapter_id %s '
                    'is not in adaper_mapping %s',
                    clusterhost, adapter_id, adapter_mapping
                )
                continue
            adapter = adapter_mapping[adapter_id]
            if 'package_installer' not in adapter:
                logging.info(
                    'ignore clusterhost %s '
                    'since the package_installer is not define '
                    'in adapter %s',
                    clusterhost, adapter
                )
                continue
            # Attach the adapter's package installer so the progress
            # calculator knows which log format to parse.
            package_installer = adapter['package_installer']
            clusterhost['package_installer'] = package_installer
            clusterhost_state = cluster_api.get_clusterhost_self_state(
                clusterhost_id, user=user
            )
            if 'state' not in clusterhost_state:
                logging.error(
                    'state not in clusterhost_state %s',
                    clusterhost_state
                )
                continue
            if clusterhost_state['state'] == 'INSTALLING':
                clusterhost_log_histories = (
                    cluster_api.get_clusterhost_log_histories(
                        clusterhost_id, user=user
                    )
                )
                clusterhost_log_history_mapping = {}
                for clusterhost_log_history in clusterhost_log_histories:
                    if 'filename' not in clusterhost_log_history:
                        logging.error(
                            'filename not in clusterhost_log_history %s',
                            clusterhost_log_history
                        )
                        continue
                    clusterhost_log_history_mapping[
                        clusterhost_log_history['filename']
                    ] = clusterhost_log_history
                clusterhost_mapping[clusterhost_id] = (
                    clusterhost, clusterhost_state,
                    clusterhost_log_history_mapping
                )
            else:
                logging.info(
                    'ignore clusterhost state %s '
                    'since it is not in installing',
                    clusterhost_state
                )

        # Phase 5: let the progress calculator update each mapping
        # in place, then write the refreshed states and per-log-file
        # positions back through the APIs.
        progress_calculator.update_host_progress(
            host_mapping)
        for host_id, (host, host_state, host_log_history_mapping) in (
            host_mapping.items()
        ):
            host_api.update_host_state(
                host_id, user=user,
                percentage=host_state.get('percentage', 0),
                message=host_state.get('message', ''),
                severity=host_state.get('severity', 'INFO')
            )
            for filename, host_log_history in (
                host_log_history_mapping.items()
            ):
                # Persist position/percentage so the next run resumes
                # from where this one stopped in the log file.
                host_api.add_host_log_history(
                    host_id, filename=filename, user=user,
                    position=host_log_history.get('position', 0),
                    percentage=host_log_history.get('percentage', 0),
                    partial_line=host_log_history.get('partial_line', ''),
                    message=host_log_history.get('message', ''),
                    severity=host_log_history.get('severity', 'INFO'),
                    line_matcher_name=host_log_history.get(
                        'line_matcher_name', 'start'
                    )
                )
        progress_calculator.update_clusterhost_progress(
            clusterhost_mapping)
        for (
            clusterhost_id,
            (clusterhost, clusterhost_state, clusterhost_log_history_mapping)
        ) in (
            clusterhost_mapping.items()
        ):
            cluster_api.update_clusterhost_state(
                clusterhost_id, user=user,
                percentage=clusterhost_state.get('percentage', 0),
                message=clusterhost_state.get('message', ''),
                severity=clusterhost_state.get('severity', 'INFO')
            )
            for filename, clusterhost_log_history in (
                clusterhost_log_history_mapping.items()
            ):
                cluster_api.add_clusterhost_log_history(
                    clusterhost_id, user=user, filename=filename,
                    position=clusterhost_log_history.get('position', 0),
                    percentage=clusterhost_log_history.get('percentage', 0),
                    partial_line=clusterhost_log_history.get(
                        'partial_line', ''),
                    message=clusterhost_log_history.get('message', ''),
                    severity=clusterhost_log_history.get('severity', 'INFO'),
                    line_matcher_name=(
                        clusterhost_log_history.get(
                            'line_matcher_name', 'start'
                        )
                    )
                )
        progress_calculator.update_cluster_progress(
            cluster_mapping)
        for cluster_id, (cluster, cluster_state) in cluster_mapping.items():
            cluster_api.update_cluster_state(
                cluster_id, user=user
            )
Example #50
0
    def setUp(self):
        """Build a full host-test fixture on an in-memory database.

        Initializes sqlite, loads adapters/metadata/flavors, then creates
        clusters, a switch with machines, clusterhosts, subnets, host
        networks and host log histories; finally prepares os/package
        config dicts used by the tests.
        """
        super(HostTestCase, self).setUp()
        # Point compass at the test data config dir before reloading
        # settings so the reload picks the override up.
        os.environ['COMPASS_IGNORE_SETTING'] = 'true'
        os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'data'
        )
        reload(setting)
        # Fresh in-memory database per test.
        database.init('sqlite://')
        database.create_db()
        adapter.load_adapters(force_reload=True)
        metadata.load_metadatas(force_reload=True)
        adapter.load_flavors(force_reload=True)

        self.user_object = (
            user_api.get_user_object(
                setting.COMPASS_ADMIN_EMAIL
            )
        )
        # get adapter information
        # Picks an os_id from each adapter's supported oses and the
        # adapter/flavor ids from the 'allinone' flavor's first role.
        # NOTE(review): the outer loop has no break, so the ids reflect
        # the last matching adapter — presumably intentional; confirm.
        list_adapters = adapter.list_adapters(user=self.user_object)
        for list_adapter in list_adapters:
            for supported_os in list_adapter['supported_oses']:
                self.os_id = supported_os['os_id']
                break
            if list_adapter['flavors']:
                details = list_adapter['flavors']
                for detail in details:
                    if detail['display_name'] == 'allinone':
                        roles = detail['roles']
                        for role in roles:
                            self.adapter_id = role['adapter_id']
                            self.flavor_id = role['flavor_id']
                            break

        # add cluster
        cluster_names = ['test_cluster1', 'test_cluster2']
        for cluster_name in cluster_names:
            cluster.add_cluster(
                user=self.user_object,
                adapter_id=self.adapter_id,
                os_id=self.os_id,
                flavor_id=self.flavor_id,
                name=cluster_name
            )
        clusters = cluster.list_clusters(user=self.user_object)
        self.roles = None
        # Remember test_cluster1's id; self.roles ends up holding the
        # last role item seen before the break.
        for list_cluster in clusters:
            for item in list_cluster['flavor']['roles']:
                self.roles = item
            if list_cluster['name'] == 'test_cluster1':
                self.cluster_id = list_cluster['id']
                break
        # add switch
        switch.add_switch(
            user=self.user_object,
            ip='172.29.8.40'
        )
        switches = switch.list_switches(user=self.user_object)
        # Keep the id of the last listed switch (only one is added above).
        self.switch_id = None
        for item in switches:
            self.switch_id = item['id']
        # Register two machines on the switch by MAC.
        macs = ['28:6e:d4:46:c4:25', '00:0c:29:bf:eb:1d']
        for mac in macs:
            switch.add_switch_machine(
                self.switch_id,
                user=self.user_object,
                mac=mac,
                port='1'
            )
        # get machine information
        machines = machine.list_machines(user=self.user_object)
        self.machine_ids = []
        for item in machines:
            self.machine_ids.append(item['id'])
        # add cluster host
        name = ['newname1', 'newname2']
        for i in range(0, 2):
            cluster.add_cluster_host(
                self.cluster_id,
                user=self.user_object,
                machine_id=self.machine_ids[i],
                name=name[i]
            )
        self.host_ids = []
        clusterhosts = cluster.list_clusterhosts(user=self.user_object)
        for clusterhost in clusterhosts:
            self.host_ids.append(clusterhost['host_id'])
        # add subnet
        subnets = ['10.145.88.0/23', '192.168.100.0/23']
        for subnet in subnets:
            network.add_subnet(
                user=self.user_object,
                subnet=subnet
            )
        list_subnet = network.list_subnets(
            user=self.user_object
        )
        self.subnet_ids = []
        for item in list_subnet:
            self.subnet_ids.append(item['id'])
        # add host network
        # NOTE(review): the ips below equal the subnets' network addresses;
        # presumably acceptable for these tests — confirm.
        host.add_host_network(
            self.host_ids[0],
            user=self.user_object,
            interface='eth0',
            ip='10.145.88.0',
            subnet_id=self.subnet_ids[0],
            is_mgmt=True
        )
        host.add_host_network(
            self.host_ids[1],
            user=self.user_object,
            interface='eth1',
            ip='192.168.100.0',
            subnet_id=self.subnet_ids[1],
            is_promiscuous=True
        )
        # add log history
        filenames = ['log1', 'log2']
        for filename in filenames:
            host.add_host_log_history(
                self.host_ids[0],
                user=self.user_object,
                filename=filename
            )

        # Sample os-level configuration used by the tests.
        self.os_configs = {
            'general': {
                'language': 'EN',
                'timezone': 'UTC',
                'http_proxy': 'http://127.0.0.1:3128',
                'https_proxy': 'http://127.0.0.1:3128',
                'no_proxy': [
                    '127.0.0.1',
                    'compass'
                ],
                'ntp_server': '127.0.0.1',
                'dns_servers': [
                    '127.0.0.1'
                ],
                'domain': 'ods.com',
                'search_path': [
                    'ods.com'
                ],
                'default_gateway': '127.0.0.1',
            },
            'server_credentials': {
                'username': '******',
                'password': '******',
            },
            'partition': {
                '/var': {
                    'max_size': '100G',
                    'percentage': 10,
                    'size': '1G'
                }
            }
        }
        # Sample package-level configuration; '$'-prefixed keys are
        # metadata placeholders filled in elsewhere.
        self.package_configs = {
            'security': {
                'service_credentials': {
                    '$service': {
                        'username': '******',
                        'password': '******'
                    }
                },
                'console_credentials': {
                    '$console': {
                        'username': '******',
                        'password': '******'
                    }
                }
            },
            'network_mapping': {
                '$interface_type': 'eth0'
            }
        }