Exemplo n.º 1
0
def remove_registry(req, hosts_id_list, host_ip, log_file):
    """Stop and remove the docker registry container on a remote host.

    :param req: the WSGI/Webob Request object (unused here, kept for the
                caller's common worker signature)
    :param hosts_id_list: ids of the hosts being processed (unused here)
    :param host_ip: management IP of the host to clean up
    :param log_file: file object handed to daisy_cmn.subprocess_call for
                     command output logging
    """
    LOG.info(_("begin to remove docker registry on host %s" % host_ip))
    # grep/awk run on the local side of the ssh pipe, which is enough to
    # detect whether a registry:2 container is listed on the host.
    check_docker_container_cmd = \
        "ssh -o StrictHostKeyChecking=no %s \
        docker ps |grep registry:2 |awk -F ' ' '{print $2}'" % (host_ip)
    docker_container_result = \
        subprocess.check_output(check_docker_container_cmd,
                                shell=True,
                                stderr=subprocess.STDOUT)

    stop_docker_container_cmd = \
        'ssh -o StrictHostKeyChecking=no %s \
        "docker stop registry"' % (host_ip)
    remove_docker_container_cmd = \
        'ssh -o StrictHostKeyChecking=no %s \
        "docker rm registry"' % (host_ip)

    if "registry:2" in docker_container_result:
        daisy_cmn.subprocess_call(stop_docker_container_cmd, log_file)
        daisy_cmn.subprocess_call(remove_docker_container_cmd, log_file)

    # NOTE(review): only the container is stopped and removed; the
    # registry:2 image itself is left on the host (the original also
    # built a "docker rmi" command but never executed it).
    LOG.info(_("remove docker images on host %s successfully!" % host_ip))
Exemplo n.º 2
0
def remove_registry(req, hosts_id_list, host_ip, log_file):
    """Stop and remove the docker registry container on a remote host.

    :param req: the WSGI/Webob Request object (unused here)
    :param hosts_id_list: ids of the hosts being processed (unused here)
    :param host_ip: management IP of the host to clean up
    :param log_file: file object handed to daisy_cmn.subprocess_call
    """
    LOG.info(_("begin to remove docker registry on host %s" % host_ip))
    # grep/awk run on the local side of the ssh pipe, which is enough to
    # detect whether a registry:2 container is listed on the host.
    check_docker_container_cmd = \
        "ssh -o StrictHostKeyChecking=no %s \
        docker ps |grep registry:2 |awk -F ' ' '{print $2}'" % (host_ip)
    docker_container_result = \
        subprocess.check_output(check_docker_container_cmd,
                                shell=True,
                                stderr=subprocess.STDOUT)

    stop_docker_container_cmd = \
        'ssh -o StrictHostKeyChecking=no %s \
        "docker stop registry"' % (host_ip)
    remove_docker_container_cmd = \
        'ssh -o StrictHostKeyChecking=no %s \
        "docker rm registry"' % (host_ip)
    # NOTE(review): built but never executed below, so the registry:2
    # image is left on the host despite the final log message -- confirm
    # whether that is intentional.
    remove_docker_images_cmd = \
        'ssh -o StrictHostKeyChecking=no %s \
        "docker rmi -f registry:2"' % (host_ip)

    if "registry:2" in docker_container_result:
        daisy_cmn.subprocess_call(stop_docker_container_cmd, log_file)
        daisy_cmn.subprocess_call(remove_docker_container_cmd, log_file)

    LOG.info(_("remove docker images on host %s successfully!" % host_ip))
Exemplo n.º 3
0
 def getnodeinfo_ip(self, daisy_management_ip):
     """Point getnodeinfo.sh at the daisy management IP.

     Rewrites the dhcp_ip= line of /var/lib/daisy/kolla/getnodeinfo.sh
     in place with sed so the script uses *daisy_management_ip*.
     """
     # Find the line number of "dhcp_ip=" and replace that whole line
     # (sed "<n>c <text>") with the new value.
     cmd = 'dhcp_linenumber=`grep -n "dhcp_ip="' \
           ' /var/lib/daisy/kolla/getnodeinfo.sh|cut -d ":" -f 1` && ' \
           'sed -i "${dhcp_linenumber}c dhcp_ip=\'%s\'" ' \
           '/var/lib/daisy/kolla/getnodeinfo.sh' \
           % (daisy_management_ip,)
     daisy_cmn.subprocess_call(cmd)
Exemplo n.º 4
0
def config_compute_multipath(all_nodes_ip):
    """Configure Disk Array multipath on every node in *all_nodes_ip*.

    For each node: set up ssh trust, create /home/tecs_install, copy the
    storage_auto_config tool over and run its check_multipath step.

    :param all_nodes_ip: iterable of node management IPs
    :returns: "" on success, otherwise an error description for the
              first node that failed
    """
    error_msg = ""
    for host_ip in all_nodes_ip:
        password = "******"
        # Establish password-less ssh/clush access to the node.
        cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
        daisy_cmn.subprocess_call(cmd)
        cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd)
        try:
            subprocess.check_output(
                'scp -o StrictHostKeyChecking=no -r '
                '/var/lib/daisy/tecs/storage_auto_config '
                '%s:/home/tecs_install' % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # Log the scp output before giving up on this node.
            LOG.info(_("Storage script error message: %s" % e.output))
            error_msg = ("scp /var/lib/daisy/tecs/storage_auto_config "
                         "to %s failed!" % host_ip)
            return error_msg
        try:
            LOG.info(_("Config multipath for host %s" % host_ip))
            cmd = ('cd /home/tecs_install/storage_auto_config/; '
                   'python storage_auto_config.py check_multipath')
            subprocess.check_output('clush -S -w %s "%s"' % (host_ip, cmd),
                                    shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.info(_("Storage script error message: %s" % e.output))
            error_msg = "config Disk Array multipath on %s failed!" % host_ip
            return error_msg
    return error_msg
Exemplo n.º 5
0
def config_ceph_for_cinder(config_data, disk):
    """Enable the ceph cinder backend and label OSD partitions.

    Turns on cinder+ceph in kolla's globals.yml, then on every storage
    node writes a GPT partition labelled KOLLA_CEPH_OSD_BOOTSTRAP over
    the configured disk so kolla's ceph bootstrap can find it.

    :param config_data: dict holding at least 'Storage_ips'
    :param disk: dict holding the target device under 'partition'
    """
    ceph_config = {'enable_cinder': 'yes', 'enable_ceph': 'yes'}
    update_kolla_globals_yml(ceph_config)
    disk_name = disk.get('partition', None)
    storage_ip_list = config_data.get('Storage_ips')
    # ceph is only configured here with at least three storage nodes
    if len(storage_ip_list) > 2:
        LOG.info(
            _("this is CEPH backend environment \
                    with %s nodes" % len(storage_ip_list)))
        for storage_ip in storage_ip_list:
            fp = '/var/log/daisy/api.log'
            cmd = 'ssh -o StrictHostKeyChecking=no %s \
                  "parted %s -s -- mklabel gpt mkpart\
                   KOLLA_CEPH_OSD_BOOTSTRAP 1 -1" ' % \
                  (storage_ip, disk_name)
            daisy_cmn.subprocess_call(cmd, fp)
            # log the resulting partition table for diagnosis
            exc_result = subprocess.check_output(
                'ssh -o StrictHostKeyChecking=no %s \
                "parted %s print" ' % (storage_ip, disk_name),
                shell=True,
                stderr=subprocess.STDOUT)
            LOG.info(_("parted label is %s" % exc_result))
            LOG.info(
                _("execute labeled command successfully\
                        on %s node" % storage_ip))
Exemplo n.º 6
0
    def _run(self):
        """Push the kolla version package to the host and restart registry.

        Stops/removes any running registry container on the remote host,
        receives self.kolla_version_pkg_file from the daisy server via
        jasminec, unpacks it under daisy_kolla_ver_path and starts a new
        registry:2 container serving the unpacked images on port 4000.
        """
        host_ip = self.host['mgtip']

        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "docker ps"' % host_ip
        docker_result = subprocess.check_output(cmd,
                                                shell=True,
                                                stderr=subprocess.STDOUT)
        if 'registry' in docker_result:

            # stop registry server
            cmd = 'ssh -o StrictHostKeyChecking=no %s \
                  "docker stop registry"' % host_ip
            subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)

            cmd = 'ssh -o StrictHostKeyChecking=no %s \
                  "docker rm -f registry"' % host_ip
            subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)

        # make sure the version directory exists on the remote host
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "if [ ! -d %s ];then mkdir -p %s;fi" ' % \
              (host_ip, daisy_kolla_ver_path, daisy_kolla_ver_path)
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)

        # receive image from daisy server
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
               "jasminec %s %s > %s"' % (host_ip, host_ip, _get_local_ip(),
                                         self.kolla_version_pkg_file)
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)

        # clean up the old version files
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
               "rm -rf %s/tmp"' % (host_ip, daisy_kolla_ver_path)

        daisy_cmn.subprocess_call(cmd)

        # install the new version files
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
               "cd %s && tar mzxf %s"' % (host_ip, daisy_kolla_ver_path,
                                          self.kolla_version_pkg_file)

        subprocess.call(cmd, shell=True)

        registry_file = daisy_kolla_ver_path + "/tmp/registry"

        # start registry server again
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
               "docker run -d -p 4000:5000 --restart=always \
               -e REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/tmp/registry \
               -v %s:/tmp/registry  --name registry registry:2"'\
                % (host_ip, registry_file)

        subprocess.call(cmd, shell=True)
Exemplo n.º 7
0
def check_and_get_proton_version(daisy_proton_path):
    """Return the PROTON installer package name found in a directory.

    Looks for a file matching ^ZXDTC-PROTON.*\\.bin$ in
    *daisy_proton_path*; if found, marks it executable and returns its
    name, otherwise returns "".
    """
    proton_version_pkg_name = ""
    # Raw string so the regex backslash is not a (deprecated) str escape.
    get_proton_version_pkg = r"ls %s| grep ^ZXDTC-PROTON.*\.bin$" \
                             % daisy_proton_path
    obj = subprocess.Popen(
        get_proton_version_pkg, shell=True, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    (stdoutput, erroutput) = obj.communicate()
    if stdoutput:
        # First line of the ls output is the package file name.
        proton_version_pkg_name = stdoutput.split('\n')[0]
        proton_version_pkg_file = daisy_proton_path + proton_version_pkg_name
        chmod_for_proton_version = 'chmod +x %s' % proton_version_pkg_file
        daisy_cmn.subprocess_call(chmod_for_proton_version)
    return proton_version_pkg_name
Exemplo n.º 8
0
    def export_db(self, req, cluster_id):
        """
        Export daisy db data to tecs.conf and HA.conf.

        Renders the cluster's TECS configuration into
        /home/tecs_install/<cluster_id>/ and returns the paths of the
        generated tecs.conf and HA_1.conf files (empty string when a
        file was not generated).

        :param req: The WSGI/Webob Request object
        :param cluster_id: id of the cluster to export

        :raises HTTPBadRequest if x-install-cluster is missing
        """

        (tecs_config, mgnt_ip_list) =\
            instl.get_cluster_tecs_config(req, cluster_id)

        config_files = {'tecs_conf':'','ha_conf':''}
        tecs_install_path = "/home/tecs_install"
        tecs_config_file = ''
        if tecs_config:
            # recreate a clean per-cluster conf directory
            cluster_conf_path = tecs_install_path + "/" + cluster_id
            create_cluster_conf_path =\
                "rm -rf %s;mkdir %s" %(cluster_conf_path, cluster_conf_path)
            daisy_cmn.subprocess_call(create_cluster_conf_path)
            config.update_tecs_config(tecs_config, cluster_conf_path)

            # pick up the generated tecs.conf, if any
            get_tecs_conf = "ls %s|grep tecs.conf" % cluster_conf_path
            obj = subprocess.Popen(get_tecs_conf,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            (stdoutput, erroutput) = obj.communicate()
            tecs_conf_file = ""
            if stdoutput:
                tecs_conf_file = stdoutput.split('\n')[0]
                config_files['tecs_conf'] =\
                    cluster_conf_path + "/" + tecs_conf_file

            # pick up the generated HA_1.conf, if any
            get_ha_conf_cmd = "ls %s|grep HA_1.conf" % cluster_conf_path
            obj = subprocess.Popen(get_ha_conf_cmd,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            (stdoutput, erroutput) = obj.communicate()
            ha_conf_file = ""
            if stdoutput:
                ha_conf_file = stdoutput.split('\n')[0]
                config_files['ha_conf'] =\
                    cluster_conf_path + "/" + ha_conf_file
        else:
            LOG.info(_("No TECS config files generated."))

        return config_files
Exemplo n.º 9
0
def check_and_get_zenic_version(daisy_zenic_pkg_path):
    """Locate the ZENIC package (ZENIC*.zip) in a directory.

    :param daisy_zenic_pkg_path: directory to search (with trailing '/')
    :returns: tuple (full path, file name); both "" when nothing matches
    """
    zenic_version_pkg_file = ""
    zenic_version_pkg_name = ""
    # Raw string so the regex backslash is not a (deprecated) str escape.
    get_zenic_version_pkg = r"ls %s| grep ^ZENIC.*\.zip$" % daisy_zenic_pkg_path
    obj = subprocess.Popen(get_zenic_version_pkg,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    (stdoutput, erroutput) = obj.communicate()
    if stdoutput:
        # First line of the ls output is the package file name.
        zenic_version_pkg_name = stdoutput.split('\n')[0]
        zenic_version_pkg_file = daisy_zenic_pkg_path + zenic_version_pkg_name
        chmod_for_zenic_version = 'chmod +x %s' % zenic_version_pkg_file
        daisy_cmn.subprocess_call(chmod_for_zenic_version)
    return (zenic_version_pkg_file, zenic_version_pkg_name)
Exemplo n.º 10
0
def check_and_get_kolla_version(daisy_kolla_pkg_path, file_name=None):
    """Locate the kolla image package in a directory.

    :param daisy_kolla_pkg_path: directory to search (with trailing '/')
    :param file_name: exact file name to look for; when None, any file
                      matching ^kolla.*\\.tgz$ is accepted
    :returns: full path of the package, or "" when nothing matches
    """
    kolla_version_pkg_file = ""
    if file_name:
        get_kolla_version_pkg = "ls %s| grep %s$" % (daisy_kolla_pkg_path,
                                                     file_name)
    else:
        # Raw string so the regex backslash is not a (deprecated) escape.
        get_kolla_version_pkg = r"ls %s| grep ^kolla.*\.tgz$"\
                                % daisy_kolla_pkg_path
    obj = subprocess.Popen(get_kolla_version_pkg,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    (stdoutput, erroutput) = obj.communicate()
    if stdoutput:
        # First line of the ls output is the package file name.
        kolla_version_pkg_name = stdoutput.split('\n')[0]
        kolla_version_pkg_file = daisy_kolla_pkg_path + kolla_version_pkg_name
        chmod_for_kolla_version = 'chmod +x %s' % kolla_version_pkg_file
        daisy_cmn.subprocess_call(chmod_for_kolla_version)
    return kolla_version_pkg_file
Exemplo n.º 11
0
def check_and_get_kolla_version(daisy_kolla_pkg_path, file_name=None):
    """Find the kolla image package under *daisy_kolla_pkg_path*.

    Greps the directory listing either for an exact *file_name* or for
    the default kolla tarball pattern, marks the match executable and
    returns its full path ("" when no package is present).
    """
    if file_name:
        listing_cmd = "ls %s| grep %s$" % (daisy_kolla_pkg_path,
                                           file_name)
    else:
        listing_cmd = "ls %s| grep ^kolla.*\.tgz$" % daisy_kolla_pkg_path
    proc = subprocess.Popen(listing_cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if not out:
        return ""
    # The first listed entry is taken as the package.
    pkg_name = out.split('\n')[0]
    pkg_path = daisy_kolla_pkg_path + pkg_name
    daisy_cmn.subprocess_call('chmod +x %s' % pkg_path)
    return pkg_path
Exemplo n.º 12
0
def check_tfg_exist():
    """Look for a CGSL_VPLAT-*.iso TFG patch under daisy_tecs_path.

    :returns: full path of the iso (made executable), or "" when none
              is found
    """
    # Raw string so the regex backslash is not a (deprecated) str escape.
    get_tfg_patch = r"ls %s|grep CGSL_VPLAT-.*\.iso$" % daisy_tecs_path
    obj = subprocess.Popen(get_tfg_patch,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    (stdoutput, erroutput) = obj.communicate()
    tfg_patch_pkg_file = ""
    tfg_patch_pkg_name = ""
    if stdoutput:
        tfg_patch_pkg_name = stdoutput.split('\n')[0]
        tfg_patch_pkg_file = daisy_tecs_path + tfg_patch_pkg_name
        chmod_for_tfg_bin = 'chmod +x %s' % tfg_patch_pkg_file
        daisy_cmn.subprocess_call(chmod_for_tfg_bin)

    # Also covers the edge case of a non-empty listing whose first line
    # is empty.
    if not stdoutput or not tfg_patch_pkg_name:
        LOG.info(_("no CGSL_VPLAT iso file got in %s" % daisy_tecs_path))
        return ""
    return tfg_patch_pkg_file
Exemplo n.º 13
0
def thread_bin(req, host, role_id_list,uninstall_progress_percentage):
    """Uninstall ZENIC from one host (worker-thread body).

    Sets up ssh trust to the host, runs /home/zenic/node_stop.sh there
    via clush, and records progress or failure in the role table.  All
    command output is appended to a per-host log under
    /var/log/daisy/daisy_uninstall/.
    """
    host_ip = host['mgtip']
    password = host['rootpwd']
    cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
    daisy_cmn.subprocess_call(cmd)
    var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip
    with open(var_log_path, "w+") as fp:
        # trustme.sh pushes our ssh key so later clush calls need no password.
        cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
        daisy_cmn.subprocess_call(cmd,fp)

        try:
            exc_result = subprocess.check_output(
                'clush -S -b -w %s  /home/zenic/node_stop.sh' % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            update_progress_to_db(req, role_id_list, zenic_state['UNINSTALL_FAILED'])
            fp.write(e.output.strip())
        else:
            update_progress_to_db(req, role_id_list, zenic_state['UNINSTALLING'], uninstall_progress_percentage)
            fp.write(exc_result)
Exemplo n.º 14
0
def _thread_bin(req, host_ip, role_id_list,hosts_list):
    """Uninstall TECS from one host (worker-thread body).

    Removes the network-configuration rpm, pushes the ZXTECS bin to the
    host and runs its "clean" action, recording progress or failure in
    the role table.  All command output is appended to a per-host log
    under /var/log/daisy/daisy_uninstall/.
    """
    # uninstall network-configuration-1.1.1-15.x86_64.rpm
    update_progress_to_db(req,role_id_list,tecs_state['UNINSTALLING'],hosts_list,host_ip)
    tecs_cmn.TecsShellExector(host_ip, 'uninstall_rpm')

    cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
    daisy_cmn.subprocess_call(cmd)
    password = "******"
    var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_tecs.log" % host_ip
    with open(var_log_path, "w+") as fp:
        # trustme.sh pushes our ssh key so later clush calls need no password.
        cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
        daisy_cmn.subprocess_call(cmd,fp)
        cmd = 'clush -S -b -w %s "rm -rf /home/daisy_uninstall"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd,fp)
        cmd = 'clush -S -w %s "mkdir -p /home/daisy_uninstall"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd,fp)

        try:
            scp_bin_result = subprocess.check_output(
                'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin --dest=/home/daisy_uninstall' % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # NOTE(review): execution continues after a failed copy, so
            # the chmod/clean below will then also fail -- confirm
            # whether an early return was intended here.
            update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip)
            LOG.error(_("scp TECS bin for %s failed!" % host_ip))
            fp.write(e.output.strip())

        cmd = 'clush -S -w %s "chmod 777 /home/daisy_uninstall/*"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd,fp)

        try:
            exc_result = subprocess.check_output(
                'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip)
            LOG.error(_("Uninstall TECS for %s failed!" % host_ip))
            fp.write(e.output.strip())
        else:
            update_progress_to_db(req, role_id_list, tecs_state['ACTIVE'], hosts_list,host_ip)
            LOG.info(_("Uninstall TECS for %s successfully!" % host_ip))
            fp.write(exc_result)
Exemplo n.º 15
0
def update_password_yml():
    """Regenerate kolla's passwords.yml and pin the daisy defaults.

    Runs kolla-ansible's generate_passwords.py, then rewrites the
    keystone admin and opendaylight passwords to the fixed values
    "keystone" and "admin" that daisy expects.
    """
    LOG.info(_("begin to update kolla's passwd.yml file..."))
    cmd = 'python '\
          '/home/kolla_install/kolla-ansible/tools/generate_passwords.py'
    fp = '/var/log/daisy/api.log'
    daisy_cmn.subprocess_call(cmd, fp)
    # generate the password of horizon
    keystone_admin_password = ['keystone_admin_password']
    opendaylight_password = ['opendaylight_password']
    with open('/etc/kolla/passwords.yml', 'r') as f:
        # safe_load: the file is plain key/value yaml and must not be
        # able to instantiate arbitrary python objects (yaml.load
        # without a Loader is deprecated/unsafe in PyYAML >= 5.1).
        passwords = yaml.safe_load(f.read())
    for k, v in passwords.items():
        if k in keystone_admin_password:
            passwords[k] = "keystone"
        elif k in opendaylight_password:
            passwords[k] = 'admin'
    # The with-blocks close the files; no explicit close() needed.
    with open('/etc/kolla/passwords.yml', 'w') as f:
        f.write(yaml.dump(passwords, default_flow_style=False))
    LOG.info(_("generate kolla's passwd.yml file ok..."))
Exemplo n.º 16
0
def update_password_yml():
    """Regenerate kolla's passwords.yml and pin the daisy defaults.

    Runs kolla-ansible's generate_passwords.py, then rewrites the
    keystone admin and opendaylight passwords to the fixed values
    "keystone" and "admin" that daisy expects.
    """
    LOG.info(_("begin to update kolla's passwd.yml file..."))
    cmd = 'python '\
          '/home/kolla_install/kolla-ansible/tools/generate_passwords.py'
    fp = '/var/log/daisy/api.log'
    daisy_cmn.subprocess_call(cmd, fp)
    # generate the password of horizon
    keystone_admin_password = ['keystone_admin_password']
    opendaylight_password = ['opendaylight_password']
    with open('/etc/kolla/passwords.yml', 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader is
        # deprecated/unsafe in PyYAML >= 5.1; consider yaml.safe_load.
        passwords = yaml.load(f.read())
    for k, v in passwords.items():
        if k in keystone_admin_password:
            passwords[k] = "keystone"
        elif k in opendaylight_password:
            passwords[k] = 'admin'
    # NOTE(review): f was already closed by the with-block; this close
    # (and the one inside the write block below) is redundant.
    f.close()
    with open('/etc/kolla/passwords.yml', 'w') as f:
        f.write(yaml.dump(passwords, default_flow_style=False))
        f.close()
    LOG.info(_("generate kolla's passwd.yml file ok..."))
Exemplo n.º 17
0
def config_ha_cinder_volume(volume_disk_info, controller_ha_ips):
    """Configure Disk Array cinder volumes on each HA controller.

    Writes *volume_disk_info* to the local cinder.json template, then
    for every controller sets up ssh trust, copies the
    storage_auto_config tool over and runs its cinder_conf step there.

    :param volume_disk_info: dict dumped to cinder.json for the tool
    :param controller_ha_ips: iterable of HA controller management IPs
    :returns: "" on success, otherwise an error description for the
              first node that failed
    """
    error_msg = ""
    # Start from a clean template directory so only cinder.json is used.
    cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
    daisy_cmn.subprocess_call(cmd)
    with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json", "w") as fp:
        json.dump(volume_disk_info, fp, indent=2)
    for host_ip in controller_ha_ips:
        password = "******"
        # Establish password-less ssh/clush access to the node.
        cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
        daisy_cmn.subprocess_call(cmd)
        cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd)
        try:
            subprocess.check_output(
                'scp -o StrictHostKeyChecking=no -r '
                '/var/lib/daisy/tecs/storage_auto_config '
                '%s:/home/tecs_install' % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # Log the scp output before giving up on this node.
            LOG.info(_("Storage script error message: %s" % e.output))
            error_msg = ("scp /var/lib/daisy/tecs/storage_auto_config "
                         "to %s failed!" % host_ip)
            return error_msg
        try:
            LOG.info(_("Config cinder volume for host %s" % host_ip))
            cmd = ('cd /home/tecs_install/storage_auto_config/; '
                   'python storage_auto_config.py cinder_conf %s' % host_ip)
            subprocess.check_output('clush -S -w %s "%s"' % (host_ip, cmd),
                                    shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.info(_("Storage script error message: %s" % e.output))
            error_msg = "config Disk Array cinder volumes on %s failed!" % host_ip
            return error_msg
    return error_msg
Exemplo n.º 18
0
def config_lvm_for_cinder(config_data):
    """Enable the lvm cinder backend for an all-in-one deployment.

    Turns on cinder's lvm backend in kolla's globals.yml and, when
    there is exactly one storage node, builds a 20G loop-device backed
    cinder-volumes volume group on it.

    :param config_data: dict holding at least 'Storage_ips'
    """
    lvm_config = {
        'enable_cinder': 'yes',
        'enable_cinder_backend_lvm': 'yes',
        'cinder_volume_group': 'cinder-volumes'
    }
    update_kolla_globals_yml(lvm_config)
    storage_ip_list = config_data.get('Storage_ips')
    if len(storage_ip_list) == 1:
        LOG.info(
            _("this is all in one environment \
                    to enable ceph backend"))
        storage_ip = storage_ip_list[0]
        fp = '/var/log/daisy/api.log'
        # 20G backing file for the cinder volume group
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "dd if=/dev/zero of=/var/lib/cinder_data.img\
               bs=1G count=20" ' % \
              (storage_ip)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "losetup --find --show /var/lib/cinder_data.img"' % \
              (storage_ip)
        obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        # losetup prints the loop device it allocated (e.g. /dev/loop0)
        dev_name = obj.stdout.read().decode('utf8')
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "pvcreate %s" ' % \
              (storage_ip, dev_name)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "vgcreate cinder-volumes %s" ' % \
              (storage_ip, dev_name)
        daisy_cmn.subprocess_call(cmd, fp)
        LOG.info(
            _("execute all four commands on \
                    storage node %s ok!" % storage_ip))
Exemplo n.º 19
0
def config_lvm_for_cinder(config_data):
    """Enable the lvm cinder backend for an all-in-one deployment.

    Turns on cinder's lvm backend in kolla's globals.yml and, when
    there is exactly one storage node, builds a 20G loop-device backed
    cinder-volumes volume group on it.

    :param config_data: dict holding at least 'Storage_ips'
    """
    lvm_config = {'enable_cinder': 'yes',
                  'enable_cinder_backend_lvm': 'yes',
                  'cinder_volume_group': 'cinder-volumes'}
    update_kolla_globals_yml(lvm_config)
    storage_ip_list = config_data.get('Storage_ips')
    if len(storage_ip_list) == 1:
        LOG.info(_("this is all in one environment \
                    to enable ceph backend"))
        storage_ip = storage_ip_list[0]
        fp = '/var/log/daisy/api.log'
        # 20G backing file for the cinder volume group
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "dd if=/dev/zero of=/var/lib/cinder_data.img\
               bs=1G count=20" ' % \
              (storage_ip)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "losetup --find --show /var/lib/cinder_data.img"' % \
              (storage_ip)
        obj = subprocess.Popen(cmd,
                               stdout=subprocess.PIPE,
                               shell=True)
        # losetup prints the loop device it allocated (e.g. /dev/loop0)
        dev_name = obj.stdout.read().decode('utf8')
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "pvcreate %s" ' % \
              (storage_ip, dev_name)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "vgcreate cinder-volumes %s" ' % \
              (storage_ip, dev_name)
        daisy_cmn.subprocess_call(cmd, fp)
        LOG.info(_("execute all four commands on \
                    storage node %s ok!" % storage_ip))
Exemplo n.º 20
0
def config_ceph_for_cinder(config_data, disk):
    """Enable the ceph cinder backend and label OSD partitions.

    Turns on cinder+ceph in kolla's globals.yml, then on every storage
    node writes a GPT partition labelled KOLLA_CEPH_OSD_BOOTSTRAP over
    the configured disk so kolla's ceph bootstrap can find it.

    :param config_data: dict holding at least 'Storage_ips'
    :param disk: dict holding the target device under 'partition'
    """
    ceph_config = {'enable_cinder': 'yes',
                   'enable_ceph': 'yes'}
    update_kolla_globals_yml(ceph_config)
    disk_name = disk.get('partition', None)
    storage_ip_list = config_data.get('Storage_ips')
    # ceph is only configured here with at least three storage nodes
    if len(storage_ip_list) > 2:
        LOG.info(_("this is CEPH backend environment \
                    with %s nodes" % len(storage_ip_list)))
        for storage_ip in storage_ip_list:
            fp = '/var/log/daisy/api.log'
            cmd = 'ssh -o StrictHostKeyChecking=no %s \
                  "parted %s -s -- mklabel gpt mkpart\
                   KOLLA_CEPH_OSD_BOOTSTRAP 1 -1" ' % \
                  (storage_ip, disk_name)
            daisy_cmn.subprocess_call(cmd, fp)
            # log the resulting partition table for diagnosis
            exc_result = subprocess.check_output(
                'ssh -o StrictHostKeyChecking=no %s \
                "parted %s print" ' % (storage_ip, disk_name),
                shell=True, stderr=subprocess.STDOUT)
            LOG.info(_("parted label is %s" % exc_result))
            LOG.info(_("execute labeled command successfully\
                        on %s node" % storage_ip))
Exemplo n.º 21
0
def version_load(kolla_version_pkg_file, hosts_list):
    """Unpack a kolla image package locally and serve it via registry:2.

    Stops/removes any running registry container, untars the package
    under daisy_kolla_ver_path and starts a fresh registry:2 container
    on port 4000 backed by the unpacked /tmp/registry tree.

    :param kolla_version_pkg_file: path of the kolla version tarball
    :param hosts_list: cluster hosts (unused here)
    """
    get_container_id = "docker ps -a |grep registry |awk -F ' ' '{printf $1}' "
    container_id = subprocess.check_output(get_container_id, shell=True)
    if container_id:
        stop_container = 'docker stop %s' % container_id
        daisy_cmn.subprocess_call(stop_container)
        remove_container = 'docker rm %s' % container_id
        daisy_cmn.subprocess_call(remove_container)

    remove_tmp_registry = 'rm -rf %s/tmp' % daisy_kolla_ver_path
    daisy_cmn.subprocess_call(remove_tmp_registry)

    LOG.info(_('begin to unzip kolla image,please wait.'))
    # tar -m: do not restore file mtimes from the archive
    tar_for_kolla_version = 'cd %s && tar -mzxf %s ' % (daisy_kolla_ver_path,
                                                        kolla_version_pkg_file)
    subprocess.call(tar_for_kolla_version, shell=True)
    LOG.info(_('unzip kolla image successfully!'))

    registry_file = daisy_kolla_ver_path + "/tmp/registry"
    daisy_cmn.subprocess_call('docker run -d -p 4000:5000 --restart=always \
        -e REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/tmp/registry \
        -v %s:/tmp/registry  --name registry registry:2' % registry_file)
    LOG.info(_('docker server loaded finished.'))
Exemplo n.º 22
0
def version_load(kolla_version_pkg_file, hosts_list):
    """Unpack a kolla image package locally and serve it via registry:2.

    Stops/removes any running registry container, untars the package
    under daisy_kolla_ver_path and starts a fresh registry:2 container
    on port 4000 backed by the unpacked /tmp/registry tree.

    :param kolla_version_pkg_file: path of the kolla version tarball
    :param hosts_list: cluster hosts (unused here)
    """
    get_container_id = "docker ps -a |grep registry |awk -F ' ' '{printf $1}' "
    container_id = subprocess.check_output(get_container_id, shell=True)
    if container_id:
        stop_container = 'docker stop %s' % container_id
        daisy_cmn.subprocess_call(stop_container)
        remove_container = 'docker rm %s' % container_id
        daisy_cmn.subprocess_call(remove_container)

    remove_tmp_registry = 'rm -rf %s/tmp' % daisy_kolla_ver_path
    daisy_cmn.subprocess_call(remove_tmp_registry)

    LOG.info(_('begin to unzip kolla image,please wait.'))
    # tar -m: do not restore file mtimes from the archive
    tar_for_kolla_version = 'cd %s && tar -mzxf %s ' % (daisy_kolla_ver_path,
                                                        kolla_version_pkg_file)
    subprocess.call(tar_for_kolla_version, shell=True)
    LOG.info(_('unzip kolla image successfully!'))

    registry_file = daisy_kolla_ver_path + "/tmp/registry"
    daisy_cmn.subprocess_call(
        'docker run -d -p 4000:5000 --restart=always \
        -e REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/tmp/registry \
        -v %s:/tmp/registry  --name registry registry:2' % registry_file)
    LOG.info(_('docker server loaded finished.'))
Exemplo n.º 23
0
    def upgrade(self, req, cluster_id):
        """
        update TECS to a cluster.

        Upgrades HA controller nodes serially first, then the remaining
        (compute) nodes in parallel batches, and finally rolls the
        per-role status up into the database.

        :param req: The WSGI/Webob Request object
        :param cluster_id: id of the cluster to upgrade

        :raises HTTPBadRequest if x-install-cluster is missing
        """
        (role_id_list, host_ip_list, host_ha_list, hosts_list) = \
            self._get_roles_and_hosts_ip_list(req, cluster_id)
        if role_id_list:
            if not host_ip_list:
                msg = _("there is no host in cluster %s") % cluster_id
                raise exception.ThreadBinException(msg)
            unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 1)
            if unreached_hosts:
                self.message = "hosts %s ping failed" % unreached_hosts
                raise exception.NotFound(message=self.message)

            # forget stale host keys; nodes may have been reinstalled
            daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts')

            if os_handle.check_tfg_exist():
                os_handle.upgrade_os(req, hosts_list)
                # the OS upgrade reboots nodes, so allow a longer timeout
                unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 30)
                if unreached_hosts:
                    self.message = "hosts %s ping failed after tfg upgrade" % unreached_hosts
                    raise exception.NotFound(message=self.message)
            # check and get TECS version
            tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(tecs_cmn.daisy_tecs_path)
            if not tecs_version_pkg_file:
                self.state = tecs_state['INSTALL_FAILED']
                self.message = "TECS version file not found in %s" % tecs_cmn.daisy_tecs_path
                raise exception.NotFound(message=self.message)
            threads = []
            LOG.info(_("Begin to update TECS controller nodes, please waiting...."))
            upgrd.update_progress_to_db(req, role_id_list, tecs_state['UPDATING'], hosts_list)
            # controller (HA) nodes are updated serially, one at a time
            for host_ip in host_ha_list:
                LOG.info(_("Update TECS controller node %s..." % host_ip))
                rc = upgrd.thread_bin(req, role_id_list, host_ip, hosts_list)
                if rc == 0:
                    LOG.info(_("Update TECS for %s successfully" % host_ip))
                else:
                    LOG.info(_("Update TECS failed for %s, return %s" % (host_ip, rc)))
                    return
            LOG.info(_("Begin to update TECS other nodes, please waiting...."))
            max_parallel_upgrade_number = int(CONF.max_parallel_os_upgrade_number)
            # BUGFIX: the original used "host_ip_list - host_ha_list",
            # which is not valid for lists (and a set would not support
            # the slicing below); filter explicitly instead.
            compute_ip_list = [ip for ip in host_ip_list
                               if ip not in host_ha_list]
            while compute_ip_list:
                threads = []
                if len(compute_ip_list) > max_parallel_upgrade_number:
                    upgrade_hosts = compute_ip_list[:max_parallel_upgrade_number]
                    compute_ip_list = compute_ip_list[max_parallel_upgrade_number:]
                else:
                    upgrade_hosts = compute_ip_list
                    compute_ip_list = []
                for host_ip in upgrade_hosts:
                    t = threading.Thread(target=upgrd.thread_bin,
                                         args=(req, role_id_list, host_ip, hosts_list))
                    t.setDaemon(True)
                    t.start()
                    threads.append(t)
                try:
                    for t in threads:
                        t.join()
                except Exception:
                    # narrowed from a bare except; join failures are only logged
                    LOG.warn(_("Join update thread %s failed!" % t))

            # roll each role up to UPDATE_FAILED or ACTIVE from its hosts
            for role_id in role_id_list:
                role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
                for role_host in role_hosts:
                    if (role_host['status'] == tecs_state['UPDATE_FAILED'] or
                            role_host['status'] == tecs_state['UPDATING']):
                        role_id = [role_host['role_id']]
                        upgrd.update_progress_to_db(req,
                                                    role_id,
                                                    tecs_state['UPDATE_FAILED'],
                                                    hosts_list)
                        break
                    elif role_host['status'] == tecs_state['ACTIVE']:
                        role_id = [role_host['role_id']]
                        upgrd.update_progress_to_db(req,
                                                    role_id,
                                                    tecs_state['ACTIVE'],
                                                    hosts_list)
Exemplo n.º 24
0
    def _run(self):
        """Push the kolla version package to the host and restart registry.

        Stops/removes any running registry container on the remote host,
        receives self.kolla_version_pkg_file from the daisy server via
        jasminec, unpacks it under daisy_kolla_ver_path and starts a new
        registry:2 container serving the unpacked images on port 4000.
        """
        host_ip = self.host['mgtip']

        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "docker ps"' % host_ip
        docker_result = subprocess.check_output(cmd,
                                                shell=True,
                                                stderr=subprocess.STDOUT)
        if 'registry' in docker_result:

            # stop registry server
            cmd = 'ssh -o StrictHostKeyChecking=no %s \
                  "docker stop registry"' % host_ip
            subprocess.check_output(cmd,
                                    shell=True,
                                    stderr=subprocess.STDOUT)

            cmd = 'ssh -o StrictHostKeyChecking=no %s \
                  "docker rm -f registry"' % host_ip
            subprocess.check_output(cmd,
                                    shell=True,
                                    stderr=subprocess.STDOUT)

        # make sure the version directory exists on the remote host
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
              "if [ ! -d %s ];then mkdir -p %s;fi" ' % \
              (host_ip, daisy_kolla_ver_path, daisy_kolla_ver_path)
        subprocess.check_output(cmd,
                                shell=True,
                                stderr=subprocess.STDOUT)

        # receive image from daisy server
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
               "jasminec %s %s > %s"' % (host_ip,
                                         host_ip,
                                         _get_local_ip(),
                                         self.kolla_version_pkg_file)
        subprocess.check_output(cmd,
                                shell=True,
                                stderr=subprocess.STDOUT)

        # clean up the old version files
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
               "rm -rf %s/tmp"' % (host_ip,
                                   daisy_kolla_ver_path)

        daisy_cmn.subprocess_call(cmd)

        # install the new version files
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
               "cd %s && tar mzxf %s"' % (host_ip,
                                          daisy_kolla_ver_path,
                                          self.kolla_version_pkg_file)

        subprocess.call(cmd, shell=True)

        registry_file = daisy_kolla_ver_path + "/tmp/registry"

        # start registry server again
        cmd = 'ssh -o StrictHostKeyChecking=no %s \
               "docker run -d -p 4000:5000 --restart=always \
               -e REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/tmp/registry \
               -v %s:/tmp/registry  --name registry registry:2"'\
                % (host_ip, registry_file)

        subprocess.call(cmd, shell=True)
Exemplo n.º 25
0
def _thread_bin(req, cluster_id, host, root_passwd, fp, host_name_ip_list,
                host_prepare_file, docker_registry_ip, role_id_list):
    """Prepare one target host for kolla installation.

    Creates the prepare directory on the target host, copies the required
    tool packages (daisy4nfv-jasmine rpm, docker-engine rpm,
    registry-server tar) and prepare.sh to it, then executes prepare.sh
    remotely and records progress in the database.

    :param req: API request context.
    :param cluster_id: id of the cluster being installed (kept for
        thread-entry signature compatibility; not used directly here).
    :param host: host metadata dict; only 'mgtip' is read.
    :param root_passwd: root password of the target host (kept for
        signature compatibility; trust is assumed already established).
    :param fp: open log file object receiving subprocess output.
    :param host_name_ip_list: hostname/ip mappings for /etc/hosts setup.
    :param host_prepare_file: directory on the target host that receives
        the prepare files.
    :param docker_registry_ip: registry ip passed as argument to prepare.sh.
    :param role_id_list: roles whose progress rows are updated on success.
    :raises exception.InstallException: when prepare.sh fails on the host.
    """
    host_ip = host['mgtip']

    # Make the target's /etc/hosts consistent with the whole cluster.
    config_nodes_hosts(host_name_ip_list, host_ip)
    cmd = 'ssh -o StrictHostKeyChecking=no %s \
          "if [ ! -d %s ];then mkdir %s;fi" ' % \
          (host_ip, host_prepare_file, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    LOG.info("Remote directory created on %s", host_ip)

    # scp daisy4nfv-jasmine.rpm to the same dir of prepare.sh at target host
    cmd = "scp -o ConnectTimeout=10 \
           /var/lib/daisy/tools/daisy4nfv-jasmine*.rpm \
           root@%s:%s" % (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    # scp docker-engine.rpm to the same dir of prepare.sh at target host
    cmd = "scp -o ConnectTimeout=10 \
           /var/lib/daisy/tools/docker-engine.rpm \
           root@%s:%s" % (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    # scp registry-server.tar to the same dir of prepare.sh at target host
    cmd = "scp -o ConnectTimeout=10 \
           /var/lib/daisy/tools/registry-server.tar \
           root@%s:%s" % (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    LOG.info("Files copied successfully to %s", host_ip)

    cmd = "scp -o ConnectTimeout=10 \
           /var/lib/daisy/kolla/prepare.sh \
           root@%s:%s" % (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    cmd = 'ssh -o StrictHostKeyChecking=no %s \
          chmod u+x %s/prepare.sh' % \
          (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    LOG.info("Ready to execute prepare.sh on %s", host_ip)

    try:
        exc_result = subprocess.check_output(
            'ssh -o StrictHostKeyChecking='
            'no %s %s/prepare.sh %s' %
            (host_ip, host_prepare_file, docker_registry_ip),
            shell=True,
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # BUG FIX: this used to be `"..." , host_ip`, which built a
        # (format, arg) tuple instead of a formatted string, so the log
        # and the raised exception carried a tuple repr.
        message = "exec prepare.sh on %s failed!" % host_ip
        LOG.error(message)
        fp.write(e.output.strip())
        raise exception.InstallException(message)
    else:
        LOG.info("prepare for %s successfully!", host_ip)
        fp.write(exc_result)
        message = "Preparing for installation successful!"
        update_host_progress_to_db(req, role_id_list, host,
                                   kolla_state['INSTALLING'], message, 10)
Exemplo n.º 26
0
    def prepare_ssh_discovered_node(self, req, fp, discover_host_meta):
        """Prepare an ssh-discovered node so it can report its node info.

        Establishes trust via trustme.sh, creates and opens up
        /home/daisy/discover_host on the node, copies getnodeinfo.sh to it
        and installs that script's runtime dependencies (epel-release, jq).
        Any failure marks the host DISCOVERY_FAILED in the database, logs
        the error to *fp* and returns without raising.

        :param req: API request context.
        :param fp: open log file object receiving error output.
        :param discover_host_meta: dict with at least 'ip' and 'passwd'.
        """
        try:
            # NOTE(review): output is scanned for known failure strings;
            # assumes a str result (Python 2 semantics) — confirm on py3.
            trustme_result = subprocess.check_output(
                '/var/lib/daisy/trustme.sh %s %s' %
                (discover_host_meta['ip'], discover_host_meta['passwd']),
                shell=True, stderr=subprocess.STDOUT)
            if 'Permission denied' in trustme_result:
                # when passwd was wrong
                update_info = {}
                update_info['status'] = 'DISCOVERY_FAILED'
                # BUG FIX: the adjacent literals used to join as
                # "Passwd was wrong, dotrustme.sh ..." (missing space).
                update_info['message'] = "Passwd was wrong, do" \
                                         " trustme.sh %s failed!"\
                                         % discover_host_meta['ip']
                self.update_progress_to_db(req, update_info,
                                           discover_host_meta)
                msg = (_("Do trustme.sh %s failed!" %
                         discover_host_meta['ip']))
                LOG.warning(_(msg))
                fp.write(msg)
            elif 'is unreachable' in trustme_result:
                # when host ip was unreachable
                update_info = {}
                update_info['status'] = 'DISCOVERY_FAILED'
                update_info['message'] = "Host ip was unreachable," \
                                         " do trustme.sh %s failed!" %\
                                         discover_host_meta['ip']
                self.update_progress_to_db(req, update_info,
                                           discover_host_meta)
                msg = (_("Do trustme.sh %s failed!" %
                         discover_host_meta['ip']))
                LOG.warning(_(msg))
        except subprocess.CalledProcessError as e:
            update_info = {}
            update_info['status'] = 'DISCOVERY_FAILED'
            msg = "discover host for %s failed! raise CalledProcessError" \
                  " when execute trustme.sh." % discover_host_meta['ip']
            update_info['message'] = msg
            self.update_progress_to_db(
                req, update_info, discover_host_meta)
            LOG.error(_(msg))
            fp.write(e.output.strip())
            return
        except Exception:
            # Catch-all fallback: any other error still marks the host
            # DISCOVERY_FAILED instead of killing the discovery thread.
            update_info = {}
            update_info['status'] = 'DISCOVERY_FAILED'
            update_info['message'] = "discover host for %s failed!" \
                                     % discover_host_meta['ip']
            self.update_progress_to_db(
                req, update_info, discover_host_meta)
            LOG.error(_("discover host for %s failed!"
                        % discover_host_meta['ip']))
            fp.write("discover host for %s failed!"
                     % discover_host_meta['ip'])
            return

        # Recreate a clean, world-writable drop directory on the node.
        try:
            cmd = 'clush -S -b -w %s "rm -rf /home/daisy/discover_host"'\
                  % (discover_host_meta['ip'],)
            daisy_cmn.subprocess_call(cmd, fp)
            cmd = 'clush -S -w %s "mkdir -p /home/daisy/discover_host"'\
                  % (discover_host_meta['ip'],)
            daisy_cmn.subprocess_call(cmd, fp)
            cmd = 'clush -S -w %s "chmod 777 /home/daisy/discover_host"'\
                  % (discover_host_meta['ip'],)
            daisy_cmn.subprocess_call(cmd, fp)
        except subprocess.CalledProcessError as e:
            update_info = {}
            update_info['status'] = 'DISCOVERY_FAILED'
            msg = "raise CalledProcessError when execute cmd for host %s."\
                  % discover_host_meta['ip']
            update_info['message'] = msg
            self.update_progress_to_db(
                req, update_info, discover_host_meta)
            LOG.error(_(msg))
            fp.write(e.output.strip())
            return

        # Copy getnodeinfo.sh onto the node.
        try:
            subprocess.check_output(
                'clush -S -w %s -c /var/lib/daisy/kolla/getnodeinfo.sh '
                '--dest=/home/daisy/discover_host' %
                (discover_host_meta['ip'],),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            update_info = {}
            update_info['status'] = 'DISCOVERY_FAILED'
            # BUG FIX: the old format string had no %s placeholder, so
            # applying "%" raised TypeError inside this error handler.
            update_info['message'] = "scp getnodeinfo.sh for %s" \
                                     " failed!" % discover_host_meta['ip']
            self.update_progress_to_db(req, update_info,
                                       discover_host_meta)
            LOG.error(_("scp getnodeinfo.sh for %s failed!"
                        % discover_host_meta['ip']))
            fp.write(e.output.strip())
            return

        # Install dependencies needed by getnodeinfo.sh: epel repo, then jq.
        try:
            subprocess.check_output(
                'clush -S -w %s yum install -y epel-release'
                % (discover_host_meta['ip'],),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            update_info = {}
            update_info['status'] = 'DISCOVERY_FAILED'
            update_info['message'] = \
                "creat repo epel for %s failed!"\
                % discover_host_meta['ip']
            self.update_progress_to_db(req, update_info,
                                       discover_host_meta)
            LOG.error(_("creat repo epel for %s failed!"
                        % discover_host_meta['ip']))
            fp.write(e.output.strip())

            return
        try:
            subprocess.check_output(
                'clush -S -w %s yum install -y jq'
                % (discover_host_meta['ip'],),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            update_info = {}
            update_info['status'] = 'DISCOVERY_FAILED'
            update_info['message'] = \
                "install jq rpm for %s failed!"\
                % discover_host_meta['ip']
            self.update_progress_to_db(req, update_info,
                                       discover_host_meta)
            LOG.error(_("install jq rpm for %s failed!"
                        % discover_host_meta['ip']))
            fp.write(e.output.strip())
            return
Exemplo n.º 27
0
 def getnodeinfo_listen_port(self, listen_port):
     """Rewrite the listen_port= line of getnodeinfo.sh to *listen_port*.

     Locates the line number of the existing "listen_port=" assignment
     with grep, then replaces that whole line in place via sed.
     """
     script = '/var/lib/daisy/kolla/getnodeinfo.sh'
     sed_cmd = ('port_linenumber=`grep -n "listen_port=" %s'
                '|cut -d ":" -f 1` && '
                'sed -i "${port_linenumber}c listen_port=\'%s\'" %s'
                % (script, listen_port, script))
     daisy_cmn.subprocess_call(sed_cmd)
Exemplo n.º 28
0
    def _run(self):
        """Drive a full kolla-based OpenStack installation for the cluster.

        Phases, each updating host progress rows in the database:
        1. fetch cluster metadata and kolla config; ping-check all hosts;
        2. establish trust and configure networking on non-ssh hosts
           (one thread per host);
        3. run prepare.sh on every host (one thread per host);
        4. locate, load and multicast the kolla version package, then
           regenerate the kolla config files;
        5. run kolla-ansible prechecks, deploy, and post-deploy, polling
           the subprocess every 5s with a 1440-iteration (~2h) timeout.

        Raises ThreadBinException, InstallException,
        InstallTimeoutException or NotFound on failure.
        """
        cluster_data = registry.get_cluster_metadata(self.req.context,
                                                     self.cluster_id)

        (kolla_config, self.mgt_ip_list, host_name_ip_list) = \
            kolla_cmn.get_cluster_kolla_config(self.req, self.cluster_id)
        if not self.mgt_ip_list:
            msg = _("there is no host in cluster %s") % self.cluster_id
            LOG.error(msg)
            raise exception.ThreadBinException(msg)

        # All management IPs must answer ping before anything is attempted.
        unreached_hosts = _check_ping_hosts(self.mgt_ip_list, self.ping_times)
        if unreached_hosts:
            self.message = "hosts %s ping failed" % unreached_hosts
            LOG.error(self.message)
            raise exception.InstallException(self.message)

        root_passwd = 'ossdbg1'
        threads_net = []
        for mgnt_ip in self.mgt_ip_list:
            check_hosts_id = _get_hosts_id_by_mgnt_ips(self.req,
                                                       self.cluster_id,
                                                       mgnt_ip.split(","))
            is_ssh_host = daisy_cmn._judge_ssh_host(self.req,
                                                    check_hosts_id[0])
            if not is_ssh_host:
                # Establish passwordless trust, then configure the host's
                # network in a background thread.
                cmd = '/var/lib/daisy/trustme.sh %s %s' % \
                      (mgnt_ip, root_passwd)
                daisy_cmn.subprocess_call(cmd)
                LOG.info(_("Begin to config network on %s" % mgnt_ip))
                ssh_host_info = {'ip': mgnt_ip, 'root_pwd': root_passwd}
                configure_external_interface_vlan(self.req, self.cluster_id,
                                                  mgnt_ip)

                t_net = threading.Thread(target=api_cmn.config_network,
                                         args=(ssh_host_info, 'kolla'))
                t_net.setDaemon(True)
                t_net.start()
                threads_net.append(t_net)
        try:
            LOG.info(
                _("config network threads"
                  " have started, please waiting...."))
            for t_net in threads_net:
                t_net.join()
        # NOTE(review): bare except also swallows KeyboardInterrupt; t_net
        # is unbound here if threads_net is empty — confirm intent.
        except:
            LOG.error("join config network " "thread %s failed!", t_net)

        # Give the reconfigured network time to settle before proceeding.
        time.sleep(20)

        (role_id_list, host_id_list, hosts_list) = \
            kolla_cmn.get_roles_and_hosts_list(self.req, self.cluster_id)
        self.message = "Begin install"
        update_all_host_progress_to_db(self.req, role_id_list, host_id_list,
                                       kolla_state['INSTALLING'], self.message,
                                       5)

        docker_registry_ip = kolla_cmn._get_local_ip()

        # Do prepare.sh for each host
        threads = []
        for host in hosts_list:
            t = threading.Thread(target=thread_bin,
                                 args=(self.req, self.cluster_id, host,
                                       root_passwd, host_name_ip_list,
                                       self.host_prepare_file,
                                       docker_registry_ip, role_id_list))
            t.setDaemon(True)
            t.start()
            threads.append(t)
            LOG.info("prepare.sh threads for %s started", host['mgtip'])

        try:
            LOG.info(
                _("prepare kolla installation threads have started, "
                  "please waiting...."))
            for t in threads:
                t.join()
        except:
            LOG.error("join kolla prepare installation "
                      "thread %s failed!", t)

        # thread_bin workers report failure through the shared thread_flag
        # dict rather than exceptions.
        if thread_flag.get('flag', None) is not None and \
                thread_flag['flag'] == False:
            self.message = "prepare deploy nodes failed!"
            LOG.error(self.message)
            raise exception.InstallException(self.message)

        # Check, load and multicast version
        if cluster_data.get('tecs_version_id', None):
            vid = cluster_data['tecs_version_id']
            version_info = registry.get_version_metadata(self.req.context, vid)
            kolla_version_pkg_file = \
                kolla_cmn.check_and_get_kolla_version(daisy_kolla_ver_path,
                                                      version_info['name'])
        else:
            # No version pinned on the cluster: take whatever package is
            # found under daisy_kolla_ver_path.
            kolla_version_pkg_file =\
                kolla_cmn.check_and_get_kolla_version(daisy_kolla_ver_path)
        if not kolla_version_pkg_file:
            self.state = kolla_state['INSTALL_FAILED']
            self.message =\
                "kolla version file not found in %s" % daisy_kolla_ver_path
            raise exception.NotFound(message=self.message)

        try:
            LOG.info(_("load kolla registry..."))
            kolla_cmn.version_load(kolla_version_pkg_file, hosts_list)
        except exception.SubprocessCmdFailed as e:
            self.message = "load kolla registry failed!"
            LOG.error(self.message)
            raise exception.InstallException(self.message)

        res = kolla_cmn.version_load_mcast(kolla_version_pkg_file, hosts_list)
        update_all_host_progress_to_db(self.req, role_id_list, host_id_list,
                                       kolla_state['INSTALLING'], self.message,
                                       15)

        # always call generate_kolla_config_file after version_load()
        LOG.info(_("begin to generate kolla config file ..."))
        (kolla_config, self.mgt_ip_list, host_name_ip_list) = \
            kolla_cmn.get_cluster_kolla_config(self.req, self.cluster_id)
        kolla_cmn.generate_kolla_config_file(self.req, self.cluster_id,
                                             kolla_config, res)
        LOG.info(_("generate kolla config file in /etc/kolla/ dir..."))

        # Kolla prechecks
        with open(self.precheck_file, "w+") as fp:
            LOG.info(_("kolla-ansible precheck..."))
            cmd = subprocess.Popen(
                'cd %s/kolla-ansible && ./tools/kolla-ansible prechecks '
                ' -i %s/kolla-ansible/ansible/inventory/multinode -vvv' %
                (self.kolla_file, self.kolla_file),
                shell=True,
                stdout=fp,
                stderr=fp)
            execute_times = 0
            # Poll the precheck process every 5s; 1440 polls ~= 2 hours.
            while True:
                time.sleep(5)
                return_code = cmd.poll()
                if return_code == 0:
                    break
                elif return_code == 1:
                    self.message = "kolla-ansible preckecks failed!"
                    LOG.error(self.message)
                    raise exception.InstallException(self.message)
                else:
                    # Still running (poll() returned None).
                    if execute_times >= 1440:
                        self.message = "kolla-ansible preckecks timeout"
                        LOG.error(self.message)
                        raise exception.InstallTimeoutException(
                            cluster_id=self.cluster_id)
                execute_times += 1

            self.message = "kolla-ansible preckecks successfully(%d)!" % \
                (return_code)
            self.progress = 20
            update_all_host_progress_to_db(self.req, role_id_list,
                                           host_id_list,
                                           kolla_state['INSTALLING'],
                                           self.message, self.progress)

        with open(self.log_file, "w+") as fp:
            LOG.info(_("kolla-ansible begin to deploy openstack ..."))
            cmd = subprocess.Popen(
                'cd %s/kolla-ansible && ./tools/kolla-ansible deploy -i '
                '%s/kolla-ansible/ansible/inventory/multinode' %
                (self.kolla_file, self.kolla_file),
                shell=True,
                stdout=fp,
                stderr=fp)
            self.message = "begin deploy openstack"
            self.progress = 25
            execute_times = 0
            # Same 5s poll loop; progress is parsed from the deploy log
            # while the process runs, and capped at 95 until post-deploy.
            while True:
                time.sleep(5)
                return_code = cmd.poll()
                if self.progress == 95:
                    break
                elif return_code == 0:
                    self.progress = 95
                elif return_code == 1:
                    self.message = "KOLLA deploy openstack failed"
                    LOG.error(self.message)
                    raise exception.InstallException(self.message)
                else:
                    self.progress = _calc_progress(self.log_file)
                if execute_times >= 1440:
                    self.message = "KOLLA deploy openstack timeout"
                    LOG.error(self.message)
                    raise exception.InstallTimeoutException(
                        cluster_id=self.cluster_id)
                else:
                    update_all_host_progress_to_db(self.req, role_id_list,
                                                   host_id_list,
                                                   kolla_state['INSTALLING'],
                                                   self.message, self.progress)
                execute_times += 1

            try:
                LOG.info(_("kolla-ansible post-deploy for each node..."))
                exc_result = subprocess.check_output(
                    'cd %s/kolla-ansible && ./tools/kolla-ansible post-deploy '
                    ' -i %s/kolla-ansible/ansible/inventory/multinode' %
                    (self.kolla_file, self.kolla_file),
                    shell=True,
                    stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                self.message = "kolla-ansible post-deploy failed!"
                LOG.error(self.message)
                fp.write(e.output.strip())
                raise exception.InstallException(self.message)
            else:
                LOG.info(_("kolla-ansible post-deploy successfully!"))
                fp.write(exc_result)
                self.message = "post-deploy successfully!"
                # Success: mark every role and host ACTIVE at 100% and stamp
                # the installed version onto each host row.
                update_all_host_progress_to_db(self.req, role_id_list,
                                               host_id_list,
                                               kolla_state['ACTIVE'],
                                               self.message, 100)
                update_progress_to_db(self.req, role_id_list,
                                      kolla_state['ACTIVE'], 100)
                for host_id in host_id_list:
                    daisy_cmn.update_db_host_status(
                        self.req, host_id, {
                            'tecs_version_id': cluster_data['tecs_version_id'],
                            'tecs_patch_id': ''
                        })
Exemplo n.º 29
0
    def _run(self):
        """Drive a full kolla-based OpenStack installation for the cluster.

        Phases, each updating host progress rows in the database:
        1. fetch cluster metadata and kolla config; ping-check all hosts;
        2. establish trust and configure networking on non-ssh hosts
           (one thread per host);
        3. run prepare.sh on every host (one thread per host);
        4. locate, load and multicast the kolla version package, then
           regenerate the kolla config files;
        5. run kolla-ansible prechecks, deploy, and post-deploy, polling
           the subprocess every 5s with a 1440-iteration (~2h) timeout.

        Raises ThreadBinException, InstallException,
        InstallTimeoutException or NotFound on failure.
        """
        cluster_data = registry.get_cluster_metadata(self.req.context,
                                                     self.cluster_id)

        (kolla_config, self.mgt_ip_list, host_name_ip_list) = \
            kolla_cmn.get_cluster_kolla_config(self.req, self.cluster_id)
        if not self.mgt_ip_list:
            msg = _("there is no host in cluster %s") % self.cluster_id
            LOG.error(msg)
            raise exception.ThreadBinException(msg)

        # All management IPs must answer ping before anything is attempted.
        unreached_hosts = _check_ping_hosts(self.mgt_ip_list, self.ping_times)
        if unreached_hosts:
            self.message = "hosts %s ping failed" % unreached_hosts
            LOG.error(self.message)
            raise exception.InstallException(self.message)

        root_passwd = 'ossdbg1'
        threads_net = []
        for mgnt_ip in self.mgt_ip_list:
            check_hosts_id = _get_hosts_id_by_mgnt_ips(self.req,
                                                       self.cluster_id,
                                                       mgnt_ip.split(","))
            is_ssh_host = daisy_cmn._judge_ssh_host(self.req,
                                                    check_hosts_id[0])
            if not is_ssh_host:
                # Establish passwordless trust, then configure the host's
                # network in a background thread.
                cmd = '/var/lib/daisy/trustme.sh %s %s' % \
                      (mgnt_ip, root_passwd)
                daisy_cmn.subprocess_call(cmd)
                LOG.info(_("Begin to config network on %s" % mgnt_ip))
                ssh_host_info = {'ip': mgnt_ip, 'root_pwd': root_passwd}
                configure_external_interface_vlan(self.req,
                                                  self.cluster_id,
                                                  mgnt_ip)

                t_net = threading.Thread(target=api_cmn.config_network,
                                         args=(ssh_host_info, 'kolla'))
                t_net.setDaemon(True)
                t_net.start()
                threads_net.append(t_net)
        try:
            LOG.info(_("config network threads"
                       " have started, please waiting...."))
            for t_net in threads_net:
                t_net.join()
        # NOTE(review): bare except also swallows KeyboardInterrupt; t_net
        # is unbound here if threads_net is empty — confirm intent.
        except:
            LOG.error("join config network "
                      "thread %s failed!", t_net)

        # Give the reconfigured network time to settle before proceeding.
        time.sleep(20)

        (role_id_list, host_id_list, hosts_list) = \
            kolla_cmn.get_roles_and_hosts_list(self.req, self.cluster_id)
        self.message = "Begin install"
        update_all_host_progress_to_db(self.req, role_id_list,
                                       host_id_list, kolla_state['INSTALLING'],
                                       self.message, 5)

        docker_registry_ip = kolla_cmn._get_local_ip()

        # Do prepare.sh for each host
        threads = []
        for host in hosts_list:
            t = threading.Thread(target=thread_bin,
                                 args=(self.req, self.cluster_id, host,
                                       root_passwd, host_name_ip_list,
                                       self.host_prepare_file,
                                       docker_registry_ip, role_id_list))
            t.setDaemon(True)
            t.start()
            threads.append(t)
            LOG.info("prepare.sh threads for %s started", host['mgtip'])

        try:
            LOG.info(_("prepare kolla installation threads have started, "
                       "please waiting...."))
            for t in threads:
                t.join()
        except:
            LOG.error("join kolla prepare installation "
                      "thread %s failed!", t)

        # thread_bin workers report failure through the shared thread_flag
        # dict rather than exceptions.
        if thread_flag.get('flag', None) is not None and \
                thread_flag['flag'] == False:
            self.message = "prepare deploy nodes failed!"
            LOG.error(self.message)
            raise exception.InstallException(self.message)

        # Check, load and multicast version
        if cluster_data.get('tecs_version_id', None):
            vid = cluster_data['tecs_version_id']
            version_info = registry.get_version_metadata(self.req.context,
                                                         vid)
            kolla_version_pkg_file = \
                kolla_cmn.check_and_get_kolla_version(daisy_kolla_ver_path,
                                                      version_info['name'])
        else:
            # No version pinned on the cluster: take whatever package is
            # found under daisy_kolla_ver_path.
            kolla_version_pkg_file =\
                kolla_cmn.check_and_get_kolla_version(daisy_kolla_ver_path)
        if not kolla_version_pkg_file:
            self.state = kolla_state['INSTALL_FAILED']
            self.message =\
                "kolla version file not found in %s" % daisy_kolla_ver_path
            raise exception.NotFound(message=self.message)

        try:
            LOG.info(_("load kolla registry..."))
            kolla_cmn.version_load(kolla_version_pkg_file, hosts_list)
        except exception.SubprocessCmdFailed as e:
            self.message = "load kolla registry failed!"
            LOG.error(self.message)
            raise exception.InstallException(self.message)

        res = kolla_cmn.version_load_mcast(kolla_version_pkg_file,
                                           hosts_list)
        update_all_host_progress_to_db(self.req, role_id_list,
                                       host_id_list,
                                       kolla_state['INSTALLING'],
                                       self.message, 15)

        # always call generate_kolla_config_file after version_load()
        LOG.info(_("begin to generate kolla config file ..."))
        (kolla_config, self.mgt_ip_list, host_name_ip_list) = \
            kolla_cmn.get_cluster_kolla_config(self.req, self.cluster_id)
        kolla_cmn.generate_kolla_config_file(self.req, self.cluster_id,
                                             kolla_config, res)
        LOG.info(_("generate kolla config file in /etc/kolla/ dir..."))

        # Kolla prechecks
        with open(self.precheck_file, "w+") as fp:
            LOG.info(_("kolla-ansible precheck..."))
            cmd = subprocess.Popen(
                'cd %s/kolla-ansible && ./tools/kolla-ansible prechecks '
                ' -i %s/kolla-ansible/ansible/inventory/multinode -vvv' %
                (self.kolla_file, self.kolla_file),
                shell=True, stdout=fp, stderr=fp)
            execute_times = 0
            # Poll the precheck process every 5s; 1440 polls ~= 2 hours.
            while True:
                time.sleep(5)
                return_code = cmd.poll()
                if return_code == 0:
                    break
                elif return_code == 1:
                    self.message = "kolla-ansible preckecks failed!"
                    LOG.error(self.message)
                    raise exception.InstallException(self.message)
                else:
                    # Still running (poll() returned None).
                    if execute_times >= 1440:
                        self.message = "kolla-ansible preckecks timeout"
                        LOG.error(self.message)
                        raise exception.InstallTimeoutException(
                            cluster_id=self.cluster_id)
                execute_times += 1

            self.message = "kolla-ansible preckecks successfully(%d)!" % \
                (return_code)
            self.progress = 20
            update_all_host_progress_to_db(self.req, role_id_list,
                                           host_id_list,
                                           kolla_state['INSTALLING'],
                                           self.message, self.progress)

        with open(self.log_file, "w+") as fp:
            LOG.info(_("kolla-ansible begin to deploy openstack ..."))
            cmd = subprocess.Popen(
                'cd %s/kolla-ansible && ./tools/kolla-ansible deploy -i '
                '%s/kolla-ansible/ansible/inventory/multinode' %
                (self.kolla_file, self.kolla_file),
                shell=True, stdout=fp, stderr=fp)
            self.message = "begin deploy openstack"
            self.progress = 25
            execute_times = 0
            # Same 5s poll loop; progress is parsed from the deploy log
            # while the process runs, and capped at 95 until post-deploy.
            while True:
                time.sleep(5)
                return_code = cmd.poll()
                if self.progress == 95:
                    break
                elif return_code == 0:
                    self.progress = 95
                elif return_code == 1:
                    self.message = "KOLLA deploy openstack failed"
                    LOG.error(self.message)
                    raise exception.InstallException(self.message)
                else:
                    self.progress = _calc_progress(self.log_file)
                if execute_times >= 1440:
                    self.message = "KOLLA deploy openstack timeout"
                    LOG.error(self.message)
                    raise exception.InstallTimeoutException(
                        cluster_id=self.cluster_id)
                else:
                    update_all_host_progress_to_db(self.req, role_id_list,
                                                   host_id_list,
                                                   kolla_state['INSTALLING'],
                                                   self.message, self.progress)
                execute_times += 1

            try:
                LOG.info(_("kolla-ansible post-deploy for each node..."))
                exc_result = subprocess.check_output(
                    'cd %s/kolla-ansible && ./tools/kolla-ansible post-deploy '
                    ' -i %s/kolla-ansible/ansible/inventory/multinode' %
                    (self.kolla_file, self.kolla_file),
                    shell=True, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                self.message = "kolla-ansible post-deploy failed!"
                LOG.error(self.message)
                fp.write(e.output.strip())
                raise exception.InstallException(self.message)
            else:
                LOG.info(_("kolla-ansible post-deploy successfully!"))
                fp.write(exc_result)
                self.message = "post-deploy successfully!"
                # Success: mark every role and host ACTIVE at 100% and stamp
                # the installed version onto each host row.
                update_all_host_progress_to_db(self.req, role_id_list,
                                               host_id_list,
                                               kolla_state['ACTIVE'],
                                               self.message, 100)
                update_progress_to_db(self.req, role_id_list,
                                      kolla_state['ACTIVE'], 100)
                for host_id in host_id_list:
                    daisy_cmn.update_db_host_status(
                        self.req, host_id,
                        {'tecs_version_id': cluster_data['tecs_version_id'],
                         'tecs_patch_id': ''})
Exemplo n.º 30
0
def _os_thread_bin(req, host_ip, host_id):
    """Upgrade the OS (tfg) on one host and track progress in the DB.

    Stages the upgrade ISO and script onto the host via clush, runs the
    remote upgrade, and records the outcome through
    update_db_host_status.  All remote output is appended to a per-host
    log file under /var/log/daisy/daisy_update/.
    """
    status = {}
    password = "******"
    LOG.info(_("Begin update os for host %s." % (host_ip)))
    daisy_cmn.subprocess_call('mkdir -p /var/log/daisy/daisy_update/')

    log_path = "/var/log/daisy/daisy_update/%s_update_tfg.log" % host_ip
    with open(log_path, "w+") as log:
        # Establish trust, then stage the upgrade bits on the target host.
        staging_cmds = [
            '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password),
            'clush -S -w %s "mkdir -p /home/daisy_update"' % (host_ip,),
            'clush -S -b -w %s  "rm -rf /home/daisy_update/*"' % (host_ip,),
            'clush -S -w %s -c /var/lib/daisy/tecs/*CGSL_VPLAT*.iso /var/lib/daisy/tecs/tfg_upgrade.sh --dest=/home/daisy_update' % (host_ip,),
            'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,),
        ]
        for staging_cmd in staging_cmds:
            daisy_cmn.subprocess_call(staging_cmd, log)

        status['os_progress'] = 30
        status['os_status'] = host_os_status['UPDATING']
        status['messages'] = ""
        update_db_host_status(req, host_id, status)
        try:
            output = subprocess.check_output(
                'clush -S -w %s "/home/daisy_update/tfg_upgrade.sh"'
                % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            if e.returncode == 255 and "reboot" in e.output.strip():
                # Exit code 255 plus "reboot" in the output means the
                # upgrade succeeded and the host is rebooting.
                status['os_progress'] = 100
                status['os_status'] = host_os_status['ACTIVE']
                status['messages'] = "upgrade tfg successfully,os reboot"
                LOG.info(_("Update tfg for %s successfully,os reboot!"
                           % host_ip))
                daisy_cmn.check_reboot_ping(host_ip)
            else:
                status['os_progress'] = 0
                status['os_status'] = host_os_status['UPDATE_FAILED']
                # Keep a short excerpt of the failure output for the DB.
                status['messages'] = \
                    e.output.strip()[-400:-200].replace('\n', ' ')
                LOG.error(_("Update tfg for %s failed!" % host_ip))
            update_db_host_status(req, host_id, status)
            log.write(e.output.strip())
        else:
            status['os_progress'] = 100
            status['os_status'] = host_os_status['ACTIVE']
            status['messages'] = "upgrade tfg successfully"
            update_db_host_status(req, host_id, status)
            LOG.info(_("Update os for %s successfully!" % host_ip))
            log.write(output)
            if "reboot" in output:
                daisy_cmn.check_reboot_ping(host_ip)
Exemplo n.º 31
0
    def export_db_to_json(self, req, template):
        """
        Export a cluster's configuration from the database into a
        template record and a JSON file on disk.

        :param req: The WSGI/Webob Request object
        :param template: dict carrying 'cluster_name', 'type',
                         'description' and 'template_name'
        :raises HTTPForbidden: if the named cluster does not exist
        :raises HTTPBadRequest: if the registry reports invalid data
        :returns: {"template": <template detail dict>}; the detail is
                  empty when 'type' is set to a non-"tecs" backend
        """
        cluster_name = template.get('cluster_name', None)
        # Renamed from 'type' to avoid shadowing the builtin.
        template_type = template.get('type', None)
        description = template.get('description', None)
        template_name = template.get('template_name', None)
        self._enforce(req, 'export_db_to_json')
        cinder_volume_list = []
        neutron_backend_list = []
        service_disk_list = []
        optical_switch_list = []
        template_content = {}
        template_json = {}
        template_id = ""
        # Bug fix: template_detail was previously unbound when the "tecs"
        # branch was skipped (or produced no template id), making the
        # final return raise UnboundLocalError.
        template_detail = {}
        if not template_type or template_type == "tecs":
            try:
                params = {'filters': {'name': cluster_name}}
                clusters = registry.get_clusters_detail(req.context, **params)
                if clusters:
                    cluster_id = clusters[0]['id']
                else:
                    msg = "the cluster %s is not exist" % cluster_name
                    LOG.error(msg)
                    raise HTTPForbidden(explanation=msg,
                                        request=req,
                                        content_type="text/plain")

                params = {'filters': {'cluster_id': cluster_id}}
                cluster = registry.get_cluster_metadata(
                    req.context, cluster_id)
                roles = registry.get_roles_detail(req.context, **params)
                networks = registry.get_networks_detail(
                    req.context, cluster_id, **params)
                for role in roles:
                    # Collect role-scoped resources into the template.
                    cinder_volume_list += self._get_cinder_volumes(req, role)
                    service_disk_list += self._get_services_disk(req, role)
                    optical_switch_list += self._get_optical_switchs(req,
                                                                     role)
                    neutron_backend_list += self._get_neutron_backends(req,
                                                                       role)

                    if role.get('config_set_id', None):
                        config_set = registry.get_config_set_metadata(
                            req.context, role['config_set_id'])
                        if config_set.get("config", None):
                            role['config_set'] = config_set['config']
                    # Strip runtime-only fields before exporting.
                    del role['cluster_id']
                    del role['status']
                    del role['progress']
                    del role['messages']
                    del role['config_set_update_progress']
                    self._del_general_params(role)
                for network in networks:
                    network_detail = registry.get_network_metadata(
                        req.context, network['id'])
                    if network_detail.get('ip_ranges', None):
                        network['ip_ranges'] = network_detail['ip_ranges']
                    del network['cluster_id']
                    self._del_general_params(network)
                if cluster.get('routers', None):
                    for router in cluster['routers']:
                        del router['cluster_id']
                        self._del_general_params(router)
                if cluster.get('logic_networks', None):
                    for logic_network in cluster['logic_networks']:
                        for subnet in logic_network['subnets']:
                            del subnet['logic_network_id']
                            del subnet['router_id']
                            self._del_general_params(subnet)
                        del logic_network['cluster_id']
                        self._del_general_params(logic_network)
                if cluster.get('nodes', None):
                    del cluster['nodes']
                self._del_general_params(cluster)
                self._del_cluster_params(cluster)
                cluster['tecs_version_id'] = ""
                template_content['cluster'] = cluster
                template_content['cluster_name'] = cluster_name
                template_content['roles'] = roles
                template_content['networks'] = networks
                template_content['cinder_volumes'] = cinder_volume_list
                template_content['neutron_backends'] = neutron_backend_list
                template_content['optical_switchs'] = optical_switch_list
                template_content['services_disk'] = service_disk_list
                template_json['content'] = json.dumps(template_content)
                template_json['type'] = 'tecs'
                template_json['name'] = template_name
                template_json['description'] = description

                template_host_params = {'cluster_name': cluster_name}
                template_hosts = registry.host_template_lists_metadata(
                    req.context, **template_host_params)
                if template_hosts:
                    template_json['hosts'] = template_hosts[0]['hosts']
                else:
                    template_json['hosts'] = "[]"

                # Update the template in place when one with this name
                # already exists, otherwise create a new record.
                template_params = {'filters': {'name': template_name}}
                template_list = registry.template_lists_metadata(
                    req.context, **template_params)
                if template_list:
                    registry.update_template_metadata(
                        req.context, template_list[0]['id'], template_json)
                    template_id = template_list[0]['id']
                else:
                    add_template = registry.add_template_metadata(
                        req.context, template_json)
                    template_id = add_template['id']

                if template_id:
                    template_detail = registry.template_detail_metadata(
                        req.context, template_id)
                    self._del_general_params(template_detail)
                    template_detail['content'] = json.loads(
                        template_detail['content'])
                    if template_detail['hosts']:
                        template_detail['hosts'] = json.loads(
                            template_detail['hosts'])

                    # Dump the exported template to <daisy_path>/<name>.json.
                    tecs_json = daisy_path + "%s.json" % template_name
                    cmd = 'rm -rf  %s' % (tecs_json,)
                    daisy_cmn.subprocess_call(cmd)
                    with open(tecs_json, "w+") as fp:
                        json.dump(template_detail, fp, indent=2)

            except exception.Invalid as e:
                raise HTTPBadRequest(explanation=e.msg, request=req)

        return {"template": template_detail}
Exemplo n.º 32
0
def _thread_bin(req, cluster_id, host, root_passwd, fp, host_name_ip_list,
                host_prepare_file, docker_registry_ip, role_id_list):
    """Prepare one target host for a kolla installation.

    Copies the prerequisites (daisy4nfv-jasmine rpm, docker-engine rpm,
    registry-server tar and prepare.sh) to the host over scp, runs
    prepare.sh remotely, and records progress in the DB.  All remote
    output is appended to the open log file *fp*.

    :raises exception.InstallException: when prepare.sh fails remotely
    """
    host_ip = host['mgtip']

    config_nodes_hosts(host_name_ip_list, host_ip)
    cmd = 'ssh -o StrictHostKeyChecking=no %s \
          "if [ ! -d %s ];then mkdir %s;fi" ' % \
          (host_ip, host_prepare_file, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    LOG.info("Remote directory created on %s", host_ip)

    # scp daisy4nfv-jasmine.rpm to the same dir of prepare.sh at target host
    cmd = "scp -o ConnectTimeout=10 \
           /var/lib/daisy/tools/daisy4nfv-jasmine*.rpm \
           root@%s:%s" % (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    # scp docker-engine.rpm to the same dir of prepare.sh at target host
    cmd = "scp -o ConnectTimeout=10 \
           /var/lib/daisy/tools/docker-engine.rpm \
           root@%s:%s" % (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    # scp registry-server.tar to the same dir of prepare.sh at target host
    cmd = "scp -o ConnectTimeout=10 \
           /var/lib/daisy/tools/registry-server.tar \
           root@%s:%s" % (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    LOG.info("Files copied successfully to %s", host_ip)

    cmd = "scp -o ConnectTimeout=10 \
           /var/lib/daisy/kolla/prepare.sh \
           root@%s:%s" % (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    cmd = 'ssh -o StrictHostKeyChecking=no %s \
          chmod u+x %s/prepare.sh' % \
          (host_ip, host_prepare_file)
    daisy_cmn.subprocess_call(cmd, fp)

    LOG.info("Ready to execute prepare.sh on %s", host_ip)

    try:
        exc_result = subprocess.check_output(
            'ssh -o StrictHostKeyChecking='
            'no %s %s/prepare.sh %s' %
            (host_ip, host_prepare_file, docker_registry_ip),
            shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # Bug fix: the trailing ", host_ip" previously built a tuple
        # instead of interpolating the ip into the message, so the log
        # and the raised exception carried a tuple, not a string.
        message = "exec prepare.sh on %s failed!" % host_ip
        LOG.error(message)
        fp.write(e.output.strip())
        raise exception.InstallException(message)
    else:
        LOG.info("prepare for %s successfully!", host_ip)
        fp.write(exc_result)
        message = "Preparing for installation successful!"
        update_host_progress_to_db(req, role_id_list, host,
                                   kolla_state['INSTALLING'],
                                   message, 10)
Exemplo n.º 33
0
    def export_db_to_json(self, req, template):
        """
        Export a cluster's configuration from the database into a
        template record and a JSON file on disk.

        :param req: The WSGI/Webob Request object
        :param template: dict carrying 'cluster_name', 'type',
                         'description' and 'template_name'
        :raises HTTPForbidden: if the named cluster does not exist
        :raises HTTPBadRequest: if the registry reports invalid data
        :returns: {"template": <template detail dict>}; the detail is
                  empty when 'type' is set to a non-"tecs" backend
        """
        cluster_name = template.get('cluster_name', None)
        # Renamed from 'type' to avoid shadowing the builtin.
        template_type = template.get('type', None)
        description = template.get('description', None)
        template_name = template.get('template_name', None)
        self._enforce(req, 'export_db_to_json')
        cinder_volume_list = []
        neutron_backend_list = []
        service_disk_list = []
        optical_switch_list = []
        template_content = {}
        template_json = {}
        template_id = ""
        # Bug fix: template_detail was previously unbound when the "tecs"
        # branch was skipped (or produced no template id), making the
        # final return raise UnboundLocalError.
        template_detail = {}
        if not template_type or template_type == "tecs":
            try:
                params = {'filters': {'name': cluster_name}}
                clusters = registry.get_clusters_detail(req.context, **params)
                if clusters:
                    cluster_id = clusters[0]['id']
                else:
                    msg = "the cluster %s is not exist" % cluster_name
                    LOG.error(msg)
                    raise HTTPForbidden(
                        explanation=msg,
                        request=req,
                        content_type="text/plain")

                params = {'filters': {'cluster_id': cluster_id}}
                cluster = registry.get_cluster_metadata(
                    req.context, cluster_id)
                roles = registry.get_roles_detail(req.context, **params)
                networks = registry.get_networks_detail(
                    req.context, cluster_id, **params)
                for role in roles:
                    # Collect role-scoped resources into the template.
                    cinder_volume_list += self._get_cinder_volumes(req, role)
                    service_disk_list += self._get_services_disk(req, role)
                    optical_switch_list += self._get_optical_switchs(req,
                                                                     role)
                    neutron_backend_list += self._get_neutron_backends(req,
                                                                       role)

                    if role.get('config_set_id', None):
                        config_set = registry.get_config_set_metadata(
                            req.context, role['config_set_id'])
                        if config_set.get("config", None):
                            role['config_set'] = config_set['config']
                    # Strip runtime-only fields before exporting.
                    del role['cluster_id']
                    del role['status']
                    del role['progress']
                    del role['messages']
                    del role['config_set_update_progress']
                    self._del_general_params(role)
                for network in networks:
                    network_detail = registry.get_network_metadata(
                        req.context, network['id'])
                    if network_detail.get('ip_ranges', None):
                        network['ip_ranges'] = network_detail['ip_ranges']
                    del network['cluster_id']
                    self._del_general_params(network)
                if cluster.get('routers', None):
                    for router in cluster['routers']:
                        del router['cluster_id']
                        self._del_general_params(router)
                if cluster.get('logic_networks', None):
                    for logic_network in cluster['logic_networks']:
                        for subnet in logic_network['subnets']:
                            del subnet['logic_network_id']
                            del subnet['router_id']
                            self._del_general_params(subnet)
                        del logic_network['cluster_id']
                        self._del_general_params(logic_network)
                if cluster.get('nodes', None):
                    del cluster['nodes']
                self._del_general_params(cluster)
                self._del_cluster_params(cluster)
                cluster['tecs_version_id'] = ""
                template_content['cluster'] = cluster
                template_content['cluster_name'] = cluster_name
                template_content['roles'] = roles
                template_content['networks'] = networks
                template_content['cinder_volumes'] = cinder_volume_list
                template_content['neutron_backends'] = neutron_backend_list
                template_content['optical_switchs'] = optical_switch_list
                template_content['services_disk'] = service_disk_list
                template_json['content'] = json.dumps(template_content)
                template_json['type'] = 'tecs'
                template_json['name'] = template_name
                template_json['description'] = description

                template_host_params = {'cluster_name': cluster_name}
                template_hosts = registry.host_template_lists_metadata(
                    req.context, **template_host_params)
                if template_hosts:
                    template_json['hosts'] = template_hosts[0]['hosts']
                else:
                    template_json['hosts'] = "[]"

                # Update the template in place when one with this name
                # already exists, otherwise create a new record.
                template_params = {'filters': {'name': template_name}}
                template_list = registry.template_lists_metadata(
                    req.context, **template_params)
                if template_list:
                    registry.update_template_metadata(
                        req.context, template_list[0]['id'], template_json)
                    template_id = template_list[0]['id']
                else:
                    add_template = registry.add_template_metadata(
                        req.context, template_json)
                    template_id = add_template['id']

                if template_id:
                    template_detail = registry.template_detail_metadata(
                        req.context, template_id)
                    self._del_general_params(template_detail)
                    template_detail['content'] = json.loads(
                        template_detail['content'])
                    if template_detail['hosts']:
                        template_detail['hosts'] = json.loads(
                            template_detail['hosts'])

                    # Dump the exported template to <daisy_path>/<name>.json.
                    tecs_json = daisy_path + "%s.json" % template_name
                    cmd = 'rm -rf  %s' % (tecs_json,)
                    daisy_cmn.subprocess_call(cmd)
                    with open(tecs_json, "w+") as fp:
                        json.dump(template_detail, fp, indent=2)

            except exception.Invalid as e:
                raise HTTPBadRequest(explanation=e.msg, request=req)

        return {"template": template_detail}
Exemplo n.º 34
0
def thread_bin(req, host, role_id_list, pkg_name, install_progress_percentage):
    """Install and start zenic on one host (run as a thread target).

    Establishes trust with the host, copies the zenic config and package
    over, unpacks the package, runs node_install.sh and node_start.sh,
    updating role progress in the DB along the way.  On any remote
    failure the role is marked INSTALL_FAILED and the thread stops.
    """
    host_ip = host['mgtip']
    password = host['rootpwd']

    cmd = 'mkdir -p /var/log/daisy/daisy_install/'
    daisy_cmn.subprocess_call(cmd)

    var_log_path = "/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip
    with open(var_log_path, "w+") as fp:

        cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
        daisy_cmn.subprocess_call(cmd, fp)

        cmd = 'clush -S -b -w %s  mkdir -p /home/workspace' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)

        cmd = 'clush -S -b -w %s  mkdir -p /etc/zenic' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)

        # Clean out any leftovers from a previous installation attempt.
        cmd = 'clush -S -b -w %s  rm -rf /etc/zenic/config' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)

        cmd = 'clush -S -b -w %s  rm -rf /home/zenic' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)

        cmd = 'clush -S -b -w %s  rm -rf /home/workspace/unipack' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)

        pkg_file = daisy_zenic_path + pkg_name
        cmd = 'clush -S -b -w %s  rm -rf /home/workspace/%s' % (host_ip,
                                                                pkg_name)
        daisy_cmn.subprocess_call(cmd, fp)

        cfg_file = daisy_zenic_path + host_ip + "_zenic.conf"
        # NOTE(review): 'ossdbg1' is a hard-coded root password passed to
        # sshpass; confirm whether this should use host['rootpwd'].
        try:
            exc_result = subprocess.check_output(
                'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' %
                (cfg_file, host_ip,),
                shell=True, stderr=fp)
        except subprocess.CalledProcessError as e:
            update_progress_to_db(req, role_id_list,
                                  zenic_state['INSTALL_FAILED'])
            LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
            fp.write(e.output.strip())
            # Bug fix: return instead of exit(); exit() raised SystemExit,
            # which could abort the whole process if this function were
            # ever called synchronously instead of as a thread target.
            return
        else:
            LOG.info(_("scp zenic config for %s successfully!" % host_ip))
            fp.write(exc_result)

        try:
            exc_result = subprocess.check_output(
                'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' %
                (pkg_file, host_ip,),
                shell=True, stderr=fp)
        except subprocess.CalledProcessError as e:
            update_progress_to_db(req, role_id_list,
                                  zenic_state['INSTALL_FAILED'])
            LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
            fp.write(e.output.strip())
            return
        else:
            LOG.info(_("scp zenic pkg for %s successfully!" % host_ip))
            fp.write(exc_result)

        # Bug fix: pass fp so the unzip output lands in the install log
        # like every other command in this function.
        cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip, pkg_name,)
        daisy_cmn.subprocess_call(cmd, fp)

        try:
            exc_result = subprocess.check_output(
                'clush -S -b -w %s  /home/workspace/unipack/node_install.sh' %
                (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            update_progress_to_db(req, role_id_list,
                                  zenic_state['INSTALL_FAILED'])
            LOG.info(_("install zenic for %s failed!" % host_ip))
            fp.write(e.output.strip())
            return
        else:
            LOG.info(_("install zenic for %s successfully!" % host_ip))
            fp.write(exc_result)

        try:
            exc_result = subprocess.check_output(
                'clush -S -b -w %s  /home/zenic/node_start.sh' % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            update_progress_to_db(req, role_id_list,
                                  zenic_state['INSTALL_FAILED'])
            LOG.info(_("start zenic for %s failed!" % host_ip))
            fp.write(e.output.strip())
            return
        else:
            update_progress_to_db(req, role_id_list,
                                  zenic_state['INSTALLING'],
                                  install_progress_percentage)
            LOG.info(_("start zenic for %s successfully!" % host_ip))
            fp.write(exc_result)
Exemplo n.º 35
0
    def _baremetal_install_os(self, host_detail):
        """Kick off OS installation on a bare-metal host via ironic.

        Validates the configured OS image and ipmi credentials, computes
        partition/LVM sizing from the host's hardware inventory, sets the
        host to PXE boot, asks ironic to install the OS, and finally
        resets the host's power to start the installation.

        :param host_detail: dict describing the host (disks, memory,
            interfaces, ipmi credentials, optional LVM size overrides)
        :raises exception.NotFound: when the OS version file or ipmi
            information is missing
        :raises exception.OSInstallFailed: when ironic rejects the
            install request
        """
        os_version_file = host_detail['os_version_file']
        if os_version_file:
            test_os_version_exist = 'test -f %s' % os_version_file
            daisy_cmn.subprocess_call(test_os_version_exist)
        else:
            self.message = "no OS version file configed for host %s" % \
                host_detail['id']
            raise exception.NotFound(message=self.message)

        # Sizing defaults; dict.has_key was replaced with .get()/'in'
        # throughout (has_key is Python-2-only and the method already
        # mixed both styles).
        root_disk = host_detail.get('root_disk', None) or 'sda'
        root_lv_size_m = host_detail.get('root_lv_size', None) or 51200

        # Inventory reports sizes as strings like "<number> <unit...>".
        memory_size_b_str = str(host_detail['memory']['total'])
        memory_size_b_int = int(memory_size_b_str.strip().split()[0])
        memory_size_m = memory_size_b_int // 1024
        memory_size_g = memory_size_m // 1024
        swap_lv_size_m = host_detail['swap_lv_size']
        cinder_vg_size_m = 0
        disk_names = []
        disk_storage_size_b = 0
        for key in host_detail['disks']:
            disk = host_detail['disks'][key]
            disk_names.append(disk['name'])
            storage_size_b_int = int(disk['size'].strip().split()[0])
            disk_storage_size_b = disk_storage_size_b + storage_size_b_int

        disk_list = ','.join(disk_names)
        disk_storage_size_m = disk_storage_size_b // (1024 * 1024)

        # NOTE(review): 'ossdbg1' is a hard-coded default root password;
        # confirm whether it should come from configuration.
        root_pwd = host_detail.get('root_pwd', None) or 'ossdbg1'
        isolcpus = host_detail.get('isolcpus', None) or None
        hugepages = host_detail.get('hugepages', None) or 0
        hugepagesize = host_detail.get('hugepagesize', None) or '1G'

        if (not host_detail['ipmi_user'] or
                not host_detail['ipmi_passwd'] or
                not host_detail['ipmi_addr']):
            self.message = "Invalid ipmi information configed for host %s" % \
                host_detail['id']
            raise exception.NotFound(message=self.message)

        self._set_boot_or_power_state(host_detail['ipmi_user'],
                                      host_detail['ipmi_passwd'],
                                      host_detail['ipmi_addr'],
                                      'pxe')

        kwargs = {'hostname': host_detail['name'],
                  'iso_path': os_version_file,
                  'dhcp_mac': host_detail['dhcp_mac'],
                  'storage_size': disk_storage_size_m,
                  'memory_size': memory_size_g,
                  'interfaces': host_detail['interfaces'],
                  'root_lv_size': root_lv_size_m,
                  'swap_lv_size': swap_lv_size_m,
                  'cinder_vg_size': cinder_vg_size_m,
                  'disk_list': disk_list,
                  'root_disk': root_disk,
                  'root_pwd': root_pwd,
                  'isolcpus': isolcpus,
                  'hugepagesize': hugepagesize,
                  'hugepages': hugepages,
                  'reboot': 'no'}

        # glance_lv_size only checks key presence; the other three also
        # require a truthy value (preserving the original semantics).
        if 'glance_lv_size' in host_detail:
            kwargs['glance_lv_size'] = host_detail['glance_lv_size']
        else:
            kwargs['glance_lv_size'] = 0
        kwargs['db_lv_size'] = host_detail.get('db_lv_size', None) or 0
        kwargs['mongodb_lv_size'] = \
            host_detail.get('mongodb_lv_size', None) or 0
        kwargs['nova_lv_size'] = host_detail.get('nova_lv_size', None) or 0

        install_os_obj = self.ironicclient.daisy.install_os(**kwargs)
        install_os_dict = dict([(f, getattr(install_os_obj, f, ''))
                                for f in ['return_code', 'info']])
        rc = int(install_os_dict['return_code'])
        if rc != 0:
            install_os_description = install_os_dict['info']
            LOG.info(_("install os config failed because of '%s'"
                       % (install_os_description)))
            host_status = {'os_status': host_os_status['INSTALL_FAILED'],
                           'os_progress': 0,
                           'messages': install_os_description}
            update_db_host_status(self.req, host_detail['id'], host_status)
            msg = "ironic install os return failed for host %s" % \
                host_detail['id']
            raise exception.OSInstallFailed(message=msg)

        # Power-cycle the host so it PXE-boots into the installer.
        self._set_boot_or_power_state(host_detail['ipmi_user'],
                                      host_detail['ipmi_passwd'],
                                      host_detail['ipmi_addr'],
                                      'reset')