class LustreFSKernelMaster(Script):
    def __init__(self):
        self.sbtls = SubTools()

    def install(self, env):
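        # Nothing is installed here; the Lustre kernel packages are put in
        # place asynchronously by the 'reboot' custom command below.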
        print 'Installation complete.'

    def stop(self, env):
        print 'Stop LustreFSKernelMaster'
        # Stop your service
        self.sbtls.sp_open(
            'python /var/lib/ambari-agent/cache/stacks/HDP/2.6/services/LUSTREFSKERNEL/package/scripts/daemon-lustrefs-kernel.py stop'
        )

    def start(self, env):
        print 'Start LustreFSKernelMaster'
        # Reconfigure all files
        # Start your service
        self.sbtls.sp_open(
            'python /var/lib/ambari-agent/cache/stacks/HDP/2.6/services/LUSTREFSKERNEL/package/scripts/daemon-lustrefs-kernel.py start'
        )

    def reboot(self, env):
        self.sbtls.excuteDaemon('kernelinstall', 5700)
        print("LustreFS kernel installed!!")

    def status(self, env):
        check = self.sbtls.sp_open(
            'python /var/lib/ambari-agent/cache/stacks/HDP/2.6/services/LUSTREFSKERNEL/package/scripts/daemon-lustrefs-kernel.py status'
        )
        print check
        if 'not' in str(check):
            raise ComponentIsNotRunning()
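
# A minimal sketch of the SubTools helper these scripts lean on. Only the two
# method names (sp_open, excuteDaemon) come from the calls above; everything
# else is an assumption about the stack's bundled implementation.
#
# import shlex
# import socket
# import subprocess
#
# class SubTools:
#     def sp_open(self, cmd):
#         # Run a command and return (stdout, stderr).
#         p = subprocess.Popen(shlex.split(cmd),
#                              stdout=subprocess.PIPE,
#                              stderr=subprocess.PIPE)
#         return p.communicate()
#
#     def excuteDaemon(self, command, port):
#         # Hand a command string to the helper daemon listening locally.
#         s = socket.create_connection(('127.0.0.1', port))
#         s.sendall(command)
#         s.close()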
class LustreMDSMgmtService(Script):
    def __init__(self):
        self.sbtls = SubTools()

    def install(self, env):
        import mdt_params

        mds_wget = mdt_params.mds_wget

        # File(
        #     os.path.join("/var/lib/ambari-agent","LustreMDSMgmtService_status.conf"),
        #     owner='root',
        #     group='root',
        #     mode=0644,
        #     content="status=0",
        # )

        print 'Install LustreMDSMgmtService.'

        # Load the all configuration files
        config = Script.get_config()
        # Bind to a local variable
        #LustreMDSMgmtService_user = config['configurations']['my-config-env']['lustremdsmgmtservice_user']

        # Install packages
        self.install_packages(env)
        Execute(
            format(
                "yum install -y wget libesmtp libyaml net-snmp-agent-libs opensm opensm-libs sg3_utils tcl tk"
            ))

        for pkg in mds_wget:
            download_file = 'wget ' + pkg['url']
            Execute(format(download_file))

        pkg_file = 'yum install -y'
        for pkg in mds_wget:
            pkg_file = pkg_file + ' ' + pkg['name'] + ' '

        try:
            Execute(format(pkg_file))
        except ExecutionFailed as e:
            print(e)
            pass

        print 'Installation complete.'

        self.configure(env)
        # file_path = os.path.dirname(os.path.realpath(__file__))
        # self.sbtls.sp_open('python '+file_path+'/daemon-lustre-mds.py start')
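
    # A hedged sketch of what install() expects from mdt_params.mds_wget: a
    # list of dicts pairing each RPM's download URL with the bare package name
    # handed to yum. The URL below is a placeholder, not the project's mirror.
    #
    # mds_wget = [
    #     {'url': 'http://example.com/el7/lustre-2.10.0-1.el7.x86_64.rpm',
    #      'name': 'lustre'},
    # ]
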
    def stop(self, env):
        print 'Stop LustreMDSMgmtService'
        # Stop your service

        #Since we have not installed a real service, there is no PID file created by it.
        #Therefore we are going to artificially remove the PID.
        # Execute( "rm -f /tmp/LustreMDSMgmtService.pid" )
        file_path = os.path.dirname(os.path.realpath(__file__))
        self.sbtls.sp_open('python ' + file_path +
                           '/daemon-lustre-mds.py stop')

    def start(self, env):
        print 'Start LustreMDSMgmtService'
        # Reconfigure all files
        # Start your service

        #Since we have not installed a real service, there is no PID file created by it.
        #Therefore we are going to artificially create the PID.
        # Execute( "touch /tmp/LustreMDSMgmtService.pid" )
        file_path = os.path.dirname(os.path.realpath(__file__))
        self.sbtls.sp_open('python ' + file_path +
                           '/daemon-lustre-mds.py start')

    def status(self, env):
        # print 'Status of LustreMDSMgmtService'
        # LustreMDSMgmtService_pid_file = "/tmp/LustreMDSMgmtService.pid"
        # #check_process_status(dummy_master_pid_file)
        # Execute( format("cat {LustreMDSMgmtService_pid_file}") )
        file_path = os.path.dirname(os.path.realpath(__file__))
        check = self.sbtls.sp_open('python ' + file_path +
                                   '/daemon-lustre-mds.py status')
        print check
        if 'not' in str(check):
            raise ComponentIsNotRunning()

    def mountmdts(self, env):
        # self.sbtls.excuteDaemon('sample02',5680)
        print("mountmdts!!")

    def unmountmdts(self, env):
        # self.sbtls.excuteDaemon('sample02',5680)
        print("unmountmdts!!")

    def mkfsmdts(self, env):
        # self.sbtls.excuteDaemon('sample02',5680)
        print("mkfsmdts!!")

    def configure(self, env):
        print 'LustreMDSMgmtService Configure start....'
        import mdt_params
        config = Script.get_config()
        mdt_server_device = mdt_params.mdt_server_device

        local_hostname = mdt_params.local_hostname
        mdt_fsname = mdt_params.mdt_fsname
        mdt_index = mdt_params.mdt_index
        mdt_mount = mdt_params.mdt_mount

        modprobe_networks = mdt_server_device['network_mds']

        Lustrecontent = format('options lnet networks="' + modprobe_networks +
                               '"')
        File(
            os.path.join("/etc/modprobe.d", "lustre.conf"),
            owner='root',
            group='root',
            mode=0644,
            content=Lustrecontent,
        )
        Execute(format('modprobe lustre'))
        Execute(format('lsmod | egrep "lustre|lnet"'))
        Execute(
            format('mkfs.lustre --fsname=' + mdt_fsname +
                   '  --mdt --mgs --index=' + mdt_index + ' ' +
                   mdt_server_device['device_mds']))

        Directory(mdt_mount, create_parents=True, owner='root', group='root')

        Execute(
            format('mount -t lustre ' + mdt_server_device['device_mds'] + ' ' +
                   mdt_mount))

        # File(
        #     os.path.join("/var/lib/ambari-agent","LustreMDSMgmtService_status.conf"),
        #     owner='root',
        #     group='root',
        #     mode=0644,
        #     content="status=1",
        # )

        print 'Configure complete.'
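
    # For reference, the File() resource above renders /etc/modprobe.d/lustre.conf
    # as a single line, e.g. (illustrative value; the real one comes from
    # mdt_server_device['network_mds']):
    #
    #     options lnet networks="tcp0(eth0)"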
class LustreOSSMgmtService(Script):
    def __init__(self):
        self.sbtls = SubTools()

    def install(self, env):
        import oss_params
        print 'Install LustreOSSMgmtService.'

        mds_wget = oss_params.mds_wget

        # Install packages
        self.install_packages(env)
        Execute(format("yum install -y wget, libesmtp libyaml net-snmp-agent-libs opensm opensm-libs sg3_utils tcl tk"))


        for pkg in mds_wget:
            download_file = 'wget '+pkg['url']
            Execute(format(download_file))

        pkg_file = 'yum install -y'
        for pkg in mds_wget:
            pkg_file = pkg_file +' '+ pkg['name'] + ' '

        try:
            Execute(format(pkg_file))
        except ExecutionFailed as e:
            print(e)
            pass

        print 'Installation complete.'


        self.configure(env)
        # file_path = os.path.dirname(os.path.realpath(__file__))
        # self.sbtls.sp_open('python '+file_path+'/daemon-lustre-oss.py start')

    def stop(self, env):
        print 'Stop LustreOSSMgmtService'
        # Stop your service

        #Since we have not installed a real service, there is no PID file created by it.
        #Therefore we are going to artificially remove the PID.
        # Execute( "rm -f /tmp/LustreOSSMgmtService.pid" )
        file_path = os.path.dirname(os.path.realpath(__file__))
        self.sbtls.sp_open('python '+file_path+'/daemon-lustre-oss.py stop')

    def start(self, env):
        print 'Start LustreOSSMgmtService'
        # Reconfigure all files
        # Start your service

        #Since we have not installed a real service, there is no PID file created by it.
        #Therefore we are going to artificially create the PID.
        #Execute( "touch /tmp/LustreOSSMgmtService.pid" )
        file_path = os.path.dirname(os.path.realpath(__file__))
        self.sbtls.sp_open('python '+file_path+'/daemon-lustre-oss.py start')

    def status(self, env):
        print 'Status of LustreOSSMgmtService'
        # LustreOSSMgmtService_pid_file = "/tmp/LustreOSSMgmtService.pid"
        # #check_process_status(dummy_slave_pid_file)
        # # Execute( format("cat {LustreOSSMgmtService_pid_file}") )
        # # pass

        file_path = os.path.dirname(os.path.realpath(__file__))
        check = self.sbtls.sp_open('python '+file_path+'/daemon-lustre-oss.py status')
        print check
        if 'not' in str(check):
            raise ComponentIsNotRunning()

    def mountosts(self, env):
        self.sbtls.excuteDaemon('sample02',5681)
        print("mountosts!!")

    def unmountosts(self, env):
        self.sbtls.excuteDaemon('sample02',5681)
        print("unmountosts!!")

    def mkfsosts(self, env):
        self.sbtls.excuteDaemon('sample02', 5681)
        print("mkfsosts!!")

    def configure(self, env):
        print 'LustreOSSMgmtService Configure start....'
        import oss_params

        local_hostname = oss_params.local_hostname
        oss_server_device = oss_params.oss_server_device[local_hostname]
        modprobe_networks = oss_server_device['network_oss']
        mds_host = oss_params.mds_host
        mdt_fsname = oss_params.mdt_fsname
        network_device_mds = oss_params.network_device_mds
        oss_mount = oss_params.oss_mount

        Lustrecontent = format('options lnet networks="'+modprobe_networks+'"')
        File(
            os.path.join("/etc/modprobe.d","lustre.conf"),
            owner='root',
            group='root',
            mode=0644,
            content=Lustrecontent,
        )
        Execute(format('modprobe lustre'))
        Execute(format('modprobe ost'))


        for i in range(0, oss_server_device['device_num']):
            Execute(
                format('mkfs.lustre --fsname=' + mdt_fsname +
                       ' --ost --mgsnode=' + mds_host + '@' + network_device_mds +
                       ' --index=' + str(oss_server_device['server_device_index'][i]) +
                       ' --reformat ' + oss_server_device['server_device'][i]))

            Directory(
                oss_mount + str(oss_server_device['server_device_index'][i]),
                create_parents=True,
                owner='root',
                group='root')
            Execute(
                format('mount -t lustre ' +
                       oss_server_device['server_device'][i] + ' ' + oss_mount +
                       str(oss_server_device['server_device_index'][i])))

        print 'Configure complete.'
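
    # A hedged sketch of the per-host entry configure() reads from
    # oss_params.oss_server_device[local_hostname]. The keys are taken from
    # the code above; the values are illustrative only.
    #
    # {
    #     'network_oss': 'tcp0(eth0)',
    #     'device_num': 2,
    #     'server_device': ['/dev/sdb', '/dev/sdc'],
    #     'server_device_index': [0, 1],
    # }
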
class HadoopLustreAdapterMgmtService(Script):
    def __init__(self):
        self.sbtls = SubTools()

    def install(self, env):
        import params

        print 'Install HadoopLustreAdapterMgmtService.'

        # Load the all configuration files
        # config = Script.get_config()
        # Bind to a local variable
        # HadoopLustreAdapterMgmtService_user = config['configurations']['my-config-env']['HadoopLustreAdapterMgmtService_user']

        mds_host = params.mds_host
        # Install packages
        self.install_packages(env)

        ## init process

        Execute(
            format("yum install wget libselinux-devel nfs-utils-lib-devel -y"))
        Execute(format("yum groupinstall development tools -y"))
        # Execute(format("wget -P /tmp http://scteam.ksc.re.kr/~jhkwak/lustre/client/lustre-client-2.7.0-2.6.32_504.8.1.el6.x86_64.src.rpm"))
        # Execute(format("ssh root@localhost -T \"nohup rpmbuild --rebuild --define 'lustre_name lustre-client' --define 'configure_args --disable-server --enable-client' --without servers /tmp/lustre-client-2.7.0-2.6.32_504.8.1.el6.x86_64.src.rpm > /tmp/buildhistory\""))
        # Execute(format("yum localinstall /root/rpmbuild/RPMS/x86_64/lustre-iokit-* -y"))
        # Execute(format("yum localinstall /root/rpmbuild/RPMS/x86_64/lustre-client-* -y"))

        Execute(
            format(
                "wget -P /tmp https://downloads.hpdd.intel.com/public/lustre/lustre-2.10.0/el7/client/RPMS/x86_64/kmod-lustre-client-2.10.0-1.el7.x86_64.rpm"
            ))
        Execute(
            format(
                "wget  -P /tmp https://downloads.hpdd.intel.com/public/lustre/lustre-2.10.0/el7/client/RPMS/x86_64/lustre-client-2.10.0-1.el7.x86_64.rpm"
            ))
        Execute(
            format(
                "wget  -P /tmp https://downloads.hpdd.intel.com/public/lustre/lustre-2.10.0/el7/client/RPMS/x86_64/lustre-iokit-2.10.0-1.el7.x86_64.rpm"
            ))

        Execute(format("yum localinstall /tmp/kmod-lustre-iokit-* -y"))
        Execute(format("yum localinstall /tmp/lustre-iokit-* -y"))
        Execute(format("yum localinstall /tmp/lustre-client-* -y"))

        # Execute(format("wget http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo -O /etc/yum.repos.d/epel-apache-maven.repo"))
        # Execute(format("sed -i s/\$releasever/6/g /etc/yum.repos.d/epel-apache-maven.repo"))
        # Execute(format("yum install -y apache-maven git"))
        # Execute(format("git clone https://github.com/Seagate/lustrefs"))
        # Execute(format("mvn -f lustrefs/ package"))

        # os.makedirs("/mnt/lustre/hadoop")
        # os.system("ssh root@localhost -t \"mkdir -p /mnt/lustre/hadoop\"")

        # Execute(format("/bin/mount -t lustre 192.168.1.2116@tcp:/mylustre  /mnt/lustre"))

        # Execute(format("mkdir /lustre"))

        if not os.path.isdir("/mnt/lustre/hadoop"):
            os.makedirs("/mnt/lustre/hadoop")
        os.system("ssh root@localhost -t \"mkdir -p /mnt/lustre/hadoop\"")

        # Execute(format("yes | cp  lustrefs/target/lustrefs-hadoop-0.9.1.jar /usr/hdp/current/hadoop-hdfs-namenode/lib/"))
        # Execute(format("yes | cp  lustrefs/target/lustrefs-hadoop-0.9.1.jar /usr/hdp/current/hadoop-yarn-nodemanager/lib/"))
        # Execute(format("yes | cp  lustrefs/target/lustrefs-hadoop-0.9.1.jar /usr/hdp/current/hadoop-mapreduce-client/lib/"))
        # Execute(format("yes | cp  lustrefs/target/lustrefs-hadoop-0.9.1.jar /usr/hdp/current/hadoop-mapreduce-historyserver/lib/"))

        hosts = self.getHostsInfo()
        file_path = os.path.dirname(os.path.realpath(__file__))

        for hostname in hosts:
            #self.subprocess_open('echo "'+hostname+'" >> /home/daemon/dm-log.info')
            self.sbtls.sp_open("scp " + file_path +
                               "/modules/lustrefs-hadoop-0.9.1.jar root@" +
                               hostname +
                               ":/usr/hdp/current/hadoop-hdfs-namenode/lib/")
            self.sbtls.sp_open(
                "scp " + file_path +
                "/modules/lustrefs-hadoop-0.9.1.jar root@" + hostname +
                ":/usr/hdp/current/hadoop-yarn-nodemanager/lib/")
            self.sbtls.sp_open(
                "scp " + file_path +
                "/modules/lustrefs-hadoop-0.9.1.jar root@" + hostname +
                ":/usr/hdp/current/hadoop-mapreduce-client/lib/")
            self.sbtls.sp_open(
                "scp " + file_path +
                "/modules/lustrefs-hadoop-0.9.1.jar root@" + hostname +
                ":/usr/hdp/current/hadoop-mapreduce-historyserver/lib/")

        print 'Installation complete.'

    def stop(self, env):
        print 'Stop HadoopLustreAdapterMgmtService'
        # Stop your service

        #Since we have not installed a real service, there is no PID file created by it.
        #Therefore we are going to artificially remove the PID.
        # Execute( "rm -f /tmp/HadoopLustreAdapterMgmtService.pid" )
        file_path = os.path.dirname(os.path.realpath(__file__))
        self.sbtls.sp_open('python ' + file_path + '/daemon-lustre.py stop')

    def start(self, env):
        print 'Start HadoopLustreAdapterMgmtService'
        # Reconfigure all files
        # Start your service

        #Since we have not installed a real service, there is no PID file created by it.
        #Therefore we are going to artificially create the PID.
        # Execute( "touch /tmp/HadoopLustreAdapterMgmtService.pid" )
        file_path = os.path.dirname(os.path.realpath(__file__))
        self.sbtls.sp_open('python ' + file_path + '/daemon-lustre.py start')

    # def status(self, env):
    #     print 'Status of HadoopLustreAdapterMgmtService'
    #     HadoopLustreAdapterMgmtService_pid_file = "/tmp/HadoopLustreAdapterMgmtService.pid"
    #     #check_process_status(dummy_master_pid_file)
    #     Execute( format("cat {HadoopLustreAdapterMgmtService_pid_file}") )
    #     pass

    def switchtohdfs(self, env):
        self.sbtls.excuteDaemon('tohdfs', 5679)
        print("switchtohdfs!!")

    def switchtolustrefs(self, env):
        print 'init'
        self.sbtls.excuteDaemon('tolustrefs', 5679)
        print 'init-done'
        print("switchtolustrefs!!")

    def getHostsInfo(self):
        global local_hostname
        local_hostname = socket.gethostname()
        print "Local Hostname : " + local_hostname

        # ambari server hostname
        config = ConfigParser.ConfigParser()
        config.read("/etc/ambari-agent/conf/ambari-agent.ini")
        global ambari_url
        ambari_url = config.get('server', 'hostname')
        print "Ambari server Hostname : " + ambari_url

        # cluster_name
        headers = {
            'X-Requested-By': 'ambari',
        }
        r = requests.get('http://' + ambari_url + ':8080/api/v1/clusters',
                         headers=headers,
                         auth=('admin', 'admin'))
        j = json.loads(r.text)
        items = j["items"][0]["Clusters"]
        print "Cluster Name : " + items["cluster_name"]

        # LustreClient HostNames
        # curl -u admin:admin http://192.168.1.194:8080/api/v1/clusters/bigcluster/services/LUSTRE/components/LUSTRE_CLIENT

        r = requests.get(
            'http://' + ambari_url + ':8080/api/v1/clusters/' +
            items["cluster_name"] +
            '/services/LUSTREMGMTSERVICES/components/LustreClient',
            headers=headers,
            auth=('admin', 'admin'))
        j = json.loads(r.text)
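        # The component endpoint returns JSON shaped roughly like
        # {"host_components": [{"HostRoles": {"host_name": "..."}}, ...]};
        # only host_name is picked out below.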

        result = []
        for component in j["host_components"]:
            result.append(component["HostRoles"]["host_name"])

        return result

    def mount(self, env):
        self.sbtls.excuteDaemon('mount', 5679)
        print("mount!!")

    def unmount(self, env):
        self.sbtls.excuteDaemon('umount', 5679)
        print("umount!!")

    def createtomapreducefoder(self, env):
        self.sbtls.excuteDaemon('createToMapreduceFoder', 5679)
        print("create!!")

    def status(self, env):
        file_path = os.path.dirname(os.path.realpath(__file__))
        check = self.sbtls.sp_open('python ' + file_path +
                                   '/daemon-lustre.py status')
        print check
        if 'not' in str(check):
            raise ComponentIsNotRunning()
class SwitchConfig:
    def __init__(self):

        # set ambari_url
        self.sbtls = SubTools()
        config = ConfigParser.ConfigParser()
        config.read("/etc/ambari-agent/conf/ambari-agent.ini")
        self.ambari_url = config.get('server', 'hostname')

        # set cluster_name
        headers = {
            'X-Requested-By': 'ambari',
        }
        r = requests.get('http://' + self.ambari_url + ':8080/api/v1/clusters',
                         headers=headers,
                         auth=('admin', 'admin'))
        j = json.loads(r.text)
        items = j["items"][0]["Clusters"]
        print "2 - Cluster Name : " + items["cluster_name"]
        self.cluster_name = items["cluster_name"]

        # change version
        file_path = os.path.dirname(os.path.realpath(__file__))
        json_data = open(file_path + "/config-version.json").read()
        data = json.loads(json_data)

        data["version"] = data["version"] + 1
        with open(file_path + '/config-version.json', 'w') as f:
            json.dump(data, f)

        self.checkVersion = str(data["version"])
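
        # config-version.json is assumed to hold a single counter, e.g.:
        #
        #     {"version": 12}
        #
        # It is bumped on every instantiation so each pushed config gets a
        # fresh "version<N>" tag.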

    def toHdfs(self):
        confInfo = self.sbtls.sp_open("curl -u admin:admin -X GET  http://" +
                                      self.ambari_url +
                                      ":8080/api/v1/clusters/" +
                                      self.cluster_name +
                                      "?fields=Clusters/desired_configs")
        j = json.loads(confInfo[0])
        desiredConf = j['Clusters']['desired_configs']
        targetSites = ('mapred-site', 'yarn-site', 'core-site')
        # targetSites = ('mapred-site',)
        for targetSite in targetSites:
            print "\n\n\n"
            print "target site : " + targetSite
            print "target version" + desiredConf[targetSite]['tag']
            print "----- target conf -----"
            print desiredConf
            print "----- target conf end -----"
            print "\n"
            try:
                versionNum = desiredConf[targetSite]['tag'].split('version')[1]
            except Exception as e:
                versionNum = desiredConf[targetSite]['tag']

            # nextVersionNum = str(int(versionNum)+1)
            nextVersionNum = self.checkVersion

            targetConf = self.sbtls.sp_open(
                'curl -u admin:admin -H "X-Requested-By: ambari" -X GET "http://'
                + self.ambari_url + ':8080/api/v1/clusters/' +
                self.cluster_name + '/configurations?type=' + targetSite +
                '&tag=' + desiredConf[targetSite]['tag'] + '"')

            targetJson = json.loads(targetConf[0])

            prop = targetJson['items'][0]['properties']
            print targetJson
            #print prop
            if targetSite == 'core-site':
                prop['fs.defaultFS'] = u"hdfs://node01:8020"  # modify
                prop.pop('fs.lustrefs.impl', None)  # remove
                prop.pop('fs.AbstractFileSystem.lustrefs.impl', None)  # remove
                prop.pop('fs.lustrefs.mount', None)  # remove
                prop.pop('hadoop.tmp.dir', None)  # remove

            elif targetSite == 'yarn-site':
                prop['yarn.nodemanager.container-executor.class'] = \
                    u"org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor"  # modify
                prop['yarn.nodemanager.local-dirs'] = u"/hadoop/yarn/local"  # modify
                prop['yarn.nodemanager.log-dirs'] = u"/hadoop/yarn/log"  # modify
                # prop['yarn.timeline-service.leveldb-state-store.path'] = u"/hadoop/yarn/timeline"  # modify
                # prop['yarn.timeline-service.leveldb-timeline-store.path'] = u"/hadoop/yarn/timeline"  # modify
                prop.pop('yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user', None)  # remove
                prop.pop('yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users', None)  # remove

            elif targetSite == 'mapred-site':
                prop['yarn.app.mapreduce.am.staging-dir'] = u"/user"  #modify

                # print json.dumps(prop)

            reqHead = 'curl -u admin:admin -H "X-Requested-By: ambari" -X PUT -d '
            reqBody = '[{"Clusters":{"desired_config":[{"type":"' + targetSite + '","tag":"version' + nextVersionNum + '","service_config_version_note":"New config version"}]}}]'
            reqTail = ' "http://' + self.ambari_url + ':8080/api/v1/clusters/' + self.cluster_name + '"'

            body = json.loads(reqBody)
            body[0]['Clusters']['desired_config'][0][u'properties'] = prop
            reqBody = json.dumps(body)
            print "======= Req Api ======="
            print reqHead + reqBody + reqTail
            print "======= Req Api End ======="
            #TEST
            #getData = reqHead + reqBody+ reqTail
            #f = open("/home/lustre-daemon/test_20170510_2.txt", 'w')
            #f.write(getData)
            #f.close()
            reqResult = self.sbtls.sp_open(reqHead + "'" + reqBody + "'" +
                                           reqTail)
            print str(reqResult)
class YARNJobMonitorMaster(Script):
    def __init__(self):
        self.sbtls = SubTools()

    def install(self, env):
        print 'Install YARNJobMonitorMaster.'

        # Load the all configuration files
        config = Script.get_config()
        # Bind to a local variable
        # HadoopLustreAdapterMgmtService_user = config['configurations']['lustrefs-config-env']['HadoopLustreAdapterMgmtService_user']

        # Install packages
        self.install_packages(env)

        # Create a new user and group
        # Execute( format("groupadd -f {HadoopLustreAdapterMgmtService_user}") )
        # Execute( format("id -u {HadoopLustreAdapterMgmtService_user} &>/dev/null || useradd -s /bin/bash {HadoopLustreAdapterMgmtService_user} -g {HadoopLustreAdapterMgmtService_user}") )

        ### Continue installing and configuring your service

        print 'Installation complete.'

    def stop(self, env):
        print 'Stop YARNJobMonitorMaster'
        # Stop your service

        #Since we have not installed a real service, there is no PID file created by it.
        #Therefore we are going to artificially remove the PID.
        # Execute( "rm -f /tmp/HadoopLustreAdapterMgmtService.pid" )
        file_path = os.path.dirname(os.path.realpath(__file__))
        # self.sbtls.sp_open('python /var/lib/ambari-agent/cache/stacks/HDP/2.4/services/YARNJOBMONITORSERVICE/package/scripts/daemon-yarnjobmonitor.py stop')
        self.sbtls.sp_open('python ' + file_path +
                           '/daemon-yarnjobmonitor.py stop')

    def start(self, env):
        print 'Start YARNJobMonitorMaster'
        file_path = os.path.dirname(os.path.realpath(__file__))
        # Reconfigure all files
        # Start your service

        #Since we have not installed a real service, there is no PID file created by it.
        #Therefore we are going to artificially create the PID.
        # Execute( "touch /tmp/HadoopLustreAdapterMgmtService.pid" )
        # self.sbtls.sp_open('python /var/lib/ambari-agent/cache/stacks/HDP/2.4/services/YARNJOBMONITORSERVICE/package/scripts/daemon-yarnjobmonitor.py start')
        self.sbtls.sp_open('python ' + file_path +
                           '/daemon-yarnjobmonitor.py start')

    # def status(self, env):
    #     print 'Status of HadoopLustreAdapterMgmtService'
    #     HadoopLustreAdapterMgmtService_pid_file = "/tmp/HadoopLustreAdapterMgmtService.pid"
    #     #check_process_status(dummy_master_pid_file)
    #     Execute( format("cat {HadoopLustreAdapterMgmtService_pid_file}") )
    #     pass

    # def switchtohdfs(self, env):
    #     self.sbtls.excuteDaemon('sample02',5679)
    #     print("switchtohdfs!!")

    # def switchtolustrefs(self, env):
    #     print 'init'
    #     self.sbtls.excuteDaemon('sample-time',5679)
    #     print 'init-done'
    #     print("switchtolustrefs!!")

    # def getHostsInfo(self):
    #     global local_hostname
    #     local_hostname = socket.gethostname()
    #     print "Local Hostname : " + local_hostname

    # # ambari server hostname
    #     config = ConfigParser.ConfigParser()
    #     config.read("/etc/ambari-agent/conf/ambari-agent.ini")
    #     global ambari_url
    #     ambari_url = config.get('server','hostname')
    #     print "Ambari server Hostname : " + ambari_url

    #     # cluster_name
    #     headers = {
    #         'X-Requested-By': 'ambari',
    #     }
    #     r = requests.get('http://'+ambari_url+':8080/api/v1/clusters', headers=headers, auth=('admin', 'admin'))
    #     j = json.loads(r.text)
    #     items = j["items"][0]["Clusters"]
    #     print "Cluster Name : " + items["cluster_name"]

    #     # LustreClient HostNames
    #     # curl -u admin:admin http://192.168.1.194:8080/api/v1/clusters/bigcluster/services/LUSTRE/components/LUSTRE_CLIENT
    #     r = requests.get('http://'+ambari_url+':8080/api/v1/clusters/'+items["cluster_name"]+'/services/YARNJOBMONITORSERVICE/components/YARNJobMonitorMaster', headers=headers, auth=('admin', 'admin'))
    #     j = json.loads(r.text)

    #     result =[]
    #     for component in j["host_components"]:
    #       result.append(component["HostRoles"]["host_name"])

    #     return result

    # def mount(self, env):
    #     self.sbtls.excuteDaemon('mount',5679)
    #     print("mount!!")

    # def unmount(self, env):
    #     self.sbtls.excuteDaemon('umount',5679)
    #     print("umount!!")

    def status(self, env):
        file_path = os.path.dirname(os.path.realpath(__file__))
        check = self.sbtls.sp_open('python ' + file_path +
                                   '/daemon-yarnjobmonitor.py status')
        # check = self.sbtls.sp_open('python /var/lib/ambari-agent/cache/stacks/YARNJOBMONITORSERVICE/0.1.0/package/scripts/daemon-yarnjobmonitor.py status')
        print check
        if 'not' in str(check):
            raise ComponentIsNotRunning()
class UserSyncMaster(Script):
    def __init__(self):
        self.sbtls = SubTools()

    def install(self, env):
        Execute(format("yum -y install expect sshpass"))
        Execute(format("yum -y install openldap-servers openldap-clients"))
        Execute(
            format(
                "cp /usr/share/openldap-servers/DB_CONFIG.example /var/lib/ldap/DB_CONFIG"
            ))
        Execute(format("chown ldap. /var/lib/ldap/DB_CONFIG"))

        self.configure(env)
        print 'Installation complete.'

    def stop(self, env):
        print 'Stop UserSyncServiceMaster'
        check = os.path.exists('/var/run/openldap/slapd.pid')
        if check:
            Execute(format("systemctl stop slapd"))
            Execute(format("systemctl disable slapd"))
        else:
            print "Open LDAP is Not Running"
        # self.sbtls.sp_open('python /var/lib/ambari-agent/cache/stacks/HDP/2.6/services/USERSYNC/package/scripts/daemon-usersync.py stop')
        # Stop your service

    def start(self, env):
        print 'Start UserSyncServiceMaster'
        check = os.path.exists('/var/run/openldap/slapd.pid')
        if check:
            print "Open LDAP is Running"
        else:
            Execute(format("systemctl start slapd"))
            Execute(format("systemctl enable slapd"))
        # self.sbtls.sp_open('python /var/lib/ambari-agent/cache/stacks/HDP/2.6/services/USERSYNC/package/scripts/daemon-usersync.py start')
        # Reconfigure all files
        # Start your service

    # def clientsync(self, env):
    #     self.sbtls.excuteDaemon('clientsync',5800)
    #     print("clientsync!!");

    def status(self, env):
        # check = self.sbtls.sp_open('python /var/lib/ambari-agent/cache/stacks/HDP/2.6/services/USERSYNC/package/scripts/daemon-usersync.py status')
        check = os.path.exists('/var/run/openldap/slapd.pid')
        print check
        if check:
            pass
        else:
            raise ComponentIsNotRunning

    def configure(self, env):
        import params
        ldap_manager_pass = params.ldap_manager_pass
        ldap_manager_name = params.ldap_manager_name
        ldap_domain = params.ldap_domain

        Execute(format("systemctl start slapd"))
        Execute(format("systemctl enable slapd"))
        file_path = os.path.dirname(os.path.realpath(__file__))
        check = self.sbtls.sp_open('sh ' + file_path +
                                   '/modules/shellscripts/password.sh ' +
                                   ldap_manager_pass)
        ssha_pass = check[0].split('\r\n')[3]
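        # password.sh is assumed to wrap OpenLDAP's slappasswd, printing an
        # {SSHA} hash for the given clear-text password, e.g.
        # "slappasswd -s <password>" -> "{SSHA}xKYcc..." (illustrative hash);
        # the fourth line of the captured output is where that hash lands.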

        self.sbtls.sp_open('rm -rf /tmp/openldap')
        Execute(format("mkdir /tmp/openldap"))
        f = open("/tmp/openldap/chrootpw.ldif", 'w')
        data = """
# specify the password generated above for "olcRootPW" section
dn: olcDatabase={0}config,cn=config
changetype: modify
add: olcRootPW
olcRootPW: """ + ssha_pass
        f.write(data)
        f.close()
        Execute(
            format(
                "ldapadd -Y EXTERNAL -H ldapi:/// -f /tmp/openldap/chrootpw.ldif "
            ))

        Execute(
            format(
                "ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif"
            ))
        Execute(
            format(
                "ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/nis.ldif"
            ))
        Execute(
            format(
                "ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif"
            ))

        check = self.sbtls.sp_open('sh ' + file_path +
                                   '/modules/shellscripts/password.sh ' +
                                   ldap_manager_pass)
        ssha_pass = check[0].split('\r\n')[3]
        f = open("/tmp/openldap/chdomain.ldif", 'w')
        data = """# replace to your own domain name for "dc=***,dc=***" section
# specify the password generated above for "olcRootPW" section
dn: olcDatabase={1}monitor,cn=config
changetype: modify
replace: olcAccess
olcAccess: {0}to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth"
    read by dn.base="cn=""" + ldap_manager_name + """,""" + ldap_domain + """\" read by * none


dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcSuffix
olcSuffix: """ + ldap_domain + """

dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcRootDN
olcRootDN: cn=""" + ldap_manager_name + """,""" + ldap_domain + """

dn: olcDatabase={2}hdb,cn=config
changetype: modify
add: olcRootPW
olcRootPW: """ + ssha_pass + """

dn: olcDatabase={2}hdb,cn=config
changetype: modify
add: olcAccess
olcAccess: {0}to attrs=userPassword,shadowLastChange by
    dn=\"cn=""" + ldap_manager_name + """,""" + ldap_domain + """" write by anonymous auth by self write by * none
olcAccess: {1}to dn.base="" by * read
olcAccess: {2}to * by dn=\"cn=""" + ldap_manager_name + """,""" + ldap_domain + """" write by * read
"""
        f.write(data)
        f.close()
        Execute(
            format(
                "ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/openldap/chdomain.ldif "
            ))

        #ldap_name = ldap_domain.split("=")[1];
        ldap_name = ldap_domain.split('=')[1].split(',')[0]

        f = open("/tmp/openldap/basedomain.ldif", 'w')
        data = """dn: """ + ldap_domain + """
objectClass: top
objectClass: dcObject
objectclass: organization
o: LDAP server
dc: """ + ldap_name + """

dn: cn=""" + ldap_manager_name + """,""" + ldap_domain + """
objectClass: organizationalRole
cn: Manager
description: Directory Manager

dn: ou=People,""" + ldap_domain + """
objectClass: organizationalUnit
ou: People

dn: ou=Group,""" + ldap_domain + """
objectClass: organizationalUnit
ou: Group"""
        f.write(data)
        f.close()

        Execute(
            format("ldapadd -x -D cn=" + ldap_manager_name + "," +
                   ldap_domain + " -w " + ldap_manager_pass +
                   " -f /tmp/openldap/basedomain.ldif"))
        # self.sbtls.sp_open('sh '+file_path+'/modules/shellscripts/ldapadd.sh '+ ldap_manager_name+','+ldap_domain +' '+ldap_manager_pass);

        self.sbtls.sp_open('sh ' + file_path +
                           '/modules/shellscripts/ldapuser.sh ' + ldap_domain)
        Execute(
            format("ldapadd -x -D cn=" + ldap_manager_name + "," +
                   ldap_domain + " -w " + ldap_manager_pass +
                   " -f /tmp/openldap/ldapuser.ldif"))