def changePermissions():

    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
    hadoop_sbin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_SBIN_DIR').getValue()
    java_home_dir = proxy.getContainer().getRuntimeContext().getVariable('GRIDLIB_JAVA_HOME').getValue()
    
    if ContainerUtils.isWindows():
        pass
    else:
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Changing file permissions on directory " + hadoop_bin_dir)       
        commandline = "chmod -Rv u+x " + hadoop_bin_dir
        runCommand(commandline)
        
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Changing file permissions on directory " + hadoop_sbin_dir)       
        commandline = "chmod -Rv u+x " + hadoop_sbin_dir
        runCommand(commandline)
        
        java_bin_dir = os.path.join(java_home_dir, "bin")
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Changing file permissions on directory " + java_bin_dir)       
        commandline = "chmod -Rv u+x " + java_bin_dir
        runCommand(commandline)
        
        java_jrebin_dir = os.path.join(java_home_dir, "jre", "bin")
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Changing file permissions on directory " + java_jrebin_dir)       
        commandline = "chmod -Rv u+x " + java_jrebin_dir
        runCommand(commandline)
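
# --- Illustrative sketch (not part of the original enabler code) ----------
# runCommand() is defined elsewhere in this enabler.  The helper below is a
# minimal sketch of how such a helper is assumed to behave (run a command,
# log its output, and return [returncode, stdout, stderr]); the name,
# parameters, and defaults here are assumptions, not the real implementation.
def _runCommand_sketch(commandline, expectedReturnCodes=None, suppressOutput=False, shell=False):
    import shlex
    from subprocess import Popen, PIPE
    if expectedReturnCodes is None:
        expectedReturnCodes = [0]
    args = commandline if shell else shlex.split(commandline)
    process = Popen(args, stdout=PIPE, stderr=PIPE, shell=shell)
    stdout, stderr = process.communicate()
    if not suppressOutput:
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] " + str(stdout))
    if process.returncode not in expectedReturnCodes:
        ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] Unexpected return code [" + str(process.returncode) + "] for command [" + str(commandline) + "]")
    return [process.returncode, stdout, stderr]
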
def createEnvironmentScript():
    
    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
    
    if ContainerUtils.isWindows():
        environmentFilename = os.path.join(hadoop_bin_dir, "hadoop_enabler_ENVIRONMENT.bat")
    else:
        environmentFilename = os.path.join(hadoop_bin_dir, "hadoop_enabler_ENVIRONMENT.sh")
        
    environmentFile = open(environmentFilename, 'w')
    print>>environmentFile, "###################################################"
    print>>environmentFile, "# Generated by Hodoop Enabler"
    print>>environmentFile, "#     " + str(datetime.datetime.now())
    print>>environmentFile, "#"
    print>>environmentFile, "# This file sets all the ENVIRONMENT type runtimecontext"
    print>>environmentFile, "# variables defined by this enabler."
    print>>environmentFile, "#"
    print>>environmentFile, "###################################################"

    runtimeContext = proxy.getContainer().getRuntimeContext()
    # Iterate over every runtime context variable; range() already excludes the
    # upper bound, so using getVariableCount() - 1 would skip the last variable.
    for i in range(0, runtimeContext.getVariableCount()):
        variable = runtimeContext.getVariable(i)
        ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_common] " + str(variable.getName()) + " has type [" + str(variable.getTypeInt()) + "]")
        if variable.getTypeInt() == RuntimeContextVariable.ENVIRONMENT_TYPE:
            print>>environmentFile, variable.getName() + "=" + str(variable.getValue())
    
    environmentFile.close()
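
# --- Illustrative sketch (not part of the original enabler code) ----------
# The generated script contains one NAME=value line per ENVIRONMENT-type
# runtime context variable (comment lines start with '#').  A consumer could
# read it back like this; the helper name is an assumption.
def _readEnvironmentScript_sketch(environmentFilename):
    values = {}
    for line in open(environmentFilename):
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        name, _, value = line.partition("=")
        values[name] = value
    return values
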
 def startNode(self):
     logInfo("startNode:Enter")
     self.__environ = os.environ.copy()
     ### Starting Node
     self.__ELASTIC_ARGS = "-f -Des.pidfile=" + self.__pidfile
     if ContainerUtils.isWindows():
         self.__ELASTIC_CMD = os.path.join(self.__bindir,
                                           "elasticsearch.bat ")
     else:
         self.__ELASTIC_CMD = os.path.join(self.__bindir, "elasticsearch ")
     self.__CMD = self.__ELASTIC_CMD + self.__ELASTIC_ARGS
     logInfo("StartUp Command to be used : " + self.__CMD)
     args = shlex.split(self.__CMD)
     process = Popen(args,
                     stdout=None,
                     stderr=None,
                     env=self.__environ,
                     shell=False)
     time.sleep(20)
     process.poll()  # returncode stays None until the process is polled or waited on
     logInfo("Start return Code : " + str(process.returncode))
     logInfo("finding the archivesDir")
     global archivesDir
     archiveMgmtFeature = ContainerUtils.getFeatureInfo(
         "Archive Management Support", proxy.container,
         proxy.container.currentDomain)
     archivesDir = os.path.join(self.__enginedir,
                                archiveMgmtFeature.archiveDirectory)
     logInfo("Found archives dir " + archivesDir)
     self.__toggleCheck = True
     logInfo("startNode:Exit")
def moveContentFiles():
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
    work_dir = proxy.getContainer().getRuntimeContext().getVariable('CONTAINER_WORK_DIR').getValue()
    
    if ContainerUtils.isWindows():
        pass
    else:
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Copying enabler content files to version-specific sub-directory.")       
        runCommand("cp -rv " + work_dir + "/hadoop-content/* " + hadoop_home_dir, shell=True)
 def __configureDomain(self):
     "configure domain"
     configureDomainCommand = "configDomain.sh"
     if ContainerUtils.isWindows():
         configureDomainCommand = "configDomain.bat"
     cmdfile = os.path.join(self.__baseDir, "bin", configureDomainCommand)
     logger.info("Configure domain")
     self.__copyContainerEnvironment()
     retcode = call([cmdfile])
     logger.info("Return code: " + str(retcode))
def getStatistic_disk(directory):
    if ContainerUtils.isWindows():
        raise Exception("[hadoop_enabler_common] Windows is not yet implemented by this enabler.")
    else:
        
        df = Popen(["df", directory], stdout=PIPE)
        output = df.communicate()[0]
        device, size, used, available, percent, mountpoint = output.split("\n")[1].split()
        
        return [available, used, int(percent[:-1])]
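
# Illustrative note (an assumption about typical "df" output, not enabler code):
# the parsed data line is expected to look like
#   /dev/sda1  103081248  55123456  42700000  57%  /
# so getStatistic_disk() returns [available, used, use%], e.g.
# ['42700000', '55123456', 57] for the sample line above.
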
def getStatistic_common(name):

    memoryBean = ManagementFactory.getMemoryMXBean()

    if name == "enabler_HEAP_MEMORY_USAGE":
        bytes = memoryBean.getHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024 # convert to MiB
    elif name == "enabler_NON_HEAP_MEMORY_USAGE":
        bytes = memoryBean.getNonHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024 # convert to MiB
    
    elif name == "enabler_DATANODE_DECOMMISION_REQUESTS":
        
        hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()        

        if ContainerUtils.isWindows():
            raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
        else:
            commandline = "sh " + os.path.join(hadoop_home_dir, "bin", "hadoop") + " fs -count " + decommissionqueue_dir
            
        output = runCommand(commandline, expectedReturnCodes=[0, 255], suppressOutput=True)
        if (output[0] == 0): 
            stdout = str(output[1])
            count = int(stdout.split()[1])
            return int(count)
        elif (output[0] == 255):
            # Decommission request directory doesn't exist.  Not expected to exist until some datanode posts the first request.
            return int(0)    
        else:
            ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] Unexpected return code [" + str(output[0]) + 
                                                    "] while attempting to retrieve statistic enabler_DATANODE_DECOMMISION_REQUESTS statistic.  Assuming 0.")
            print output
            return int(0)
        
    elif name.startswith('enabler_DISK_'):

        tmpdir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_TMP_DIR').getValue()    
        
        if name == "enabler_DISK_SPACE_FREE":
            blocks = int(getStatistic_disk(tmpdir)[0])
            return blocks / 1024 / 1024 # convert 1024-byte blocks to GiB
            #return available
        elif name == "enabler_DISK_SPACE_USED":
            blocks = int(getStatistic_disk(tmpdir)[1])
            return blocks / 1024 / 1024  # convert 1024-byte blocks to GiB
            #return used
        elif name == "enabler_DISK_SPACE_USED_PERCENT":
            return getStatistic_disk(tmpdir)[2]
            #return int(percent[:-1])
        else:
            raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")

    else:
        raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")
def killOrphans():     
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Killing any orphaned process on this engine remaining from a previous execution")
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()

    if ContainerUtils.isWindows():
        raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
    else:
        commandline = "ps x"
    output = runCommand(commandline, expectedReturnCodes=[0, 255], stdout=PIPE, suppressOutput=True)

    for line in output[1].splitlines():
        if hadoop_home_dir in line:
            if "java" in line:
                ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Issuing Kill command for orphaned process [" + str(line) + "]")
                pid = int(line.split(None, 1)[0])
                os.kill(pid, signal.SIGKILL)
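
# --- Illustrative sketch (not part of the original enabler code) ----------
# A gentler variant would send SIGTERM first and only fall back to SIGKILL;
# this is a design alternative, not the enabler's behaviour.
def _killProcessGracefully_sketch(pid, graceSeconds=5):
    import os, signal, time
    try:
        os.kill(pid, signal.SIGTERM)     # ask the process to exit
        time.sleep(graceSeconds)
        os.kill(pid, 0)                  # probe: raises OSError if already gone
        os.kill(pid, signal.SIGKILL)     # force kill as a last resort
    except OSError:
        pass                             # process already exited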