Code Example #1
def doInit(additionalVariables):

    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Beginning doInit()")

    doInit_common(additionalVariables)        
    
    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_RECEIVED_REQUESTS", "", 
                                                   RuntimeContextVariable.STRING_TYPE))

    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG", "False", 
                                                   RuntimeContextVariable.STRING_TYPE))

    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_EXIT_FLAG", "False", 
                                                   RuntimeContextVariable.STRING_TYPE))
    
    """
    Create New Thread objects for processing requests and save for later
    """
    logger = ContainerUtils.getLogger(proxy)
    slstdout = StreamToLogger(logger,"STDOUT")
    slstderr = StreamToLogger(logger,"STDERR")
    lock = threading.Lock()
    processReqThread = processRequestsThread(slstdout, slstderr, lock)
    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_PROCESS_REQUEST_THREAD", processReqThread, 
                                                   RuntimeContextVariable.OBJECT_TYPE))
    
    checkForReqThread = checkForRequestsThread(slstdout, slstderr, lock)
    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_CHECKFOR_REQUEST_THREAD", checkForReqThread, 
                                                   RuntimeContextVariable.OBJECT_TYPE))

    proxy.doInit(additionalVariables)
    
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Exiting doInit()")
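
StreamToLogger is referenced here but not included in this listing. A minimal sketch of what such a helper might look like, assuming it is simply a file-like wrapper that forwards writes to the enabler logger (the class body below is illustrative, not the project's actual implementation):

class StreamToLogger(object):
    # Hypothetical sketch: file-like object that redirects a stream to the enabler logger.
    def __init__(self, logger, streamname):
        self.logger = logger
        self.streamname = streamname

    def write(self, buf):
        # Log each non-empty line, tagged with the stream it came from (STDOUT/STDERR).
        for line in buf.rstrip().splitlines():
            self.logger.info("[" + self.streamname + "] " + line.rstrip())

    def flush(self):
        # No buffering; writes go straight to the logger.
        pass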
Code Example #2
def getBalancerRequestsFromQueue():
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
    commandline = os.path.join(hadoop_home_dir, "bin", "hadoop") + " fs -ls " + balancerqueue_dir            
    # Older versions of hadoop (e.g. 1.0.4 and 1.1.2) return 255 if the file does not exist.  Newer versions return 1.
    output = runCommand(commandline, stdout=PIPE, expectedReturnCodes=[0, 255, 1], suppressOutput=True)

    requests = []
    if (output[0] == 0):
        reqsFileList = output[1].splitlines()
        numOfRequests=0
        for i in range(1, len(reqsFileList)):
            if not reqsFileList[i].startswith("Found"):
                """ Workaround - With 2.0.3-alpha a warning message for missing compression libraries is being printed to STDOUT during ls command """
                if not reqsFileList[i].count("Unable to load native-hadoop library for your platform... using builtin-java classes where applicable") > 0: 
                    ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Found request in queue [" + str(reqsFileList[i]) + "]")
                    lineTokens = reqsFileList[i].split()
                    if len(lineTokens) == 8:
                        filepath = lineTokens[7]
                        requests.append(filepath)
                        numOfRequests=numOfRequests+1
                    else:
                        ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_balancer] Ignoring line " + str(i + 1) + " of queue listing for " + balancerqueue_dir +
                                                                ".  Line has unexpected format.  Full listing is: " + str(reqsFileList))
        if numOfRequests > 0:
            ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] "+ str(numOfRequests) +" requests found in [" + str(balancerqueue_dir) + "].")
    elif (output[0] == 255) or (output[0] == 1):
        ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] No requests found in [" + str(balancerqueue_dir) + "].")
    else:
        ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_balancer] Unexpected return code " + str(output[0]) + 
                                                " while trying to get requests from [" + str(balancerqueue_dir) + "].")
    return requests
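
For reference, the parser above expects the classic 8-token "hadoop fs -ls" line format, where the eighth token (lineTokens[7]) is the file path. A made-up sample line:

    -rw-r--r--   3 hdfs supergroup          0 2013-05-01 12:00 /hadoop/balancer/queue/request-0001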
Code Example #3
def getStatistic(name):

    ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Beginning getStatistic()")

    result = getStatistic_common(name)

    ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Exiting getStatistic()")

    return result
Code Example #4
def createEnvironmentScript():
    
    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
    
    if ContainerUtils.isWindows():
        environmentFilename = os.path.join(hadoop_bin_dir, "hadoop_enabler_ENVIRONMENT.bat")
    else:
        environmentFilename = os.path.join(hadoop_bin_dir, "hadoop_enabler_ENVIRONMENT.sh")
        
    environmentFile = open(environmentFilename, 'w')
    print>>environmentFile, "###################################################"
    print>>environmentFile, "# Generated by Hadoop Enabler"
    print>>environmentFile, "#     " + str(datetime.datetime.now())
    print>>environmentFile, "#"
    print>>environmentFile, "# This file sets all the ENVIRONMENT type runtimecontext"
    print>>environmentFile, "# variables defined by this enabler."
    print>>environmentFile, "#"
    print>>environmentFile, "###################################################"

    runtimeContext = proxy.getContainer().getRuntimeContext()
    for i in range(runtimeContext.getVariableCount()):
          variable = runtimeContext.getVariable(i)
          ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_common] " + str(variable.getName()) + " has type [" + str(variable.getTypeInt()) + "]")
          if  (variable.getTypeInt() == RuntimeContextVariable.ENVIRONMENT_TYPE):
              print>>environmentFile, variable.getName() + "=" + str(variable.getValue())   
    
    environmentFile.close()
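
For illustration, on Linux the generated hadoop_enabler_ENVIRONMENT.sh would contain the banner followed by one NAME=value line per ENVIRONMENT-type variable, for example (variable names and values below are hypothetical):

    ###################################################
    # Generated by Hadoop Enabler
    #     2013-05-01 12:00:00.000000
    #
    # This file sets all the ENVIRONMENT type runtimecontext
    # variables defined by this enabler.
    #
    ###################################################
    HADOOP_HEAPSIZE=1024
    HADOOP_LOG_DIR=/opt/hadoop/logs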
Code Example #5
def stopBalancer():
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Stopping existing Balancer if any")
    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
    #runCommand("sh " + os.path.join(hadoop_bin_dir, "stop-balancer.sh"))
    """ Workaround: version 0.23.7 returns 1 when successful """
    if getHadoopVersion().startswith("0.23"):
        runCommand(getScript("stop-balancer.sh"), expectedReturnCodes=[1])
    else:
        runCommand(getScript("stop-balancer.sh"))
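
getScript() is not part of this listing. Judging from the commented-out line above, a plausible sketch (hypothetical; it assumes control scripts live under the enabler's bin or sbin runtime-context directories):

def getScript(scriptname):
    # Hypothetical sketch: resolve a Hadoop control script to a runnable command line.
    # Newer distributions keep control scripts under sbin/, older ones under bin/.
    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
    hadoop_sbin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_SBIN_DIR').getValue()
    script = os.path.join(hadoop_sbin_dir, scriptname)
    if not os.path.isfile(script):
        script = os.path.join(hadoop_bin_dir, scriptname)
    return "sh " + script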
Code Example #6
def moveContentFiles():
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
    work_dir = proxy.getContainer().getRuntimeContext().getVariable('CONTAINER_WORK_DIR').getValue()
    
    if ContainerUtils.isWindows():
        pass
    else:
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Copying enabler content files to version-specific sub-directory.")       
        runCommand("cp -rv " + work_dir + "/hadoop-content/* " + hadoop_home_dir, shell=True)
Code Example #7
def getStatistic_common(name):

    memoryBean = ManagementFactory.getMemoryMXBean()

    if name == "enabler_HEAP_MEMORY_USAGE":
        bytes = memoryBean.getHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024 # convert to MiB
    elif name == "enabler_NON_HEAP_MEMORY_USAGE":
        bytes = memoryBean.getNonHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024 # convert to MiB
    
    elif name == "enabler_DATANODE_DECOMMISION_REQUESTS":
        
        hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()        

        if ContainerUtils.isWindows():
            raise Exception("[hadoop_enabler_common] Windows is not yet implemented by this enabler.")
        else:
            commandline = "sh " + os.path.join(hadoop_home_dir, "bin", "hadoop") + " fs -count " + decommissionqueue_dir
            
        output = runCommand(commandline, expectedReturnCodes=[0, 255], suppressOutput=True)
        if (output[0] == 0): 
            stdout = str(output[1])
            count = int(stdout.split()[1])
            return int(count)
        elif (output[0] == 255):
            # Decommission request directory doesn't exist.  It is not expected to exist until some datanode posts the first request.
            return int(0)    
        else:
            ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] Unexpected return code [" + str(output[0]) + 
                                                    "] while attempting to retrieve the enabler_DATANODE_DECOMMISION_REQUESTS statistic.  Assuming 0.")
            print output
            return int(0)
        
    elif name.startswith('enabler_DISK_'):

        tmpdir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_TMP_DIR').getValue()    
        
        if name == "enabler_DISK_SPACE_FREE":
            blocks = int(getStatistic_disk(tmpdir)[0])
            return blocks / 1024 / 1024 # convert 1024-byte blocks to GiB
            #return available
        elif name == "enabler_DISK_SPACE_USED":
            blocks = int(getStatistic_disk(tmpdir)[1])
            return blocks / 1024 / 1024  # convert 1024-byte blocks to GiB
            #return used
        elif name == "enabler_DISK_SPACE_USED_PERCENT":
            return getStatistic_disk(tmpdir)[2]
            #return int(percent[:-1])
        else:
            raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")

    else:
        raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")
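
getStatistic_disk() is not shown in this listing. From the callers above it returns a (free, used, percent) tuple, with the first two values in 1024-byte blocks and percent as a string such as "42%". A plausible POSIX-only sketch (CPython os.statvfs; a Jython deployment might shell out to df instead):

def getStatistic_disk(path):
    # Hypothetical sketch: disk usage for the filesystem containing 'path',
    # reported in 1024-byte blocks to mirror 'df' output.
    st = os.statvfs(path)
    free = st.f_bavail * st.f_frsize / 1024
    total = st.f_blocks * st.f_frsize / 1024
    used = total - free
    percent = str(int(round(used * 100.0 / total))) + "%"
    return (free, used, percent)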
Code Example #8
def isContainerRunning():

    processReqThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_PROCESS_REQUEST_THREAD').getValue()
    checkForReqThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_CHECKFOR_REQUEST_THREAD').getValue()

    status = processReqThread.isAlive() and checkForReqThread.isAlive()

    if not status:
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer.isContainerRunning()]  Return status is: " + str(status))

    return status
Code Example #9
File: mysql-enabler.py  Project: ygirey/mysql-enabler
def isContainerRunning():
    port = runtimeContext.getVariable('MYSQL_PORT').getValue() 
    basedir = runtimeContext.getVariable('MYSQL_BASE_DIR').getValue()
    user = runtimeContext.getVariable('MYSQL_USER').getValue()
    pw = runtimeContext.getVariable('MYSQL_PW').getValue() 
    bindir = os.path.join(basedir, "bin")
    host = socket.gethostname()
    status = call([os.path.join(bindir, "mysqladmin"), "--port=" + port, "-h", host, "--user=" + user, "--password=" + pw, "ping"])
    ContainerUtils.getLogger(proxy).info("mysqladmin ping returns " + str(status))
    return status == 0
Code Example #10
def changePermissions():

    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
    hadoop_sbin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_SBIN_DIR').getValue()
    java_home_dir = proxy.getContainer().getRuntimeContext().getVariable('GRIDLIB_JAVA_HOME').getValue()
    
    if ContainerUtils.isWindows():
        pass
    else:
        java_bin_dir = os.path.join(java_home_dir, "bin")
        java_jrebin_dir = os.path.join(java_home_dir, "jre", "bin")
        for directory in (hadoop_bin_dir, hadoop_sbin_dir, java_bin_dir, java_jrebin_dir):
            ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Changing file permissions on directory " + directory)
            runCommand("chmod -Rv u+x " + directory)
Code Example #11
def runCommand(commandline, stdin=None, stdout=None, expectedReturnCodes=None, suppressOutput=None, shell=None):

    if expectedReturnCodes is None: expectedReturnCodes = [0]
    if suppressOutput is None: suppressOutput = False
    if shell is None: shell = False
    stderr = None
    if (suppressOutput):
        stdout=PIPE
        stderr=PIPE
    else: 
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Running command [" + commandline + "]")
            
    if shell:
        args = commandline
    else:
        args = shlex.split(commandline)

    os.unsetenv("LD_LIBRARY_PATH")
    os.unsetenv("LD_PRELOAD")

    if stdin is None:
        p = Popen(args, stdout=stdout, stdin=None, stderr=stderr, shell=shell)
        output = p.communicate()        
    else:
        p = Popen(args, stdout=stdout, stdin=PIPE, stderr=stderr, shell=shell)
        output = p.communicate(input=stdin)
    
    outputlist = [p.returncode]

    for item in output:
        outputlist.append(item)

    if (outputlist[0] in expectedReturnCodes ):
        if not (suppressOutput):
            ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Command return code was [" + str(outputlist[0]) + "]")
            printStdoutPipe(stdout, outputlist)
    else:
        
        ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_common] Return code " + str(outputlist[0]) + 
                                               " was not in the list of expected return codes " + str(expectedReturnCodes))
        if (suppressOutput):
            ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_common] Command was [" + commandline + "]")

        printStdoutPipe(stdout, outputlist)

    ContainerUtils.getLogger(proxy).finer("[hadoop_enabler_common] exiting runCommand(). Returning outputlist:" + (str(outputlist)))
    return outputlist
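
For reference, the two calling patterns used throughout these snippets (paths and return codes here are illustrative):

    # Quiet call that captures output and tolerates known non-zero return codes
    output = runCommand("hadoop fs -ls /some/dir", stdout=PIPE,
                        expectedReturnCodes=[0, 1, 255], suppressOutput=True)
    returncode, stdout_text, stderr_text = output[0], output[1], output[2]

    # Plain call that logs the command and flags any non-zero return code
    runCommand("chmod -Rv u+x /some/dir")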
Code Example #12
def killOrphans():     
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Killing any orphaned process on this engine remaining from a previous execution")
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()

    if ContainerUtils.isWindows():
        raise Exception("[hadoop_enabler_common] Windows is not yet implemented by this enabler.")
    else:
        commandline = "ps x"
    output = runCommand(commandline, expectedReturnCodes=[0, 255], stdout=PIPE, suppressOutput=True)

    for line in output[1].splitlines():
        if hadoop_home_dir in line:
            if "java" in line:
                ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Issuing Kill command for orphaned process [" + str(line) + "]")
                pid = int(line.split(None, 1)[0])
                os.kill(pid, signal.SIGKILL)
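
For reference, killOrphans() matches "ps x" lines that mention both hadoop_home_dir and java, then takes the leading field as the PID. A made-up example of such a line, assuming hadoop_home_dir appears in the command's classpath:

    12345 ?        Sl     0:42 /opt/java/bin/java -classpath /opt/hadoop-1.1.2/conf ... org.apache.hadoop.hdfs.server.balancer.Balancer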
Code Example #13
def doShutdown():

    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Beginning doShutdown()")

    proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_EXIT_FLAG').setValue("True")
    
    stopBalancer()
    
    processReqThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_PROCESS_REQUEST_THREAD').getValue()
    if processReqThread.isAlive():
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Waiting for Processing thread to exit.")
        processReqThread.join()

    checkForReqThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_CHECKFOR_REQUEST_THREAD').getValue()
    if checkForReqThread.isAlive():
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Waiting for Queue monitor thread to exit.")
        checkForReqThread.join()
    
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Exiting doShutdown()")
Code Example #14
def deleteHDFSFile(file): 

    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()        

    commandline = os.path.join(hadoop_bin_dir, "hadoop") + " fs -rm " + str(file)
    output = runCommand(commandline, expectedReturnCodes=[0, 255])

    if (output[0] == 0):
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Successfully deleted [" + str(file) + "]")            
    elif (output[0] == 255):
        ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] File already deleted [" + str(file) + "]. Continuing Processing")
    else:            
        ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_common] Unexpected return code [" + str(output[0]) + "] when attempting to delete [" + str(file) + "].")
Code Example #15
def rcvTrue(rcv):
    
    ContainerUtils.getLogger(proxy).finer("[hadoop_enabler_common] checking runtimecontext variable [" + str(rcv) + "]")
    
    rcvvalue = proxy.getContainer().getRuntimeContext().getVariable(rcv).getValue()
    ContainerUtils.getLogger(proxy).finest("[hadoop_enabler_common] value is [" + str(rcvvalue) + "].")
    if (str(rcvvalue).lower() in ("yes", "y", "true",  "t", "1")): 
        result = True
    elif (str(rcvvalue).lower() in ("no",  "n", "false", "f", "0")): 
        result = False
    else:
        raise Exception("[hadoop_enabler_common] Invalid value for boolean conversion: [" + str(rcvvalue) + "]")
    ContainerUtils.getLogger(proxy).finer("[hadoop_enabler_common] Exiting rcvTrue().  Result is [" + str(result) + "]")
    return result
Code Example #16
    def run(self):
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Starting checkForRequestsThread.run()")

        """
        Redirect stdout and stderr for this thread. 
        """
        sys.stdout = self.slstdout
        sys.stderr = self.slstderr
 
        try:
            checkForRequests(self.lock)
        except:
            ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_balancer] Unexpected error from checkForRequests thread")
            traceback.print_exc()
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Exiting checkForRequestsThread.run()")
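
This run() method belongs to one of the thread classes instantiated in doInit(). A minimal sketch of how checkForRequestsThread might be declared (the constructor below is assumed; it is not part of this listing):

class checkForRequestsThread(threading.Thread):
    # Hypothetical sketch: thread that polls the balancer request queue.
    def __init__(self, slstdout, slstderr, lock):
        threading.Thread.__init__(self)
        self.slstdout = slstdout
        self.slstderr = slstderr
        self.lock = lock

    # run() as shown above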
Code Example #17
def checkForRequests(lock):

    pollperiod = getContainerRunningConditionPollPeriod()/1000

    while not rcvTrue('hadoop_enabler_balancer_BALANCER_EXIT_FLAG'):

        lock.acquire()
        receivedReqs = getBalancerReceivedRequests()
        currentReqs = getBalancerRequestsFromQueue()
            
        ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Current request list [" + str(currentReqs) +  "].")
        ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Previous request list [" + str(receivedReqs) +  "].")
        if receivedReqs != currentReqs: 
            ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Balancer Requests have changed since last checked.")
            proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG').setValue("True")
            setBalancerReceivedRequests(currentReqs)
            stopBalancer()

        lock.release()

        time.sleep(pollperiod)
Code Example #18
def processRequests(lock):
    
    pollperiod = getContainerRunningConditionPollPeriod()/1000
    
    while not rcvTrue('hadoop_enabler_balancer_BALANCER_EXIT_FLAG'):
        
        timer = threading.Timer(pollperiod, retryTimerElapsed)
        timer.start()
            
        lock.acquire()
        if rcvTrue("hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG"):

            ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Beginning processing of new requests")

            receivedReqs = getBalancerReceivedRequests()
            local_receivedReqs = list(receivedReqs)

            proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG').setValue("False")

            lock.release()
            runBalancer()
            lock.acquire()
            
            if not rcvTrue("hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG"):
                ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Deleting processed requests")
                for request in local_receivedReqs:            
                    deleteHDFSFile(request)
                setBalancerReceivedRequests([])
                    
        lock.release()

        if (rcvTrue("hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG") or rcvTrue("hadoop_enabler_balancer_BALANCER_EXIT_FLAG")):
            if timer.isAlive():
                ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Terminating retry timer early")
                timer.cancel()
        else:
            timer.join()
Code Example #19
def doStart():
    
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Beginning doStart()")
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
    
    doStart_common()                 

    processReqThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_PROCESS_REQUEST_THREAD').getValue()
    processReqThread.start()
    
    startupdelay = 0
    if (True):   #TODO  Base decision to delay on whether Namenode just started or has been running for a while 
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Hadoop cluster is just starting.  Waiting [" + str(startupdelay) + 
                                             "] seconds before processing any balancer requests so more datanodes can join.")
    else:
        startupdelay = 0
        
    time.sleep(startupdelay)
    
    checkForReqThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_CHECKFOR_REQUEST_THREAD').getValue()
    checkForReqThread.start()
    
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Exiting doStart()")
Code Example #20
def hasContainerStarted():
    status = True    
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer - hasContainerStarted()]  Return status is: " + str(status))
    return status
Code Example #21
def getContainerRunningConditionPollPeriod():
    pollperiod = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_ENABLER_RUNNING_POLLPERIOD').getValue()
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Will verify enabler is running every " + str(pollperiod) + " seconds.")
    return float(pollperiod) * 1000
Code Example #22
def retryTimerElapsed():
    ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Minimum poll-period timer elapsed.")
Code Example #23
def printStdoutPipe(stdout, outputlist):

    if (stdout == PIPE):
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Command STDOUT:")
        print outputlist[1]
Code Example #24
                      "dfs.datanode.address",
                      "dfs.datanode.http.address",
                      "dfs.datanode.ipc.address",
                      "dfs.hosts.exclude",
                      "dfs.name.dir",
                      "dfs.data.dir",
                      "mapred.job.tracker",
                      "mapred.job.tracker.http.address",
                      "mapred.task.tracker.http.address"
                      ]

try: proxy
except NameError:
    globals()['proxy'] = inspect.currentframe().f_back.f_globals['proxy']
logger = ContainerUtils.getLogger(proxy)

def getDynamicGridlibDependencies():
    logger.info("[hadoop_enabler_common] Beginning getDynamicGridlibDependencies()")

    hadoopVersion = getHadoopVersion()
    logger.info("[hadoop_enabler_common] Hadoop Distribution version is [" + str(hadoopVersion) +"]")    

    defaultDomainGridlib = GridlibInfo()
    defaultDomainGridlib.name = "default-domain-type"
    
    logger.info("[hadoop_enabler_common] Adding Hadoop distribution dependency")
    gridlib = GridlibInfo()
    gridlib.name = "hadoop-distribution"
    gridlib.version = str(hadoopVersion)
    
Code Example #25
def doInitHadoopProps(userProp_RCVname, userPropFile_RCVname, additionalVariables):

    ContainerUtils.getLogger(proxy).info("[hadoop_enabler] Checking for user property file to augment [" + str(userProp_RCVname) + "].")
    
    userPropsRCV = proxy.getContainer().getRuntimeContext().getVariable(userProp_RCVname)
    userPropsPredefined = False
    if userPropsRCV is None:
        userProps = ""         
    else:
        userPropsPredefined = True
        userProps =  userPropsRCV.getValue()
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler] User properties variable ${" + userProp_RCVname + 
                                                "} was imported or pre-defined on the component.  Starting value is [" + str(userProps) + "].")

    userPropFile = proxy.getContainer().getRuntimeContext().getVariable(userPropFile_RCVname).getValue()

    if (userPropFile != "") and os.path.isfile(userPropFile):
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler] User property file found [" + str(userPropFile) + "].")
        xmldoc = minidom.parse(userPropFile)
        propertylist = xmldoc.getElementsByTagName('property')
    
        if propertylist == None or len(propertylist) == 0:
            ContainerUtils.getLogger(proxy).warning("[hadoop_enabler] No property elements found in user property file.")
        else:
            for element in propertylist:
                nameElements = element.getElementsByTagName("name")
                name = getNodeText(nameElements[0])
    
                isReserved = False                
                for reservedproperty in reservedproperties:
                    if reservedproperty.count(name) > 0:
                       isReserved = True
                       break
                
                if isReserved:
                    ContainerUtils.getLogger(proxy).warning("[hadoop_enabler] The property [" + str(name) + "] is managed by the Hadoop Enabler.  Will ignore user supplied value.")
                else:    
                    ContainerUtils.getLogger(proxy).info("[hadoop_enabler] Applying user property [" + str(element.toxml()) + "].")
                    userProps = userProps + element.toxml()
    
    if userPropsPredefined:
        proxy.getContainer().getRuntimeContext().getVariable(userProp_RCVname).setValue(userProps)
    else:
        additionalVariables.add(RuntimeContextVariable(userProp_RCVname, userProps, RuntimeContextVariable.STRING_TYPE,"User Supplied Hadoop properties" , False, RuntimeContextVariable.NO_INCREMENT))
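
For reference, doInitHadoopProps() parses the standard Hadoop *-site.xml layout. A user property file might look like this (sample content); note that dfs.data.dir appears in the reservedproperties list shown earlier, so the enabler would ignore that entry:

    <configuration>
      <property>
        <name>dfs.replication</name>
        <value>3</value>
      </property>
      <property>
        <name>dfs.data.dir</name>
        <value>/data/hdfs</value>
      </property>
    </configuration>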
Code Example #26
from com.datasynapse.fabric.admin.info import AllocationInfo
from com.datasynapse.fabric.util import GridlibUtils, ContainerUtils
from com.datasynapse.fabric.common import RuntimeContextVariable, ActivationInfo
from com.datasynapse.fabric.admin.info import ComponentAllocationInfo
from subprocess import PIPE
import os
import time
import socket
import sys
import threading
import logging

sys.path.append(proxy.getContainer().getScript(0).getFile().getParentFile().getAbsolutePath())
ContainerUtils.getLogger(proxy).info("sys.path modified: " + str(sys.path) )
from hadoop_enabler_common import *

def doInit(additionalVariables):

    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Beginning doInit()")

    doInit_common(additionalVariables)        
    
    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_RECEIVED_REQUESTS", "", 
                                                   RuntimeContextVariable.STRING_TYPE))

    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG", "False", 
                                                   RuntimeContextVariable.STRING_TYPE))

    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_EXIT_FLAG", "False", 
                                                   RuntimeContextVariable.STRING_TYPE))