def doInit(additionalVariables):

    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Beginning doInit()")

    doInit_common(additionalVariables)        
    
    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_RECEIVED_REQUESTS", "", 
                                                   RuntimeContextVariable.STRING_TYPE))

    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG", "False", 
                                                   RuntimeContextVariable.STRING_TYPE))

    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_EXIT_FLAG", "False", 
                                                   RuntimeContextVariable.STRING_TYPE))
    
    """
    Create New Thread objects for processing requests and save for later
    """
    logger = ContainerUtils.getLogger(proxy)
    slstdout = StreamToLogger(logger,"STDOUT")
    slstderr = StreamToLogger(logger,"STDERR")
    lock = threading.Lock()
    processReqThread = processRequestsThread(slstdout, slstderr, lock)
    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_PROCESS_REQUEST_THREAD", processReqThread, 
                                                   RuntimeContextVariable.OBJECT_TYPE))
    
    checkForReqThread = checkForRequestsThread(slstdout, slstderr, lock)
    additionalVariables.add(RuntimeContextVariable("hadoop_enabler_balancer_BALANCER_CHECKFOR_REQUEST_THREAD", checkForReqThread, 
                                                   RuntimeContextVariable.OBJECT_TYPE))

    proxy.doInit(additionalVariables)
    
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Exiting doInit()")
def createEnvironmentScript():
    
    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
    
    if ContainerUtils.isWindows():
        environmentFilename = os.path.join(hadoop_bin_dir, "hadoop_enabler_ENVIRONMENT.bat")
    else:
        environmentFilename = os.path.join(hadoop_bin_dir, "hadoop_enabler_ENVIRONMENT.sh")
        
    environmentFile = open(environmentFilename, 'w')
    print>>environmentFile, "###################################################"
    print>>environmentFile, "# Generated by Hodoop Enabler"
    print>>environmentFile, "#     " + str(datetime.datetime.now())
    print>>environmentFile, "#"
    print>>environmentFile, "# This file sets all the ENVIRONMENT type runtimecontext"
    print>>environmentFile, "# variables defined by this enabler."
    print>>environmentFile, "#"
    print>>environmentFile, "###################################################"

    runtimeContext = proxy.getContainer().getRuntimeContext()
    for i in range(0, runtimeContext.getVariableCount()):
        variable = runtimeContext.getVariable(i)
        ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_common] " + str(variable.getName()) + " has type [" + str(variable.getTypeInt()) + "]")
        if variable.getTypeInt() == RuntimeContextVariable.ENVIRONMENT_TYPE:
            print>>environmentFile, variable.getName() + "=" + str(variable.getValue())
    
    environmentFile.close()
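# For illustration only, the generated script contains one NAME=value line per
# ENVIRONMENT-type runtime context variable, e.g. (hypothetical contents):
#
#   ###################################################
#   # Generated by Hadoop Enabler
#   #     2013-05-01 12:00:00.000000
#   # ...
#   ###################################################
#   HADOOP_HEAPSIZE=1024
#   HADOOP_LOG_DIR=/opt/hadoop/logs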
def getBalancerRequestsFromQueue():
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
    commandline = os.path.join(hadoop_home_dir, "bin", "hadoop") + " fs -ls " + balancerqueue_dir            
    #Older versions of hadoop (e.g 1.0.4 and 1.1.2) return 255 if file does not exist.  Newer versions return 1
    output = runCommand(commandline, stdout=PIPE, expectedReturnCodes=[0, 255, 1], suppressOutput=True)

    requests = []
    if (output[0] == 0):
        reqsFileList = output[1].splitlines()
        numOfRequests=0
        for i in range(1, len(reqsFileList)):
            if not reqsFileList[i].startswith("Found"):
                """ Workaround - With 2.0.3-alpha a warning message for missing compression libraries is being printed to STDOUT during ls command """
                if not reqsFileList[i].count("Unable to load native-hadoop library for your platform... using builtin-java classes where applicable") > 0: 
                    ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Found request in queue [" + str(reqsFileList[i]) + "]")
                    lineTokens = reqsFileList[i].split()
                    if len(lineTokens) == 8:
                        filepath = lineTokens[7]
                        requests.append(filepath)
                        numOfRequests=numOfRequests+1
                    else:
                        ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_balancer] Ignoring Line " + str(i + 1) +  " of queue listing for " + balancerqueue_dir +
                                                             "Line has unexpected format.  Full listing is:" +
                                                             str(reqsFileList) )
        if numOfRequests > 0:
            ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] "+ str(numOfRequests) +" requests found in [" + str(balancerqueue_dir) + "].")
    elif (output[0] == 255) or (output[0] == 1):
        ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] No requests found in [" + str(balancerqueue_dir) + "].")
    else:
        ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_balancer] Unexpected return code " + str(output[0]) + 
                                                "while trying to get requests from [" + str(balancerqueue_dir) + "].")        
    return requests
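# Illustration of the 8-token parse above on a hypothetical `hadoop fs -ls`
# output line (permissions, replication, owner, group, size, date, time, path):
sample = "-rw-r--r--   3 hadoop supergroup          0 2013-05-01 12:00 /enabler/balancer-queue/request-001"
lineTokens = sample.split()
assert len(lineTokens) == 8
print lineTokens[7]    # -> /enabler/balancer-queue/request-001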
def doInstall(info):
    " do install of activation info"

    logger.info("OracleDatabaseContainer: doInstall:Enter")
    hostname = "localhost"
    try:
        hostname = InetAddress.getLocalHost().getCanonicalHostName()
    except:
        type, value, traceback = sys.exc_info()
        logger.severe("Hostname error:" + ` value `)

    dbInstallOption = getVariableValue("DB_INSTALL_OPTION")
    if dbInstallOption == "INSTALL_DB_AND_CONFIG":
        globalLockString = "OracleEnabler-" + hostname
        ContainerUtils.releaseGlobalLock(globalLockString)
        logger.info("Released global lock with name: " + globalLockString)

    try:
        oracleDatabase = getVariableValue("ORACLE_DATABASE_OBJECT")
        if oracleDatabase:
            oracleDatabase.installActivationInfo(info)
    except:
        type, value, traceback = sys.exc_info()
        logger.severe(
            "Unexpected error in OracleDatabaseContainer:doInstall:" +
            ` value `)

    logger.info("OracleDatabaseContainer: doInstall:Exit")
 def startNode(self):
     logInfo("startNode:Enter")
     self.__environ = os.environ.copy()
     ### Starting Node
     self.__ELASTIC_ARGS = "-f -Des.pidfile=" + self.__pidfile
     if ContainerUtils.isWindows():
         self.__ELASTIC_CMD = os.path.join(self.__bindir,
                                           "elasticsearch.bat ")
     else:
         self.__ELASTIC_CMD = os.path.join(self.__bindir, "elasticsearch ")
     self.__CMD = self.__ELASTIC_CMD + self.__ELASTIC_ARGS
     logInfo("StartUp Command to be used : " + self.__CMD)
     args = shlex.split(self.__CMD)
     process = Popen(args,
                     stdout=None,
                     stderr=None,
                     env=self.__environ,
                     shell=False)
     time.sleep(20)
     logInfo("Start return Code : " + str(process.returncode))
     logInfo("finding the archivesDir")
     global archivesDir
     archiveMgmtFeature = ContainerUtils.getFeatureInfo(
         "Archive Management Support", proxy.container,
         proxy.container.currentDomain)
     archivesDir = os.path.join(self.__enginedir,
                                archiveMgmtFeature.archiveDirectory)
     logInfo("Found archives dir " + archivesDir)
     self.__toggleCheck = True
     logInfo("startNode:Exit")
def getStatistic(name):

    ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Beginning getStatistic()")

    result = getStatistic_common(name)

    ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Exiting getStatistic()")
    return result
def stopBalancer():
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Stopping existing Balancer if any")
    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
    #runCommand("sh " + os.path.join(hadoop_bin_dir, "stop-balancer.sh"))
    """ Wordaround: version 0.23.7 returns 1 when successful """
    if getHadoopVersion().startswith("0.23"):
        runCommand(getScript("stop-balancer.sh"), expectedReturnCodes=[1])
    else:
        runCommand(getScript("stop-balancer.sh"))
def moveContentFiles():
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
    work_dir = proxy.getContainer().getRuntimeContext().getVariable('CONTAINER_WORK_DIR').getValue()
    
    if ContainerUtils.isWindows():
        pass
    else:
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Copying enabler content files to version-specific sub-directory.")       
        runCommand("cp -rv " + work_dir + "/hadoop-content/* " + hadoop_home_dir, shell=True)
def changePermissions():

    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
    hadoop_sbin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_SBIN_DIR').getValue()
    java_home_dir = proxy.getContainer().getRuntimeContext().getVariable('GRIDLIB_JAVA_HOME').getValue()
    
    if ContainerUtils.isWindows():
        pass
    else:
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Changing file permissions on directory " + hadoop_bin_dir)       
        commandline = "chmod -Rv u+x " + hadoop_bin_dir
        runCommand(commandline)
        
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Changing file permissions on directory " + hadoop_sbin_dir)       
        commandline = "chmod -Rv u+x " + hadoop_sbin_dir
        runCommand(commandline)
        
        java_bin_dir = os.path.join(java_home_dir, "bin")
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Changing file permissions on directory " + java_bin_dir)       
        commandline = "chmod -Rv u+x " + java_bin_dir
        runCommand(commandline)
        
        java_jrebin_dir = os.path.join(java_home_dir, "jre", "bin")
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Changing file permissions on directory " + java_jrebin_dir)       
        commandline = "chmod -Rv u+x " + java_jrebin_dir
        runCommand(commandline)
def getStatistic_common(name):

    memoryBean = ManagementFactory.getMemoryMXBean()

    if name == "enabler_HEAP_MEMORY_USAGE":
        bytes = memoryBean.getHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024 # convert to MiB
    elif name == "enabler_NON_HEAP_MEMORY_USAGE":
        bytes = memoryBean.getNonHeapMemoryUsage().getUsed()
        return bytes / 1024 / 1024 # convert to MiB
    
    elif name == "enabler_DATANODE_DECOMMISION_REQUESTS":
        
        hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()        

        if ContainerUtils.isWindows():
            raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
        else:
            commandline = "sh " + os.path.join(hadoop_home_dir, "bin", "hadoop") + " fs -count " + decommissionqueue_dir
            
        output = runCommand(commandline, expectedReturnCodes=[0, 255], suppressOutput=True)
        if (output[0] == 0): 
            stdout = str(output[1])
            count = int(stdout.split()[1])
            return int(count)
        elif (output[0] == 255):
            # Decommission request directory doesn't exist.  It is not expected to exist until some datanode posts the first request.
            return int(0)    
        else:
            ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] Unexpected return code [" + str(output[0]) + 
                                                    "] while attempting to retrieve statistic enabler_DATANODE_DECOMMISION_REQUESTS statistic.  Assuming 0.")
            print output
            return int(0)
        
    elif name.startswith('enabler_DISK_'):

        tmpdir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_TMP_DIR').getValue()    
        
        if name == "enabler_DISK_SPACE_FREE":
            blocks = int(getStatistic_disk(tmpdir)[0])
            return blocks / 1024 / 1024 # convert 1024-byte blocks to GiB
            #return available
        elif name == "enabler_DISK_SPACE_USED":
            blocks = int(getStatistic_disk(tmpdir)[1])
            return blocks / 1024 / 1024  # convert 1024-byte blocks to GiB
            #return used
        elif name == "enabler_DISK_SPACE_USED_PERCENT":
            return getStatistic_disk(tmpdir)[2]
            #return int(percent[:-1])
        else:
            raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")

    else:
        raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")
def isContainerRunning():
    port = runtimeContext.getVariable('MYSQL_PORT').getValue() 
    basedir = runtimeContext.getVariable('MYSQL_BASE_DIR').getValue()
    user = runtimeContext.getVariable('MYSQL_USER').getValue()
    pw = runtimeContext.getVariable('MYSQL_PW').getValue() 
    bindir = os.path.join(basedir, "bin")
    host = socket.gethostname()
    status = call([os.path.join(bindir, "mysqladmin"), "--port=" + port, "-h", host, "--user=" + user, "--password=" + pw, "ping"])
    ContainerUtils.getLogger(proxy).info("mysqladmin ping returns " + str(status))
    if status == 0:
        return True
    else:
        return False
def isContainerRunning():

    processRequetsThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_PROCESS_REQUEST_THREAD').getValue()
    checkForRequetsThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_CHECKFOR_REQUEST_THREAD').getValue()

    status = processRequetsThread.isAlive() and checkForRequetsThread.isAlive()

    if not status:
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer.isContainerRunning()]  Return status is: " + str(status))

    return status
 def __lock(self):
     "get build lock"
     logger.info("Acquire build lock:" + self.__buildLock)
     self.__locked = ContainerUtils.acquireGlobalLock(
         self.__buildLock, self.__lockExpire, self.__lockWait)
     if not self.__locked:
         raise Exception("Unable to acquire build lock:" + self.__buildLock)
    def archiveDeploy(self, archiveName, archiveLocators, properties):

        try:
            if self.__role != "primary":
                raise Exception("Not a primary Swam manage node")

            archiveZip = str(
                ContainerUtils.retrieveAndConfigureArchiveFile(
                    proxy.container, archiveName, archiveLocators, properties))

            if archiveZip[-4:] != ".zip":
                raise Exception(
                    "Archive must be a ZIP file containing Docker Compose project"
                )

            logger.info("Deploying archive:" + archiveZip)
            project = getArchiveDeployProperty("project-name", properties,
                                               None)

            if not project:
                raise Exception(
                    "project-name is required: Must be unique within this Docker swarm cluster"
                )

            dir = os.path.join(self.__deploydir, archiveName, project)

            if os.path.isdir(dir):
                raise Exception("Archive is already deployed")

            extractZip(archiveZip, dir)

            composeFile = getDockerComposeFile(dir)

            copyContainerEnvironment()
            os.environ["DOCKER_HOST"] = "tcp://" + self.__manageAddr
            os.environ["COMPOSE_HTTP_TIMEOUT"] = "300"

            cmdlist = [
                self.__dockerCompose, "--file", composeFile, "--project-name",
                project, "create", "--force-recreate"
            ]
            logger.info("Executing:" + list2str(cmdlist))
            retcode = call(cmdlist)
            logger.info("Return code:" + str(retcode))
            if retcode != 0:
                try:
                    self.archiveUndeploy(archiveName, properties)
                except:
                    pass
                finally:
                    if os.path.isdir(dir):
                        distutils.dir_util.remove_tree(dir)
                        path = os.path.join(self.__deploydir, archiveName)
                        os.rmdir(path)
                raise Exception("Archive deploy failed:" + archiveName)

        except:
            type, value, traceback = sys.exc_info()
            logger.severe("archiveDeploy error:" + ` value `)
            raise
 def __lock(self):
     "get global lock"
     self.__locked = ContainerUtils.acquireGlobalLock(
         self.__clusterConfigDir, self.__lockExpire, self.__lockWait)
     if not self.__locked:
         raise Exception("Unable to acquire global lock:" +
                         self.__clusterConfigDir)
def runCommand(commandline, stdin=None, stdout=None, expectedReturnCodes=None, suppressOutput=None, shell=None):

    if (expectedReturnCodes == None): expectedReturnCodes = [0]
    if (suppressOutput == None): suppressOutput = False
    if (shell == None): shell = False
    stderr = None
    if (suppressOutput):
        stdout=PIPE
        stderr=PIPE
    else: 
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Running command [" + commandline + "]")
            
    if shell:
        args = commandline
    else:
        args = shlex.split(commandline)

    os.unsetenv("LD_LIBRARY_PATH")
    os.unsetenv("LD_PRELOAD")

    if stdin == None:
        p = Popen(args, stdout=stdout, stdin=None, stderr=stderr, shell=shell)
        output = p.communicate()        
    else:
        p = Popen(args, stdout=stdout, stdin=PIPE, stderr=stderr, shell=shell)
        output = p.communicate(input=stdin)
    
    outputlist = [p.returncode]

    for item in output:
        outputlist.append(item)

    if (outputlist[0] in expectedReturnCodes ):
        if not (suppressOutput):
            ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Command return code was [" + str(outputlist[0]) + "]")
            printStdoutPipe(stdout, outputlist)
    else:
        
        ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_common] Return code " + str(outputlist[0]) + 
                                               " was not in list of expected return codes" + str(expectedReturnCodes))
        if (suppressOutput):
            ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_common] Command was [" + commandline + "]")

        printStdoutPipe(stdout, outputlist)

    ContainerUtils.getLogger(proxy).finer("[hadoop_enabler_common] exiting runCommand(). Returning outputlist:" + (str(outputlist)))
    return outputlist
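# Hedged usage sketch for runCommand(): capture output instead of logging it
# and accept only the default expected return code 0.  The returned list is
# [returncode, stdout_data, stderr_data]; the data entries are None unless
# PIPE was requested (suppressOutput=True pipes both).  The command here is
# hypothetical.
result = runCommand("ls /tmp", suppressOutput=True)
if result[0] == 0:
    for line in result[1].splitlines():
        ContainerUtils.getLogger(proxy).fine("[example] " + line)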
def killOrphans():     
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Killing any orphaned process on this engine remaining from a previous execution")
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()

    if ContainerUtils.isWindows():
        raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
    else:
        commandline = "ps x"
    output = runCommand(commandline, expectedReturnCodes=[0, 255], stdout=PIPE, suppressOutput=True)

    for line in output[1].splitlines():
        if hadoop_home_dir in line:
            if "java" in line:
                ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Issuing Kill command for orphaned process [" + str(line) + "]")
                pid = int(line.split(None, 1)[0])
                os.kill(pid, signal.SIGKILL)
 def __configureDomain(self):
     "configure domain"
     configureDomainCommand = "configDomain.sh"
     if ContainerUtils.isWindows():
         configureDomainCommand = "configDomain.bat"
     cmdfile = os.path.join(self.__baseDir, "bin", configureDomainCommand)
     logger.info("Configure domain")
     self.__copyContainerEnvironment()
     retcode = call([cmdfile])
     logger.info("Return code: " + str(retcode))
def archiveDeploy(archiveName, archiveLocators):
    logInfo("deploying archive " + archiveName)
    try:
        elastic = getVariableValue("ELASTICSEARCH_NODE_OBJECT")
    except:
        type, value, traceback = sys.exc_info()
        logSevere("Unexpected error in ElasticSearch:archiveDeploy:" +
                  ` value `)
    else:
        try:
            elastic.killNode()
        except:
            pass
        else:
            elastic.installPlugins(archiveName, archivesDir)
            elastic.startNode()
    ContainerUtils.retrieveAndConfigureArchiveFile(proxy.container,
                                                   archiveName,
                                                   archiveLocators, None)
    logInfo("End of deploying archive " + archiveName)
def getStatistic_disk(directory):
    if ContainerUtils.isWindows():
        raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
        return[0,0,0]
    else:
        
        df = Popen(["df", directory], stdout=PIPE)
        output = df.communicate()[0]
        device, size, used, available, percent, mountpoint = output.split("\n")[1].split()
        
        return [available, used, int(percent[:-1])]
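# Illustrative parse of a hypothetical `df` output, mirroring the split above:
sampledf = ("Filesystem     1024-blocks    Used Available Capacity Mounted on\n"
            "/dev/sda1         10485760 5242880   5242880      50% /")
device, size, used, available, percent, mountpoint = sampledf.split("\n")[1].split()
print available, used, int(percent[:-1])    # -> 5242880 5242880 50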
def doShutdown():

    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Beginning doShutdown()")

    proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_EXIT_FLAG').setValue("True")
    
    stopBalancer()
    
    processRequetsThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_PROCESS_REQUEST_THREAD').getValue()
    if processRequetsThread.isAlive():
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Waiting for Processing thread to exit.")
        processRequetsThread.join()

    checkForReqThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_CHECKFOR_REQUEST_THREAD').getValue()
    if checkForReqThread.isAlive():
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Waiting for Queue monitor thread to exit.")
        checkForReqThread.join()
    
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Exiting doShutdown()")
def deleteHDFSFile(file): 

    hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()        

    commandline = os.path.join(hadoop_bin_dir, "hadoop") + " fs -rm " + str(file)
    output = runCommand(commandline, expectedReturnCodes=[0, 255])

    if (output[0] == 0):
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Successfully deleted [" + str(file) + "]")            
    elif (output[0] == 255):
        ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] File already deleted [" + str(file) + "]. Continuing Processing")
    else:            
        ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_common] Unexpected return code [" + str(output[0]) + "] when attempting to delete.]")
def rcvTrue(rcv):
    
    ContainerUtils.getLogger(proxy).finer("[hadoop_enabler_common] checking runtimecontext variable [" + str(rcv) + "]")
    
    rcvvalue = proxy.getContainer().getRuntimeContext().getVariable(rcv).getValue()
    ContainerUtils.getLogger(proxy).finest("[hadoop_enabler_common] value is [" + str(rcvvalue) + "].")
    if (str(rcvvalue).lower() in ("yes", "y", "true",  "t", "1")): 
        result = True
    elif (str(rcvvalue).lower() in ("no",  "n", "false", "f", "0")): 
        result = False
    else:
        raise Exception("[hadoop_enabler_common] Invalid value for boolean conversion: [" + str(rcvvalue) + "]")
    ContainerUtils.getLogger(proxy).finer("[hadoop_enabler_common] Exiting Checking enabler flag. Result is [" + str(result) + "]")
    return result
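# Usage sketch: rcvTrue() maps the usual string spellings of a boolean held in
# a runtime context variable to a Python bool, e.g.:
#
#   if rcvTrue('hadoop_enabler_balancer_BALANCER_EXIT_FLAG'):
#       return    # hypothetical caller: stop polling and exit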
    def run(self):
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Starting checkForRequestsThread.run()")

        """
        Redirect stdout and stderr for this thread. 
        """
        sys.stdout = self.slstdout
        sys.stderr = self.slstderr
 
        try:
            checkForRequests(self.lock)
        except:
            ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_balancer] Unexpected error from checkForRequests thread")
            traceback.print_exc()
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Exiting checkForRequestsThread.run()")
def doStart():
    "do start"
    logger.info("Enter OracleCoherenceEnabler:doStart")

    # install archives: we need to install them before starting the oracle coherence server

    archiveFeatureInfo = ContainerUtils.getFeatureInfo(
        Feature.ARCHIVE_FEATURE_NAME, proxy.container,
        proxy.container.currentDomain)
    if archiveFeatureInfo != None:
        rootDirPath = runtimeContext.rootDirectory.absolutePath
        archiveDir = archiveFeatureInfo.archiveDirectory
        archiveDirPath = os.path.join(rootDirPath, archiveDir)
        if os.path.isdir(archiveDirPath):
            deploymentDir = getVariableValue(
                "COHERENCE_RUNTIME_DEPLOYMENT_DIR")
            logger.info("OracleCoherenceEnabler:doStart:Extracting archives:" +
                        archiveDirPath + " --> " + deploymentDir)
            uzip = UnZipFile()
            for entry in os.listdir(archiveDirPath):
                archiveFile = os.path.join(archiveDirPath, entry)
                if os.path.isfile(archiveFile):
                    logger.info(
                        "OracleCoherenceEnabler:doStart:Extracting archive:" +
                        archiveFile)
                    try:
                        uzip.extract(archiveFile, deploymentDir)
                    except:
                        type, value, traceback = sys.exc_info()
                        logger.severe(
                            "OracleCoherenceEnabler:doStart:Error extracting archive file:"
                            + ` value `)

        # we do not want the archive feature to install the archives
        archiveFeatureInfo.setInstallArchivesViaFileCopy(False)

    # start coherence server
    proxy.doStart()
    logger.info("Exit OracleCoherenceEnabler:doStart")
def checkForRequests(lock):

    pollperiod = getContainerRunningConditionPollPeriod()/1000

    while not rcvTrue('hadoop_enabler_balancer_BALANCER_EXIT_FLAG'):

        lock.acquire()
        receivedReqs = getBalancerReceivedRequests()
        currentReqs = getBalancerRequestsFromQueue()
            
        ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Current request list [" + str(currentReqs) +  "].")
        ContainerUtils.getLogger(proxy).fine("[hadoop_enabler_balancer] Previous request.list [" + str(receivedReqs) +  "].")
        if receivedReqs != currentReqs: 
            ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Balancer Requests have changed since last checked.")
            proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG').setValue("True")
            setBalancerReceivedRequests(currentReqs)
            stopBalancer()

        lock.release()

        time.sleep(pollperiod)
def processRequests(lock):
    
    pollperiod = getContainerRunningConditionPollPeriod()/1000
    
    while not rcvTrue('hadoop_enabler_balancer_BALANCER_EXIT_FLAG'):
        
        timer = threading.Timer(pollperiod, retryTimerElapsed)
        timer.start()
            
        lock.acquire()
        if rcvTrue("hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG"):

            ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Beginning processing of new requests")

            receivedReqs = getBalancerReceivedRequests()
            local_receivedReqs = list(receivedReqs)

            proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG').setValue("False")

            lock.release()
            runBalancer()
            lock.acquire()
            
            if not rcvTrue("hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG"):
                ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Deleting processed requests")
                for request in local_receivedReqs:            
                    deleteHDFSFile(request)
                setBalancerReceivedRequests([])
                    
        lock.release()

        if (rcvTrue("hadoop_enabler_balancer_BALANCER_NEW_REQUEST_FLAG") or rcvTrue("hadoop_enabler_balancer_BALANCER_EXIT_FLAG")):
            if timer.isAlive():
                ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Terminating retry timer early")
                timer.cancel()
        else:
            timer.join()
def doStart():
    
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Beginning doStart()")
    
    hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
    
    doStart_common()                 

    processRequetsThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_PROCESS_REQUEST_THREAD').getValue()
    processRequetsThread.start()
    
    startupdelay = 0
    if (True):   #TODO  Base decision to delay on whether Namenode just started or has been running for a while 
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Hadoop cluster is just starting.  Waiting [" + str(startupdelay) + 
                                             "] seconds before processing any balancer requests so more datanodes can join.")
    else:
        startupdelay = 0
        
    time.sleep(startupdelay)
    
    checkForReqThread = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_balancer_BALANCER_CHECKFOR_REQUEST_THREAD').getValue()
    checkForReqThread.start()
    
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_balancer] Exiting doStart()")
 def __lock(self):
     "get global lock"
     self.__locked = ContainerUtils.acquireGlobalLock(
         self.__swarmid, self.__lockExpire, self.__lockWait)
     if not self.__locked:
         raise "Unable to acquire global lock:" + self.__swarmid
def getContainerRunningConditionPollPeriod():
    pollperiod = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_ENABLER_RUNNING_POLLPERIOD').getValue()
    ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Will verify enabler is running every " + str(pollperiod) + " seconds.")
    return float(pollperiod) * 1000
 def __unlock(self):
     "unlock global lock"
     logger.info("Release build lock:" + self.__buildLock)
     if self.__locked:
         ContainerUtils.releaseGlobalLock(self.__buildLock)
def printStdoutPipe(stdout, outputlist):

    if (stdout == PIPE):
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Command STDOUT:")
        print outputlist[1]
    def __init__(self, additionalVariables):
        " initialize oracle database"
        
        self.__hostname = "localhost";
        try:
            self.__hostname = InetAddress.getLocalHost().getCanonicalHostName()
        except:
            type, value, traceback = sys.exc_info()
            logger.severe("Hostname error:" + `value`)
        
        additionalVariables.add(RuntimeContextVariable("ORACLE_HOSTNAME", self.__hostname, RuntimeContextVariable.ENVIRONMENT_TYPE))

        dbPassword = getVariableValue("DB_PASSWORD_ALL")
        
        if dbPassword and dbPassword.strip():
            self.__sysPassword = dbPassword
            additionalVariables.add(RuntimeContextVariable("SYS_PWD", dbPassword, RuntimeContextVariable.ENVIRONMENT_TYPE))
            additionalVariables.add(RuntimeContextVariable("DBSNMP_PWD", dbPassword, RuntimeContextVariable.ENVIRONMENT_TYPE));
            additionalVariables.add(RuntimeContextVariable("SYSMAN_PWD", dbPassword, RuntimeContextVariable.ENVIRONMENT_TYPE))
            additionalVariables.add(RuntimeContextVariable("SYSTEM_PWD", dbPassword, RuntimeContextVariable.ENVIRONMENT_TYPE));
        else:
            self.__sysPassword = getVariableValue("SYS_PWD")
            
        dbDataLocation = getVariableValue("DB_DATA_LOC")
        if dbDataLocation and os.path.isdir(dbDataLocation):
            dbName = getVariableValue("DB_NAME")
            
            dbDataDir = os.path.join(dbDataLocation, dbName)
            if os.path.isdir(dbDataDir):
                logger.info("DB Data directory already exists:" + dbDataDir + "; Setting DB_INSTALL_OPTION to INSTALL_DB_SWONLY")
                additionalVariables.add(RuntimeContextVariable( "DB_INSTALL_OPTION", "INSTALL_DB_SWONLY", RuntimeContextVariable.ENVIRONMENT_TYPE))
        

        tcpPort = getVariableValue("TCP_PORT");
        self.__serviceName = getVariableValue("DB_GLOBAL_NAME")

        sb = StringBuilder("jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)")
        sb.append("(HOST=").append(self.__hostname).append(")")
        sb.append("(PORT=").append(tcpPort).append("))")
        sb.append("(CONNECT_DATA=(SERVICE_NAME=").append(self.__serviceName).append(")))")
        
        self.__oracleServiceUrl = sb.toString()
        
        logger.info("Oracle listener service URL:" + self.__oracleServiceUrl)
        
        self.__jdbcUrl = "jdbc:oracle:thin:@" + self.__hostname +":"+ tcpPort + ":" + self.__serviceName
        runtimeContext.addVariable(RuntimeContextVariable("JDBC_URL", self.__jdbcUrl, RuntimeContextVariable.STRING_TYPE, "Oracle Thin Driver JDBC Url", True, RuntimeContextVariable.NO_INCREMENT))
        
        
        oracleDriver = "oracle.jdbc.OracleDriver"
        runtimeContext.addVariable(RuntimeContextVariable("JDBC_DRIVER", oracleDriver, RuntimeContextVariable.STRING_TYPE, "Oracle Thin Driver class", True, RuntimeContextVariable.NO_INCREMENT))
        
        self.__dbControl = Boolean.parseBoolean(getVariableValue("CONFIG_DBCONTROL", "false"))
        
        if self.__dbControl:
            self.__dbCtrlPort = getVariableValue("DBCONTROL_HTTP_PORT")
            additionalVariables.add(RuntimeContextVariable( "HTTPS_PORT", self.__dbCtrlPort, RuntimeContextVariable.STRING_TYPE))
        
        oracleDir = getVariableValue("ORACLE_DIR")
        self.__markerFilePath = os.path.join(oracleDir, ".#dsoracle")
        
        self.__maintFilePath = getVariableValue("ORACLE_MAINT_FILE")
        
        dbInstallOption = getVariableValue("DB_INSTALL_OPTION")
        if dbInstallOption == "INSTALL_DB_AND_CONFIG":
            globalLockString = "OracleEnabler-" + self.__hostname
            logger.info("Requesting Global Lock with name: " + globalLockString)
            domain = proxy.getContainer().getCurrentDomain()
            options = domain.getOptions()
            maxActivationTimeOut = options.getProperty(Options.MAX_ACTIVATION_TIME_IN_SECONDS)
            lockTimeOut = Long.parseLong(maxActivationTimeOut) * 1000
            acquired = ContainerUtils.acquireGlobalLock(globalLockString, lockTimeOut , lockTimeOut)
            if acquired:
                logger.info("Acquired Global lock with name: " + globalLockString)
            else:
                logger.severe("Could not acquire Global lock with name: " + globalLockString)
                raise Exception("Could not acquire Global lock with name: " + globalLockString)
 def __lock(self):
     "get build lock"
     logger.info("Acquire build lock:" + self.__buildLock)
     self.__locked = ContainerUtils.acquireGlobalLock(self.__buildLock, self.__lockExpire, self.__lockWait)
     if not self.__locked:
         raise Exception("Unable to acquire build lock:" + self.__buildLock)
 def __unlock(self):
     "unlock global lock"
     logger.info("Release build lock:" + self.__buildLock)
     if self.__locked:
         ContainerUtils.releaseGlobalLock(self.__buildLock)
 def __unlock(self):
     "unlock global lock"
     if self.__locked:
         ContainerUtils.releaseGlobalLock(self.__clusterConfigDir)
         self.__locked = None
                      "dfs.datanode.address",
                      "dfs.datanode.http.address",
                      "dfs.datanode.ipc.address",
                      "dfs.hosts.exclude",
                      "dfs.name.dir",
                      "dfs.data.dir",
                      "mapred.job.tracker",
                      "mapred.job.tracker.http.address",
                      "mapred.task.tracker.http.address"
                      ]

# Resolve the enabler's proxy object: if `proxy` is not already defined in
# this module, borrow it from the calling frame's globals.
try: proxy
except NameError:
    globals()['proxy'] = inspect.currentframe().f_back.f_globals['proxy']
else: pass
logger = ContainerUtils.getLogger(proxy)

def getDynamicGridlibDependencies():
    logger.info("[hadoop_enabler_common] Beginning getDynamicGridlibDependencies()")

    hadoopVersion = getHadoopVersion()
    logger.info("[hadoop_enabler_common] Hadoop Distribution version is [" + str(hadoopVersion) +"]")    

    defaultDomainGridlib = GridlibInfo()
    defaultDomainGridlib.name = "default-domain-type"
    
    logger.info("[hadoop_enabler_common] Adding Hadoop distribution dependency")
    gridlib = GridlibInfo()
    gridlib.name = "hadoop-distribution"
    gridlib.version = str(hadoopVersion)
    
 def __unlock(self):
     "unlock global lock"
     if self.__locked:
         ContainerUtils.releaseGlobalLock(self.__swarmid)
         self.__locked = None
def doInitHadoopProps(userProp_RCVname, userPropFile_RCVname, additionalVariables):

    ContainerUtils.getLogger(proxy).info("[hadoop_enabler] Checking for user property file to augment [" + str(userProp_RCVname) + "].")
    
    userPropsRCV = proxy.getContainer().getRuntimeContext().getVariable(userProp_RCVname)
    userPropsPredefined = False
    if (userPropsRCV == None):
        userProps = ""         
    else:
        userPropsPredefined = True
        userProps =  userPropsRCV.getValue()
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler] User properties variable ${" + userProp_RCVname + 
                                                "} is was imported or pre-defined on component.  Starting value is [" + str(userProps) + "].")

    userPropFile = proxy.getContainer().getRuntimeContext().getVariable(userPropFile_RCVname).getValue()

    if (userPropFile != "") and os.path.isfile(userPropFile):
        ContainerUtils.getLogger(proxy).info("[hadoop_enabler] User property file found [" + str(userPropFile) + "].")
        xmldoc = minidom.parse(userPropFile)
        propertylist = xmldoc.getElementsByTagName('property')
    
        if propertylist == None or len(propertylist) == 0:
            ContainerUtils.getLogger(proxy).warning("[hadoop_enabler] No property elements found in user property file.")
        else:
            for element in propertylist:
                nameElements = element.getElementsByTagName("name")
                name = getNodeText(nameElements[0])
    
                isReserved = False                
                for reservedproperty in reservedproperties:
                    if reservedproperty.count(name) > 0:
                       isReserved = True
                       break
                
                if isReserved:
                    ContainerUtils.getLogger(proxy).warning("[hadoop_enabler] The property [" + str(name) + "] is managed by the Hadoop Enabler.  Will ignore user supplied value.")
                else:    
                    ContainerUtils.getLogger(proxy).info("[hadoop_enabler] Applying user property [" + str(element.toxml()) + "].")
                    userProps = userProps + element.toxml()
    
    if userPropsPredefined:
        proxy.getContainer().getRuntimeContext().getVariable(userProp_RCVname).setValue(userProps)
    else:
        additionalVariables.add(RuntimeContextVariable(userProp_RCVname, userProps, RuntimeContextVariable.STRING_TYPE,"User Supplied Hadoop properties" , False, RuntimeContextVariable.NO_INCREMENT))
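# Hedged illustration: a minimal user property file in the Hadoop
# <configuration> format that doInitHadoopProps() parses; the property name
# and value here are hypothetical.
from xml.dom import minidom

samplexml = """<configuration>
  <property>
    <name>io.file.buffer.size</name>
    <value>65536</value>
  </property>
</configuration>"""

for element in minidom.parseString(samplexml).getElementsByTagName('property'):
    print element.toxml()    # appended to userProps unless the name is reserved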
    def __init__(self, additionalVariables):
        " initialize oracle database"

        self.__hostname = "localhost"
        try:
            self.__hostname = InetAddress.getLocalHost().getCanonicalHostName()
        except:
            type, value, traceback = sys.exc_info()
            logger.severe("Hostname error:" + ` value `)

        additionalVariables.add(
            RuntimeContextVariable("ORACLE_HOSTNAME", self.__hostname,
                                   RuntimeContextVariable.ENVIRONMENT_TYPE,
                                   "Oracle Hostname", True,
                                   RuntimeContextVariable.NO_INCREMENT))

        listenAddress = getVariableValue("LISTEN_ADDRESS")
        additionalVariables.add(
            RuntimeContextVariable("ORACLE_LISTEN_ADDRESS", listenAddress,
                                   RuntimeContextVariable.ENVIRONMENT_TYPE,
                                   "Oracle Listen Address", True,
                                   RuntimeContextVariable.NO_INCREMENT))

        dbPassword = getVariableValue("DB_PASSWORD_ALL")

        if dbPassword and dbPassword.strip():
            self.__sysPassword = dbPassword
            additionalVariables.add(
                RuntimeContextVariable(
                    "SYS_PWD", dbPassword,
                    RuntimeContextVariable.ENVIRONMENT_TYPE))
            additionalVariables.add(
                RuntimeContextVariable(
                    "DBSNMP_PWD", dbPassword,
                    RuntimeContextVariable.ENVIRONMENT_TYPE))
            additionalVariables.add(
                RuntimeContextVariable(
                    "SYSMAN_PWD", dbPassword,
                    RuntimeContextVariable.ENVIRONMENT_TYPE))
            additionalVariables.add(
                RuntimeContextVariable(
                    "SYSTEM_PWD", dbPassword,
                    RuntimeContextVariable.ENVIRONMENT_TYPE))
        else:
            self.__sysPassword = getVariableValue("SYS_PWD")

        dbDataLocation = getVariableValue("DB_DATA_LOC")
        if dbDataLocation and os.path.isdir(dbDataLocation):
            dbName = getVariableValue("DB_NAME")

            dbDataDir = os.path.join(dbDataLocation, dbName)
            if os.path.isdir(dbDataDir):
                logger.info("DB Data directory already exists:" + dbDataDir +
                            "; Setting DB_INSTALL_OPTION to INSTALL_DB_SWONLY")
                additionalVariables.add(
                    RuntimeContextVariable(
                        "DB_INSTALL_OPTION", "INSTALL_DB_SWONLY",
                        RuntimeContextVariable.ENVIRONMENT_TYPE))

        tcpPort = getVariableValue("TCP_PORT")
        self.__serviceName = getVariableValue("DB_GLOBAL_NAME")

        sb = StringBuilder(
            "jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)")
        sb.append("(HOST=").append(self.__hostname).append(")")
        sb.append("(PORT=").append(tcpPort).append("))")
        sb.append("(CONNECT_DATA=(SERVICE_NAME=").append(
            self.__serviceName).append(")))")

        self.__oracleServiceUrl = sb.toString()

        logger.info("Oracle listener service URL:" + self.__oracleServiceUrl)

        self.__jdbcUrl = "jdbc:oracle:thin:@" + self.__hostname + ":" + tcpPort + ":" + self.__serviceName
        additionalVariables.add(
            RuntimeContextVariable("JDBC_URL", self.__jdbcUrl,
                                   RuntimeContextVariable.STRING_TYPE,
                                   "Oracle Thin Driver JDBC Url", True,
                                   RuntimeContextVariable.NO_INCREMENT))

        oracleDriver = "oracle.jdbc.OracleDriver"
        additionalVariables.add(
            RuntimeContextVariable("JDBC_DRIVER", oracleDriver,
                                   RuntimeContextVariable.STRING_TYPE,
                                   "Oracle Thin Driver class", True,
                                   RuntimeContextVariable.NO_INCREMENT))

        self.__dbControl = Boolean.parseBoolean(
            getVariableValue("CONFIG_DBCONTROL", "false"))

        if self.__dbControl:
            self.__dbCtrlPort = getVariableValue("DBCONTROL_HTTP_PORT")
            additionalVariables.add(
                RuntimeContextVariable("HTTPS_PORT", self.__dbCtrlPort,
                                       RuntimeContextVariable.STRING_TYPE))

        oracleDir = getVariableValue("ORACLE_DIR")
        self.__markerFilePath = os.path.join(oracleDir, ".#dsoracle")

        self.__maintFilePath = getVariableValue("ORACLE_MAINT_FILE")

        dbInstallOption = getVariableValue("DB_INSTALL_OPTION")
        if dbInstallOption == "INSTALL_DB_AND_CONFIG":
            globalLockString = "OracleEnabler-" + self.__hostname
            logger.info("Requesting Global Lock with name: " +
                        globalLockString)
            domain = proxy.getContainer().getCurrentDomain()
            options = domain.getOptions()
            maxActivationTimeOut = options.getProperty(
                Options.MAX_ACTIVATION_TIME_IN_SECONDS)
            lockTimeOut = Long.parseLong(maxActivationTimeOut) * 1000
            acquired = ContainerUtils.acquireGlobalLock(
                globalLockString, lockTimeOut, lockTimeOut)
            if acquired:
                logger.info("Acquired Global lock with name: " +
                            globalLockString)
            else:
                logger.severe("Could not acquire Global lock with name: " +
                              globalLockString)
                raise Exception("Could not acquire Global lock with name: " +
                                globalLockString)