Example #1
def loadOraConnectionData( inputFilePath = None):
  """read connection data from a JSON input file and return a list of
  NicknamedOraConnection objects to the caller.
  NO connections are actually opened to the database! Also, the password must be
  acquired by other means.
  """
  if inputFilePath is None:
    moduleSelfDir = os.path.dirname( inspect.getfile(inspect.currentframe()) )
    inputFilePath = os.path.join( moduleSelfDir, 'ora_connection_cfg.json' )
  if not os.path.exists( inputFilePath ):
    _errorExit( "File %s does not seem to exist!" % ( inputFilePath ) )

  conns = []
  with open( inputFilePath, "r" ) as fh:
    jData = json.load( fh )
  _dbx( len( jData ) )
  connRecs = jData[ "connectData" ]
  for connDict in connRecs:
    _dbx( connDict )
    conn = NicknamedOraConnection( nickname= connDict['nickname'], host= connDict['host'],
      port= connDict['port'], service= connDict['service'], username= connDict['user'] )
    _dbx( str( conn ) )
    conns.append( conn )
  _dbx( len( conns ) )
  return conns
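A minimal sketch of the input file this loader expects, inferred from the keys the code reads (a "connectData" array with nickname/host/port/service/user entries); all names and values below are placeholders:

import json, os, tempfile

cfg = { "connectData": [
    { "nickname": "dev1", "host": "dbhost.example.com", "port": 1521,
      "service": "ORCLPDB1", "user": "scott" } ] }
tmpPath = os.path.join( tempfile.mkdtemp(), "ora_connection_cfg.json" )
with open( tmpPath, "w" ) as fh:
    json.dump( cfg, fh )
conns = loadOraConnectionData( inputFilePath= tmpPath )  # one NicknamedOraConnection per entry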
Example #2
def getOraPassword ( oraUser, oraPasswordEnvVar, batchMode, envSuffix= "" ):
  """Prompt for the Oracle password if it is not found in an environment variable.
  The password entered at the prompt is hidden.

  oraPasswordEnvVar names the OS environment variable from which this method tries
  to read the password. It is not uncommon that DEV environments share the same
  username and password for the account used to read the data dictionary, while
  higher environments like PROD and pre-production have a specific password for
  this user. Therefore, if envSuffix is given, this method first concatenates
  oraPasswordEnvVar and envSuffix and looks up that variable for the environment
  in question (= envSuffix). If no such variable is found, it falls back to the
  one named by oraPasswordEnvVar alone.

  fixme: batchMode is currently ignored!
  """
  passwordEnv= None
  envVarToUse = oraPasswordEnvVar + envSuffix
  if envVarToUse not in os.environ:
    envVarToUse = oraPasswordEnvVar

  if envVarToUse in os.environ:
    passwordEnv= os.environ[ envVarToUse ]

  if platform.system() == "Windows":
    if passwordEnv is None:
      _errorExit( 'Password could be passed as environment variable %s, however it is not set.' % ( envVarToUse ) )
    pwToUse = passwordEnv
  else:
    pwToUse = getpass.getpass( 'Enter password for Oracle user %s at %s or just press ENTER (the input will be hidden if supported by the OS platform)' % ( oraUser, envSuffix ) )
    if pwToUse == "":
      pwToUse= passwordEnv

  return pwToUse
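A hedged usage sketch of the lookup order: with envSuffix "_PROD" the function checks ORA_PW_PROD first and falls back to ORA_PW; the variable names and the password are made up:

import os

os.environ["ORA_PW_PROD"] = "secret"  # placeholder; never hard-code real passwords
pw = getOraPassword( oraUser= "scott", oraPasswordEnvVar= "ORA_PW",
                     batchMode= True, envSuffix= "_PROD" )
# On non-Windows platforms the function still prompts; just pressing ENTER
# makes it fall back to the environment value.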
Example #3
def action_devTest(jsonFile):
    if not os.path.exists(jsonFile):
        _errorExit("File %s does not seem to exist!" % (jsonFile))

    with open(jsonFile, "r") as fh:
        jData = json.load(fh)
    pathRecs = jData["filePaths"]
    filePaths = []
    for elem in pathRecs:
        _dbx(filePaths)
        filePath = elem['path']
        filePaths.append(filePath)
    return filePaths
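The JSON file is expected to carry a "filePaths" array whose elements each have a "path" attribute; a small self-contained sketch with made-up paths:

import json, os, tempfile

sample = {"filePaths": [{"path": "sql/pkg_a.pks"}, {"path": "sql/pkg_a.pkb"}]}
tmp = os.path.join(tempfile.mkdtemp(), "paths.json")
with open(tmp, "w") as fh:
    json.dump(sample, fh)
print(action_devTest(tmp))  # ['sql/pkg_a.pks', 'sql/pkg_a.pkb']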
Example #4
def getEnvList(envCsv):
    """ convert a CSV string in the expected format to a list of environment codes,
    ensuring that each code is valid
    """
    retVal = []
    for ix, envCode in enumerate(envCsv.split(",")):
        _dbx("%d. env %s " % (ix, envCode))
        if envCode not in g_mapDbNameOfEnvCode:
            _errorExit("envCode %s is not mapped!" % (envCode))

        retVal.append(envCode)

    return retVal
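A sketch of a call; the real g_mapDbNameOfEnvCode lives at module level and its contents are not shown in these snippets, so a hypothetical mapping is supplied here:

g_mapDbNameOfEnvCode = {"prod": "PRODDB", "uat2": "UATDB2", "gt2": "GTDB2"}  # hypothetical
print(getEnvList("prod,uat2"))  # ['prod', 'uat2']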
Example #5
def extractTouchedScripts(commitA, commitB="HEAD"):
    """ extract scripts which have been modified or added between 2 commits
    """
    global g_filesToExcludeFromInstall

    args = ["git", "diff", "--name-only", commitA, commitB]
    outFh, tmpOutFile = tempfile.mkstemp()
    _dbx("using %s to capture output from git diff \nunix-style: %s" %
         (tmpOutFile, dosPath2Unix(tmpOutFile)))
    subprocess.run(args, stdout=outFh)
    os.close(outFh)

    gitOutLines = open(tmpOutFile, "r").readlines()
    if len(gitOutLines) == 0:
        _errorExit("No lines found in git diff output file %s" % (tmpOutFile))

    # Note: "git diff --name-status" would also yield A/D/M status codes so that
    # deleted files could be discarded, but its output is problematic with
    # renames; hence plain --name-only is used and only the blacklist
    # g_filesToExcludeFromInstall filters lines out.
    scriptsSet = set()
    for line in gitOutLines:
        doExclude = False
        for blackLine in g_filesToExcludeFromInstall:
            if line.strip() == blackLine.strip():
                doExclude = True
                break

        if not doExclude:
            scriptsSet.add(line.strip())
    _dbx(len(scriptsSet))

    return list(scriptsSet)
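Typical calls, with placeholder commit refs; the second argument defaults to HEAD:

touched = extractTouchedScripts("1a2b3c4")                 # compares 1a2b3c4..HEAD
touched = extractTouchedScripts("1a2b3c4", "release/1.2")  # or any two refs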
Example #6
def getGitCurrBranchName():
    args = ["git", "branch"]
    outFh, tmpOutFile = tempfile.mkstemp()
    _dbx("using %s to capture output from git branch \nunix-style: %s" %
         (tmpOutFile, dosPath2Unix(tmpOutFile)))
    subprocess.run(args, stdout=outFh)
    os.close(outFh)

    gitOutLines = open(tmpOutFile, "r").readlines()
    if len(gitOutLines) == 0:
        _errorExit("No lines found in git branch output file %s" %
                   (tmpOutFile))

    branchName = None
    for line in gitOutLines:
        if line.startswith('*'):  # the current branch is marked with an asterisk
            branchName = line.split()[1]
            break

    return branchName
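A leaner alternative (not what this module does) would be to ask git for the branch name directly; rev-parse prints it on stdout, so no temp file is needed (capture_output requires Python 3.7+):

import subprocess

branch = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"],
                        capture_output=True, universal_newlines=True).stdout.strip()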
Example #7
def genUnixDiff(oldPath, newPath, recursive=False):
    """Calls the unix diff command and returns its output to the calling function.
    Bombs out if any error was detected, but displays only the first 10 lines of stderr.
    """
    diffCmdArgsUnix = ['diff', '-b', oldPath, newPath]
    if recursive: diffCmdArgsUnix.insert(1, '-r')

    proc = subprocess.Popen(diffCmdArgsUnix,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    unixOutMsgs, errMsgs = proc.communicate()

    if len(errMsgs) > 0:  # got error, return immediately
        _errorExit('got error from diff. Only first 10 lines are shown:\n%s ' %
                   '\n'.join(errMsgs.splitlines()[0:10]))

    _dbx(len(unixOutMsgs))
    return unixOutMsgs
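Usage sketch with placeholder paths. Note that GNU diff exits with status 1 when files differ; that is fine here since only stderr is inspected:

diffText = genUnixDiff("old_extract", "new_extract", recursive=True)
for line in diffText.splitlines()[:20]:
    print(line)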
Example #8
def getObjectList(objectCsv):
    """ convert a CSV string in the expected format to a list of DBObject instances
    """
    retVal = []
    for ix, obj in enumerate(objectCsv.split(",")):
        _dbx("%d. obj %s " % (ix, obj))
        tokens = obj.split(".")
        if len(tokens) != 3:
            _errorExit(
                "object %s does not conform to expected format of schema.name.type_code"
                % (obj))
        owner, name, typeCode = tokens
        _dbx("owner %s typ %s" % (owner, typeCode))
        typeOfficial = oraUtils.g_mapFileExtDbmsMetaDataTypeToad[typeCode]
        _dbx("typeOfficial %s" % (typeOfficial))

        retVal.append(
            oraUtils.DBObject(name=name,
                              owner=owner,
                              type=typeOfficial,
                              fileExt=typeCode))

    return retVal
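A sketch using the schema.name.type_code format mentioned in the CLI help; it assumes oraUtils with its type-code map is importable and that DBObject exposes the constructor kwargs as attributes:

objs = getObjectList("process.sk_process_control.pks,process.sk_process_control.pkb")
for o in objs:
    print(o.owner, o.name, o.type, o.fileExt)  # attribute access is assumed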
Example #9
def main():
    argParserResult = parseCmdLine()

    setDebug(argParserResult.debug)
    if argParserResult.action == 'dbs':
        action_dbs(envCsv=argParserResult.environments,
                   objCsv=argParserResult.objects)
    elif argParserResult.action == 'extract':
        action_extractScripts(objCsv=argParserResult.objects,
                              envCsv=argParserResult.environments)
    # elif argParserResult.action == 'grepInst':  # action currently disabled
    #     action_grepInst(baseLocation=argParserResult.baseLocation,
    #                     inputFilePaths=argParserResult.inputFilePaths)
    elif argParserResult.action == 'os':
        action_os(
            inputFilePathsCsv=argParserResult.inputFilePaths,
            branchName=argParserResult.featureName,
            inputPathsFromJsonFile=argParserResult.inputPathsFromJsonFile)
    elif argParserResult.action == 'devTest':
        action_devTest(jsonFile=argParserResult.inputPathsFromJsonFile)
    else:
        _errorExit("action %s is not yet implemented" %
                   (argParserResult.action))
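The usual entry-point guard, assuming the module is also meant to be run as a script:

if __name__ == "__main__":
    main()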
Example #10
def plsqlTokenize(inpLines):
    lnCnt = len(inpLines)
    _dbx(lnCnt)
    lineNo = 0

    nodeStack = TokenStack(name='plsqlTokenize')
    curTreeId = None

    interceptBufferLines = []
    (interceptStartLineNo, interceptStartColNo) = (-1, -1)
    # just for clarity. First reference is when we hit block_comment_begin
    # The token engine matches, in order:
    #   alphanumString OR dblQuotedAlphanumString OR assignment OR singleEqual OR doubleEqual OR dotOperator
    #   macros ($$plsql_unit, $$plsql_line)
    #   block_comment_begin OR lineComment
    #   single quote, assignment operator
    #   comparison operators, named param operator
    #   arithmetic operators, left or right parenthesis, comma, semicolon
    #   Q-notation begin in various flavours
    eng = re.compile(
        r"""^(\s*)(\$\$plsql_unit|\$\$plsql_line|q\{"|[\$#a-z0-9_]+|"[\$#a-z0-9_]+"|:=|>=|<=|>|<|!=|=>|=|/\*|--|\|\||\.\.|\.|%|\(|\)|\+|-|\*|/|,|;|@)(\s*)""",
        re.IGNORECASE)

    curSta = FsmState.start
    for line in inpLines[:29999]:
        colNo = 1
        lineNo += 1
        lnBuf = line
        eoLine = False
        if re.search(r'^\s*$', line):  # match empty line
            pass  # _dbx( 'Line %d is empty' % lineNo )
        else:
            pass  # _dbx( "line %d len: %d. Line content >>>>>>>>>%s" % ( lineNo, len( line ), line.rstrip("\n") ) )
        i = 0
        # do we need eoLine indeed or can we just bump colNo accordingly?
        while (i < 999 and colNo < len(line) and not eoLine
               ):  # process line with safety belt against infinite loop
            i += 1
            #_dbx( "Ln/col %d/%d curSta:  '%s'" % ( lineNo, colNo, curSta ) )
            if curSta == FsmState.find_block_comment_end:
                m = re.search(r'^(.*?)(\*/)', lnBuf)  # match end of block comment
                if m is None:
                    #_dbx( "need to cache block comment" )
                    interceptBufferLines.append(lnBuf)
                    eoLine = True
                    continue
                else:  # found end of block comment
                    interceptBufferLines.append(m.group(1) + m.group(2))
                    _dbx("group1>>%s, group2>>%s, lnBuf>>>>%s" %
                         (m.group(1), m.group(2), lnBuf))
                    lenUptoStarSlash = len(m.group(1)) + len(m.group(2))
                    _dbx("lenUptoStarSlash:%d" % (lenUptoStarSlash))
                    colNo += lenUptoStarSlash
                    _dbx("found block comment end at col %d" % colNo)
                    lnBuf = lnBuf[lenUptoStarSlash:]
                    _dbx("stuff at comment is >>>%s" % (lnBuf.rstrip("\n")))
                    curSta = FsmState.start
                    node = TokenNode(text="".join(interceptBufferLines),
                                     type=TokenType.block_comment_begin,
                                     staAtCreation=curSta,
                                     lineNo=interceptStartLineNo,
                                     colNo=interceptStartColNo,
                                     parentId=curTreeId)
                    nodeStack.push(node)

                    continue  # while not EOL

            elif curSta == FsmState.in_single_quoted_literal:
                #_dbx( "scanning for end single quote in >>> %s " % lnBuf )
                endOfLitFound, partOfLit = scanEndOfSQLiteral(lnBuf)
                if not endOfLitFound:  # line break is part of string literal
                    interceptBufferLines.append(lnBuf)
                    eoLine = True  # line is done
                else:  # found end of literal in line, possibly with rest not belonging to literal
                    curSta = FsmState.start
                    interceptBufferLines.append(partOfLit)
                    literalText = "".join(interceptBufferLines)
                    node = TokenNode(
                        text=literalText,
                        type=TokenType.single_quoted_literal_begin,
                        staAtCreation=curSta,
                        lineNo=interceptStartLineNo,
                        colNo=interceptStartColNo,
                        parentId=curTreeId)
                    nodeStack.push(node)
                    colNo += len(partOfLit)
                    lnBuf = line[colNo - 1:]
                    #_dbx( "lnBuf>>>%s" % lnBuf )
                continue
            elif curSta == FsmState.in_q_notation_begin:
                #_dbx( "scanning for end q notation literal in >>> %s " % lnBuf )
                endOfLitFound, partOfLit = scanEndOfQNotationLiteral(
                    q2and3, lnBuf)
                if not endOfLitFound:  # line break is part of string literal
                    interceptBufferLines.append(lnBuf)
                    eoLine = True  # line is done
                else:  # found end of literal in line, possibly with rest not belonging to literal
                    curSta = FsmState.start
                    interceptBufferLines.append(partOfLit)
                    literalText = "".join(interceptBufferLines)
                    node = TokenNode(
                        text=literalText,
                        type=TokenType.single_quoted_literal_begin,
                        staAtCreation=curSta,
                        lineNo=interceptStartLineNo,
                        colNo=interceptStartColNo,
                        parentId=curTreeId)
                    nodeStack.push(node)
                    colNo += len(partOfLit)
                    lnBuf = line[colNo - 1:]
                    # _dbx( "lnBuf>>>%s" % lnBuf )
                continue

            m = re.search(r'^\s*$', lnBuf)  # match empty rest of line
            if m is not None:
                eoLine = True

            if eoLine:
                continue

            # process other types of token
            m = eng.match(lnBuf)
            #_dbx( 'lnBuf being parsed        >>>>>> %s' % lnBuf.rstrip("\n") )
            if m is None:
                # the special scan for single quoted literal is no longer needed since we can use the triple single quotes!
                m = re.search("^(\s*)(')", lnBuf)  # match single quote
                if m != None:  # found single quote
                    # stateStack.push( curSta, curTreeId  )
                    curSta = FsmState.in_single_quoted_literal
                    interceptBufferLines = []
                    (interceptStartLineNo, interceptStartColNo) = (lineNo,
                                                                   colNo)
                    interceptBufferLines.append(m.group(2))
                    colNo += len(m.group(1) + m.group(2))
                    lnBuf = line[colNo - 1:]
                    #_dbx( colNo )

                    continue  # we must skip the fine-grained FSM
                else:
                    _infoTs(
                        "Rest of line %d could not be tokenized. Line content follows \n%s"
                        % (lineNo, lnBuf))
                    return nodeStack

            else:
                # second re group i.e. token
                tok = m.group(2)

                # third re group i.e. optional whitespaces
                #if len( m.group(3) ) > 0: # found trailing whitespaces

                colNo += len(m.group(1)) + len(m.group(2)) + len(m.group(3))
                # _dbx( "colNo: %d" % colNo )

                #_dbx( "Ln/col %d/%d raw tok:  '%s'" % ( lineNo, colNo, tok ) )
                lnBuf = line[colNo - 1:]
                # _dbx( "rest of line: %s" % lnBuf.rstrip("\n") )

                tokTyp, normed = gettokentype(tok)
                #_dbx( "tokTyp:  %s normed: '%s'" % ( tokTyp, normed  ) )

                if tokTyp == TokenType.block_comment_begin:
                    if curSta == FsmState.find_block_comment_end:
                        _errorExit("Encountered tokTyp %s while in state %s!" %
                                   (tokTyp, curSta))
                    else:  # found a block comment opener in the middle of something; switch the parser to search specifically for the end of the comment
                        curSta = FsmState.find_block_comment_end
                        interceptBufferLines = []
                        (interceptStartLineNo, interceptStartColNo) = (lineNo,
                                                                       colNo)
                        interceptBufferLines.append(tok)
                        #_dbx( "we must skip the fine-grained FSM ")
                        continue  # we must skip the fine-grained FSM

                elif tokTyp == TokenType.single_line_comment_begin:  # found double minus
                    #_dbx( foo )
                    if curSta == FsmState.find_block_comment_end:
                        _errorExit("Encountered tokTyp %s while in state %s!" %
                                   (tokTyp, curSta))
                    else:  # not in wrong status, just push line comment node, no change of state
                        node = TokenNode(
                            text=normed + lnBuf.rstrip("\n"),
                            type=TokenType.single_line_comment_begin,
                            staAtCreation=curSta,
                            lineNo=lineNo,
                            colNo=colNo - len(normed),
                            parentId=curTreeId)
                        nodeStack.push(node)
                        eoLine = True
                        continue
                elif tokTyp == TokenType.q_notation_begin:
                    #_dbx( foo )
                    if curSta == FsmState.in_q_notation_begin:
                        _errorExit("Encountered tokTyp %s while in state %s!" %
                                   (tokTyp, curSta))
                    else:  # valid state; start capturing the Q-notation literal
                        # stateStack.push( curSta, curTreeId  )
                        q2and3 = normed[1:3]  # the opening bracket and a single or double quote, in either order
                        _dbx("normed>>>%s lnBuf>>> %s" % (normed, lnBuf))
                        curSta = FsmState.in_q_notation_begin
                        interceptBufferLines = []
                        (interceptStartLineNo, interceptStartColNo) = (lineNo,
                                                                       colNo)
                        interceptBufferLines.append(m.group(1) + m.group(2))

                    continue  # we must skip the fine-grained FSM
                else:
                    pass  #_dbx( "lineNo/colNo: %d/%d lnBuf >>>%s" % ( lineNo, colNo, lnBuf ))
                #
                #
                node = TokenNode(text=normed,
                                 type=tokTyp,
                                 staAtCreation=None,
                                 lineNo=lineNo,
                                 colNo=colNo - len(normed),
                                 parentId=curTreeId)
                nodeStack.push(node)

    return nodeStack  # plsqlTokenize
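A sketch that tokenizes a PL/SQL source file and dumps the first few recognized tokens; the file name is a placeholder, and TokenStack is assumed to expose its nodes via .arr (as fsmMain below relies on):

with open("pkg_body.sql", "r") as fh:
    tokStack = plsqlTokenize(fh.readlines())
for node in tokStack.arr[:10]:
    print(node.lineNo, node.colNo, node.type, repr(node.text))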
Example #11
def fsmMain(preTokStack, startStatus=FsmState.start):
    """ Makes the ASSUMPTION that comment tokens are kept on a separate stack and the
    main stack holds only non-comment tokens. Once the significant tokens have been
    linked properly, a later pass will re-insert the comment tokens based on
    lineNo/colNo.
    """
    retTokStack = TokenStack(name="fsmMain")
    stateStack = StateStack(name="main_state")
    preTokStackLen = len(preTokStack.arr)
    curTreeId = None
    curSta = startStatus
    thenComesFromStack = StateStack(name="thenComesFrom")

    while preTokStack.peekAhead() is not None:
        curTokens = preTokStack.popComplexAware()
        tokId, normed, tokTyp = (curTokens[0].id, curTokens[0].text,
                                 curTokens[0].type)
        # lineNo, colNo = (curTokens[0].lineNo, curTokens[0].colNo )
        _dbx("curSta %s curTokens len %d, 1st id:%s type:%s >>>%s" %
             (curSta, len(curTokens), tokId, tokTyp, normed))

        if curSta == FsmState.start:
            if tokTyp == TokenType.relevant_keyword and normed == "CREATE":
                stateStack.push(curSta, curTreeId)
                newSta = FsmState.in_compilation_unit_header
                curTokens[0].state = newSta
                newTreeId = curTokens[0].id
            else:
                _errorExit("Unknown token id %s type %s in state %s " %
                           (tokId, tokTyp, curSta))
        elif curSta == FsmState.in_compilation_unit_header:
            if tokTyp == TokenType.relevant_keyword and normed == "AS":
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta = FsmState.in_declaration
                curTokens[0].state = newSta
                newTreeId = curTokens[0].id
            elif tokTyp == TokenType.semicolon:  # forward declaration of function/procedure
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta, newTreeId = stateStack.pop()
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
        elif curSta == FsmState.in_declaration:
            if tokTyp == TokenType.relevant_keyword and normed in ["BEGIN"]:
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta = FsmState.in_body
                newTreeId = curTokens[0].id
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
                newSta = FsmState.started_declaration_entry
                newTreeId = curTokens[0].id
        elif curSta == FsmState.started_declaration_entry:
            if tokTyp == TokenType.semicolon:
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta = FsmState.in_declaration
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
        elif curSta == FsmState.in_body:
            if tokTyp in [TokenType.aggEndIdentSemic, TokenType.aggEndSemic]:
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta, newTreeId = stateStack.pop()
            elif normed == "BEGIN":
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta = FsmState.in_body
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
            elif normed in ['IF', 'WHILE']:
                newSta = FsmState.expect_bool_expression
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
                if normed == "IF":
                    thenComesFromStack.push(FsmState.if_or_case_statement_open,
                                            None)
            elif normed == "CASE" and preTokStack.peekAhead().text == "WHEN":
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                for nextTok in preTokStack.popComplexAware():
                    curTokens.append(nextTok)
                newSta = FsmState.expect_bool_expression
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
                thenComesFromStack.push(FsmState.if_or_case_statement_open,
                                        None)
            elif normed == "CASE" and preTokStack.peekAhead().text != "WHEN":
                # here we must not pop the peeked token, it must go thru normal FSM
                newSta = FsmState.expect_expression
                newTreeId = curTokens[0].id
                #no pop expected!  stateStack.push( curSta, curTreeId)
                thenComesFromStack.push(FsmState.if_or_case_statement_open, None)
            elif normed in ['DECLARE']:
                newSta = FsmState.in_declaration
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
                stateStack.push(curSta, curTreeId)
                newSta = FsmState.expect_expression
                newTreeId = curTokens[0].id
        elif curSta in [FsmState.expect_expression]:
            if tokTyp in [
                    TokenType.semicolon, TokenType.aggEndSemic,
                    TokenType.aggEndIfSemic, TokenType.aggEndCaseSemic,
                    TokenType.aggEndLoopSemic
            ]:
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta, newTreeId = stateStack.pop()

            elif normed in ['THEN']:  # this is for "CASE WHEN .. THEN .."
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                peekThenComesFrom = thenComesFromStack.peek()[0]  # we don't care about the parentId
                if peekThenComesFrom == FsmState.case_bool_expression_open:
                    newSta = FsmState.in_body
                    newTreeId = curTokens[0].id
                else:
                    _errorExit(
                        "Found THEN at %s without opening CASE token in thenComesFromStack"
                        % tokId)
                thenComesFromStack.pop()  # ignore return values
            elif normed == "ELSE":
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta = FsmState.in_body
                newTreeId = curTokens[0].id
            elif normed == "CASE" and preTokStack.peekAhead().text == "WHEN":
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                for nextTok in preTokStack.popComplexAware():
                    curTokens.append(nextTok)
                newSta = FsmState.expect_bool_expression
                newTreeId = curTokens[0].id
                #do not expect pop!  stateStack.push( curSta, curTreeId)
                thenComesFromStack.push(FsmState.case_bool_expression_open,
                                        None)
            elif normed == "CASE" and preTokStack.peekAhead().text != "WHEN":
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                # here we must not pop the peeked token, it must go thru normal FSM
                newSta = FsmState.expect_expression
                newTreeId = curTokens[0].id
                # stateStack.push( curSta, curTreeId)
                thenComesFromStack.push(FsmState.case_bool_expression_open,
                                        None)
            elif normed in ['LOOP']:
                # this is for "FOR rec IN ( select * from xyz ) LOOP or similar constructs"
                newSta = FsmState.in_body
            elif tokTyp == TokenType.left_bracket:
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                stateStack.push(curSta, curTreeId)
                newTreeId = curTokens[0].id
            elif tokTyp == TokenType.right_bracket:
                newSta, newTreeId = stateStack.pop()
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
        elif curSta in [FsmState.expect_bool_expression]:
            _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
            if tokTyp in [TokenType.aggEndSemic]:  # this is for "CASE ... END;"
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta, newTreeId = stateStack.pop()
            elif normed in ['THEN']:  # for "IF x THEN .. ELSE", "WHILE y LOOP", or "CASE WHEN .. THEN .."
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                peekThenComesFrom = thenComesFromStack.peek()[0]  # we don't care about the parentId
                if peekThenComesFrom == FsmState.if_or_case_statement_open:
                    newSta = FsmState.in_body
                    newTreeId = curTokens[0].id
                elif peekThenComesFrom == FsmState.case_bool_expression_open:
                    newSta = FsmState.expect_expression
                    newTreeId = curTokens[0].id
                else:
                    _errorExit("No matching OPENER for THEN at %s" % tokId)
                thenComesFromStack.pop()  # ignore return values

            elif normed in ['ELSE', 'ELSIF', 'LOOP']:  # for "IF x THEN .. ELSE", "WHILE y LOOP", or "CASE WHEN .. THEN .."
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                newSta = FsmState.in_body
                newTreeId = curTokens[0].id

            elif tokTyp == TokenType.left_bracket:
                _dbx("tokTyp %s in state %s" % (tokTyp, curSta))
                stateStack.push(curSta, curTreeId)
                newTreeId = curTokens[0].id
            elif tokTyp == TokenType.right_bracket:
                newSta, newTreeId = stateStack.pop()
            elif normed == "CASE" and preTokStack.peekAhead().text == "WHEN":
                for nextTok in preTokStack.popComplexAware():
                    curTokens.append(nextTok)
                newSta = FsmState.expect_bool_expression
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
            elif normed == "CASE" and preTokStack.peekAhead().text != "WHEN":
                # here we must not pop the peeked token, it must go thru normal FSM
                newSta = FsmState.expect_expression
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
        else:
            _errorExit("No handler for state %s with input %s " %
                       (curSta, tokTyp))

        for ix, curTok in enumerate(curTokens):
            # _dbx( "ix: %d type %s" % (ix, type( curTok)) )
            newNode = TokenNode(text=curTok.text,
                                type=curTok.type,
                                staAtCreation=curSta,
                                lineNo=curTok.lineNo,
                                colNo=curTok.colNo,
                                parentId=curTreeId)
            retTokStack.push(newNode)
            #_dbx( "ret stack len %d" % (len( retTokStack.arr ) ) )

        _dbx("cur sta %s new sta %s" % (curSta, newSta))
        curSta, curTreeId = newSta, newTreeId
    _infoTs("final sta was %s" % (newSta))
    if preTokStackLen != len(retTokStack.arr):
        _errorExit(
            "OOPS: preTokStackLen is %d but len(retTokStack.arr) is %d" %
            (preTokStackLen, len(retTokStack.arr)))
    return retTokStack

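The intended two-pass pipeline per the docstring: tokenize first, then run the FSM over the token stack. Splitting comment tokens onto a separate stack, which the docstring assumes, happens in code not shown here; the file name is a placeholder:

with open("pkg_body.sql", "r") as fh:
    rawStack = plsqlTokenize(fh.readlines())
linkedStack = fsmMain(rawStack, startStatus=FsmState.start)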
Example #12
def createSchemataInstallScripts(sqlScriptTemplatePath,
                                 baseCommit,
                                 lastCommit,
                                 featureName=None,
                                 fileSufix=None,
                                 storeReleaseMetadata=True):
    """ Create SQL and BAT install scripts for the schemata with deployable scripts
  Deployable scripts are: 1) file is not at top level of the schema 
  and 2) extension is not blacklisted 
  """
    global g_internalSepator, g_listIndexedBySchemaType, g_schemataFound

    insertSqlStmtTemplate = """
--------------------------------------------------------------------------------
-- Store software release information: at this position we also record attempted 
-- deployment  
--------------------------------------------------------------------------------
DECLARE
  lv_rel_id NUMBER;
BEGIN 
  SELECT basic_data.APPL_RELEASE_SQ.nextval INTO lv_rel_id FROM dual;
  INSERT INTO basic_data.t_applied_releases( id, release_no, creation_dt ) VALUES( lv_rel_id, q'[{featureName}]', sysdate );
  INSERT INTO basic_data.t_applied_files( id, release_id, filename ) 
  SELECT appl_files_sq.nextval, lv_rel_id, q'[{basenameSqlScript}, git-branch: {featureName}, baseline-commit:{baselineCommit}, last-commit:{lastCommit}]' 
  FROM dual;

  COMMIT;
END;
/
  """

    suffixUsed = "-" + fileSufix if fileSufix else ""
    sentinelPatPrefix = "REM place_here_scripts_for:"
    with open(sqlScriptTemplatePath, mode="r") as fh:
        inpTemplateLines = fh.readlines()
    _dbx("got %d lines from template" % (len(inpTemplateLines)))
    scriptTemplateText = "".join(inpTemplateLines)

    tmpDir = tempfile.mkdtemp()
    _infoTs("install scripts will be placed under %s" % (tmpDir))

    batchScriptTemplate = """
SET NLS_LANG=GERMAN_GERMANY.WE8MSWIN1252

SQLPLUS /nolog @{sqlScriptBaseName}
"""

    readmeContentHeader = """
Order to run install scripts:
"""
    readmeContentFooter = """
All processes in groups xxx, yyy must be stopped
"""

    batchScripts = []
    for schema in g_schemataFound:
        _dbx("schema %s\n" % (schema))

        script4Schema = scriptTemplateText
        script4Schema = script4Schema.replace("<TARGET_SCHEMA>", schema)
        for schemaType in g_listIndexedBySchemaType.keys():
            if schemaType.startswith(schema):
                typeOnly = schemaType.split(g_internalSepator)[1]
                if typeOnly.upper() not in ['.SQL']:  # dirty fix to filter out top-level sql scripts
                    sentinelPattern = "%s%s" % (sentinelPatPrefix,
                                                typeOnly.upper())
                    _dbx("schemaType %s, sentinel %s" %
                         (schemaType, sentinelPattern))
                    listOfScripts = g_listIndexedBySchemaType[schemaType]
                    _dbx("cnt scripts %s" % (len(listOfScripts)))
                    # aggregate scripts of schemaType into one string
                    stringToAppend = "\n".join(listOfScripts)
                    _dbx(stringToAppend)
                    found = script4Schema.find(sentinelPattern)
                    if found >= 0:  # str.find returns -1 when the pattern is absent
                        _dbx("found pattern")
                        script4Schema = script4Schema.replace(
                            sentinelPattern,
                            "\n%s\n%s" % (sentinelPattern, stringToAppend))
                    else:
                        _errorExit("Sentinel '%s' not found in template!" %
                                   (sentinelPattern))  # , isWarning = True
        # print( script4Schema )

        # now remove the sentinels
        tempScript = script4Schema
        newLines = []
        for line in tempScript.split("\n"):
            if not line.startswith(sentinelPatPrefix):
                newLines.append(line)
        script4Schema = "\n".join(newLines)

        schemaDir = os.path.join(tmpDir, schema)
        os.mkdir(schemaDir)

        basenameSqlScript = "install_%s%s.sql" % (schema, suffixUsed)
        _dbx(basenameSqlScript)
        scriptPathSql = os.path.join(schemaDir, basenameSqlScript)

        if storeReleaseMetadata:
            # for INSERT of applied release information
            insertSqlStmt = insertSqlStmtTemplate.format(
                featureName=featureName, baselineCommit=baseCommit,
                lastCommit=lastCommit, basenameSqlScript=basenameSqlScript)
            _dbx(insertSqlStmt)
            script4Schema = script4Schema.format(
                placeHolderStoreReleaseMetadata=insertSqlStmt,
                baselineCommit=baseCommit,
                featureName=featureName)
        else:
            script4Schema = script4Schema.format(
                placeHolderStoreReleaseMetadata="",
                baselineCommit=baseCommit,
                featureName=featureName)

        batScriptBaseName = "install_%s%s.bat" % (schema, suffixUsed)

        scriptPathBat = os.path.join(schemaDir, batScriptBaseName)
        with open(scriptPathSql, mode="w") as fh:
            fh.write(script4Schema)
        sqlScriptBaseName = os.path.basename(scriptPathSql)

        batchScriptContent = batchScriptTemplate.format(
            sqlScriptBaseName=sqlScriptBaseName)
        with open(scriptPathBat, mode="w") as fh:
            fh.write(batchScriptContent)
        _infoTs("output SQL script unix-style >>>>> %s \nDOS style >>>: %s" %
                (dosPath2Unix(scriptPathSql), scriptPathSql))
        _infoTs("output BAT script unix-style >>>>> %s \nDOS style >>>: %s" %
                (dosPath2Unix(scriptPathBat), scriptPathBat))
        #_errorExit( "in test - stopped after 1 schema!! " );
        batchScripts.append(os.path.basename(scriptPathBat))

    # create install readme
    readmeFile = os.path.join(tmpDir, "install%s-readme.txt" % (suffixUsed))
    items = []
    for batchScript in batchScripts:
        items.append("?. %s" % (batchScript))
        _dbx(batchScript)
    itemsText = "\n".join(items)
    readmeText = "%s\n%s\n%s\n" % (readmeContentHeader, itemsText,
                                   readmeContentFooter)

    with open(readmeFile, mode="w") as fh:
        fh.write(readmeText)
    _infoTs("readme file unix-style >>>>> %s \nDOS style >>>: %s" %
            (dosPath2Unix(readmeFile), readmeFile))
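A minimal sketch of the sentinel mechanism this function relies on: each "REM place_here_scripts_for:" marker in the template is replaced by itself plus the collected scripts, and the marker lines are stripped afterwards (the template text below is made up):

template = "PROMPT installing\nREM place_here_scripts_for:PKS\nPROMPT done"
sentinel = "REM place_here_scripts_for:PKS"
scripts = "@@pkg_a.pks\n@@pkg_b.pks"
out = template.replace(sentinel, "\n%s\n%s" % (sentinel, scripts))
out = "\n".join(l for l in out.split("\n")
                if not l.startswith("REM place_here_scripts_for:"))
print(out)  # the scripts now sit where the sentinel was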
Example #13
def parseCmdLine():
    import argparse

    parser = argparse.ArgumentParser()
    # lowercase shortkeys
    parser.add_argument(
        '-a',
        '--action',
        choices=['dbs', 'extract', 'os', 'twoRepos', 'devTest'],
        help=
        """dbs: inputs come from databases; os: input is a comma-separated list of file paths;
  twoRepos: provide the root locations of 2 git repos on the local PC. This program will cd to the root location and extract the branch name. Input file paths are extracted from --inputPathsFromJsonFile, attribute inputFilePaths
  """,
        required=True)
    parser.add_argument('-b',
                        '--baseLocation',
                        help='base location of input files')
    parser.add_argument(
        '-e',
        '--environments',
        help='comma separated list of environment codes, e.g. prod, uat2, gt2',
        required=False)
    parser.add_argument(
        '-f',
        '--featureName',
        help='branch or feature name, will be used to qualify the file name')
    parser.add_argument('-I',
                        '--inputFilePaths',
                        help='comma separated input file paths',
                        required=False)
    parser.add_argument('-j',
                        '--inputPathsFromJsonFile',
                        help='json file containing various input data',
                        default=".\\scripts_for_quickDiff.json")
    parser.add_argument(
        '-o',
        '--objects',
        help=
        'comma separated list of objects, e.g: process.sk_process_control.pks',
        required=False)
    parser.add_argument('--debug',
                        help='print debugging messages',
                        required=False,
                        action='store_true')
    parser.add_argument('--no-debug',
                        help='do not print debugging messages',
                        dest='debug',
                        action='store_false')

    result = parser.parse_args()

    if result.featureName is None:
        result.featureName = getGitCurrBranchName()
        if result.featureName is None:
            result.featureName = g_defaultBranchName

    if result.action == 'dbs':
        if result.environments is None or result.objects is None:
            _errorExit("Action '%s' requires both env codes and object list" %
                       (result.action))
    elif result.action == 'extract':
        if result.environments is None or result.objects is None:
            _errorExit("Action '%s' requires both env codes and object list" %
                       (result.action))
    elif result.action == 'os':
        if result.inputFilePaths is None and result.inputPathsFromJsonFile is None:
            _errorExit(
                "Action '%s' requires inputFilePaths or inputPathsFromJsonFile"
                % (result.action))
    elif result.action == 'devTest':
        if result.inputPathsFromJsonFile is None:
            _errorExit("Action '%s' requires inputPathsFromJsonFile" %
                       (result.action))

    return result
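A sketch exercising the parser without a real command line by patching sys.argv; the option values are placeholders, and -f is set so no git call is triggered:

import sys

sys.argv = ["prog", "-a", "dbs", "-e", "prod,uat2",
            "-o", "process.sk_process_control.pks", "-f", "myFeature"]
args = parseCmdLine()
print(args.action, args.environments, args.objects)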