Example #1
def scanEndOfSQLiteral(str):
    """ this function is called when FSM found a single quote which starts a single quoted literal.
	We are to return True, <port of literal> if the closing single quote is found within the input. Note that intermediate pairs of of single quotes may occur inside and even at the end of the second return value
	We are to return False, None if the closing single quote is NOT found within the input
	"""
    _dbx("input len:%d >>>%s" % (len(str), str))
    ix = 0
    part = ""
    endFound = False
    while ix < len(str) and not endFound:
        # print("dbx  ix:%d ch:%s" % ( ix, str[ ix ] ) )
        if str[ix] == "'":
            if str[ix:].find("''") == 0:  # found two, at least
                # print( "found 2" )
                part += "''"
                ix += 2
            else:  # ok only found one
                # print( "found 1" )
                part += "'"
                ix += 1
                endFound = True
        else:
            part += str[ix]
            ix += 1

    if endFound:
        rv1, rv2 = True, part
    else:
        rv1, rv2 = False, None
    _dbx("rv1:%s, rv2>>%s" % (rv1, rv2))
    return rv1, rv2
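A quick sanity check of the scanner (hypothetical input strings; _dbx is assumed to be the module's debug helper): doubled quotes stay inside the returned literal part, and a missing closer yields False, None.

ok, lit = scanEndOfSQLiteral("it''s a test' AND 1=1")
assert (ok, lit) == (True, "it''s a test'")  # doubled quotes stay inside the literal
ok, lit = scanEndOfSQLiteral("never closed")
assert (ok, lit) == (False, None)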
Example #2
def getDdlScriptPath(
        dbName, object):  # fixme: consider using os.path.join or os.path.sep
    """ compose full path name of DDL script. example: 
    "/C/temp/LIONP01/SK_INVOICE_LSI-LIONP01.pkb"
  """
    retVal = "%s\\%s\\%s-%s.%s" % (g_baseOfDDLScripts, dbName,
                                   object.name.upper(), dbName, object.fileExt)
    _dbx("retVal: %s" % retVal)

    return retVal
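The fixme above could be addressed along these lines (a sketch only; g_baseOfDDLScripts and the object attributes are assumed to exist exactly as used above):

def getDdlScriptPath_v2(dbName, object):
    """ same composition as above, but portable across path separators """
    baseName = "%s-%s.%s" % (object.name.upper(), dbName, object.fileExt)
    return os.path.join(g_baseOfDDLScripts, dbName, baseName)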
Example #3
def action_devTest(jsonFile):
    if not os.path.exists(jsonFile):
        _errorExit("File %s does not seem to exist!" % (jsonFile))

    jStr = open(jsonFile, "r").read()
    jData = json.loads(jStr)
    fileEntries = jData["filePaths"]
    filePaths = []
    for elem in fileEntries:  # .keys():
        _dbx(filePaths)
        filePath = elem['path']
        filePaths.append(filePath)
    return filePaths
Example #4
def getEnvList(csv):
    """ convert CSV string in expected format to list of environment code
  ensuring that the code is valid
  """
    retVal = []
    for ix, envCode in enumerate(csv.split(",")):
        _dbx("%d. env %s " % (ix, envCode))
        if envCode not in g_mapDbNameOfEnvCode.keys():
            _errorExit("envCode %s is not mapped!" % (envCode))

        retVal.append(envCode)

    return retVal
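Hypothetical usage, assuming g_mapDbNameOfEnvCode maps env codes to database names as in Example #14:

g_mapDbNameOfEnvCode = {"dev": "LIOND01", "prod": "LIONP01"}  # placeholder mapping
print(getEnvList("dev,prod"))  # -> ['dev', 'prod']; an unmapped code aborts via _errorExit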
Example #5
def loadOraConnectionData( inputFilePath = None):
  """read connection data from json input file and return a list of dictionaires to the caller
  NO CONNECTIONs are actually opened to the database! Also password must be acquired by 
  other means.
  """
  if inputFilePath == None:
    moduleSelfDir = os.path.dirname( inspect.getfile(inspect.currentframe()) ) 
    inputFilePath = os.path.join ( moduleSelfDir, './ora_connection_cfg.json' ) 
  if not os.path.exists( inputFilePath ):
    _errorExit( "File %s does not seem to exist!" % ( inputFilePath ) ) 
  
  conns = []
  jData = json.load( open(inputFilePath, "r") )
  _dbx( len( jData ) )
  connRecs = jData[ "connectData" ]
  # __errorExit( "fixme: json structure chagned!" ) 
  for connDict in connRecs : # .keys():
    _dbx( connDict ) 
    nickname= connDict[ 'nickname' ]
    host= connDict[ 'host' ]
    port= connDict[ 'port' ]
    service= connDict[ 'service' ]
    user= connDict[ 'user' ]
    conn = NicknamedOraConnection( nickname= nickname, host=host, port= port, service= service, username= user )
    _dbx( str( conn ) )
    conns.append( conn )
  _dbx( len( conns ) ) 
  return conns
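The expected shape of ora_connection_cfg.json, as inferred from the keys read above (all values are placeholders):

{ "connectData": [
    { "nickname": "dev-db", "host": "dbhost1", "port": 1521,
      "service": "SVC1", "user": "scott" }
] }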
def main():
    global g_listIndexedBySchemaType, g_unknownFeature
    homeLocation = os.path.expanduser("~")
    _dbx(homeLocation)

    cmdLnConfig = parseCmdLine()
    setDebug(cmdLnConfig.debug)

    usedFeatureName = getGitCurrBranchName() if cmdLnConfig.featureName == g_unknownFeature else cmdLnConfig.featureName
    _infoTs("usedFeatureName: %s" % usedFeatureName)

    if cmdLnConfig.baseCommit:
        linesOfTouchedScripts = extractTouchedScripts(
            commitA=cmdLnConfig.baseCommit, commitB=cmdLnConfig.lastCommit)
    else:
        _infoTs("reading touched lines from stdin..")
        linesOfTouchedScripts = sys.stdin.readlines()

    if cmdLnConfig.action == "extract":
        _infoTs("scripts found: %s" % "\n".join(linesOfTouchedScripts))
    elif cmdLnConfig.action == "make":
        sqlInstallTemplateFile = cmdLnConfig.sqlScriptTemplatePath
        if sqlInstallTemplateFile == None:
            moduleSelfDir = os.path.dirname(
                inspect.getfile(inspect.currentframe()))
            sqlInstallTemplateFile = os.path.join(moduleSelfDir,
                                                  './install_template.sql')
        _infoTs("Will use following file as SQL install template: %s" %
                sqlInstallTemplateFile)

        fill_listIndexedBySchemaType(
            linesOfTouchedScripts=linesOfTouchedScripts)
        createSchemataInstallScripts( sqlScriptTemplatePath= sqlInstallTemplateFile  \
          , baseCommit= cmdLnConfig.baseCommit, lastCommit= cmdLnConfig.lastCommit \
          , featureName= usedFeatureName, storeReleaseMetadata = cmdLnConfig.storeRelMeta  \
          , fileSuffix= usedFeatureName \
          )
        if len(g_filesToExcludeFromInstall) > 0:
            _infoTs("Some files may have been excluded based on blacklist!")
    elif cmdLnConfig.action == "zip":
        zipFile = action_createFileTree(files=linesOfTouchedScripts,
                                        targetLocation=os.path.join(
                                            homeLocation, 'Downloads',
                                            usedFeatureName))
        _infoTs("zip file can also be viewed at %s" % zipFile)
        _infoTs("Some files may have been excluded based on blacklist!")
    elif cmdLnConfig.action == "devTest":
        _dbx("got here")
        pass
Example #7
def action_dbs(envCsv, objCsv):
    """ Extract DDL script for objects given by cmdArgs
  When we compare DDLs from 2 databases, the following additional task is performed:
  1. compute the diff grade of the original DDLs
  2. If the diff grade is zero or we generate the HTML diff report using the original DDLs 
  3. If the diff grade is high, we generate the HTML diff report using the formatted DDLs 
  """
    objectList = getObjectList(objCsv)
    # _errorExit( "test exit %s" % ( len( objectList ) ) )
    envList = envCsv.split(",")
    if len(envList) > 2:
        raise ValueError(
            "diff report cannot be created for more than 2 databases. Consider action extract!"
        )

    # regardless of whether we need to process 1 or 2 databases, we must extract the scripts to the target location first
    action_extractScripts(objCsv=objCsv, envCsv=envCsv)

    for ix, env in enumerate(envList):
        if ix == 0:
            dbOneOriginPaths, dbOneFormattedPaths = CopyFilesForObjectListForEnv(
                envCode=env, objectList=objectList, staleMinutesOk=60)
            _dbx("dbOneOriginPaths len: %s" % (len(dbOneOriginPaths)))
        elif ix == 1:
            dbTwoOriginPaths, dbTwoFormattedPaths = CopyFilesForObjectListForEnv(
                envCode=env, objectList=objectList, staleMinutesOk=60)

    concatDiffReport = "\n"
    if len(envList) == 2:
        # _errorExit( "getHtmlDiffOutput method coded but not yet used! " )
        for i in range(len(dbOneOriginPaths)):
            fileAOrigin = dbOneOriginPaths[i]
            fileBOrigin = dbTwoOriginPaths[i]
            lnCntA, lnCntB, newCnt, delOrChgCnt, diffGrade = getDiffStatsFromFiles(
                fileA=fileAOrigin, fileB=fileBOrigin)
            if diffGrade == 0 or diffGrade == 1:
                concatDiffReport += getHtmlDiffOutput(fileA=fileAOrigin,
                                                      fileB=fileBOrigin)
            else:
                fileAFormatted = dbOneFormattedPaths[i]
                fileBFormatted = dbTwoFormattedPaths[i]
                concatDiffReport += getHtmlDiffOutput(fileA=fileAFormatted,
                                                      fileB=fileBFormatted)

            _dbx(len(concatDiffReport))

        diffRepFile = tempfile.mkstemp(suffix="-accu-diffs.html")[1]
        open(diffRepFile, "w").write(concatDiffReport)
        _infoTs("Diff report generated as %s " % (diffRepFile))
Example #8
def getSenderInfo(infoJsonString):
    """ get sender server, port, username. password will be extracted from environment variable SENDER_SECRET 
  """
    infoDict = json.loads(infoJsonString)
    host = infoDict["host"]
    username = infoDict["username"]
    port = infoDict["port"]
    _dbx(host)
    senderSecret = os.environ["SENDER_SECRET"]
    senderInfo = SenderInfo(host=host,
                            port=port,
                            username=username,
                            secret=senderSecret)

    return senderInfo
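Illustrative call (the JSON keys match those read above; SENDER_SECRET must be set in the environment beforehand, and the values here are placeholders):

os.environ["SENDER_SECRET"] = "not-a-real-password"
info = getSenderInfo('{"host": "smtp.example.com", "port": 465, "username": "noreply"}')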
def extractTouchedScripts(commitA, commitB="HEAD"):
    """ extract scripts which have been modfified or added between 2 commits 
  """
    global g_filesToExcludeFromInstall

    args = ["git", "diff", "--name-only", commitA, commitB]
    outFh, tmpOutFile = tempfile.mkstemp()
    _dbx("using %s to capture output from git diff \nunix-style: %s" %
         (tmpOutFile, dosPath2Unix(tmpOutFile)))
    # outFh = open( tmpOutFile, "w" )
    subprocess.run(args, stdout=outFh)

    # _errorExit("test exit")
    gitOutLines = open(tmpOutFile, "r").readlines()
    if len(gitOutLines) == 0:
        _errorExit("No lines found in git diff output file %s" % (tmpOutFile))
    scriptsSet = set()
    for line in gitOutLines:
        if "we used git diff --name-status" == "but then there are issues with renames":
            # _dbx( line )
            match = re.search("^([ADM])\s+(.*)$", line)
            if match == None:
                raise ValueError(
                    "git diff returned line with unexpected content: %s" %
                    line)
            else:
                staCode, script = match.groups(1)[0:2]
                #_dbx( staCode)# ; _dbx( script )
                if staCode in "AM":
                    scriptsSet.add(script)
                elif staCode == "D":
                    scriptsSet.discard(script)
        else:
            doExclude = False
            for blackLine in g_filesToExcludeFromInstall:
                if line.strip() == blackLine.strip():
                    doExclude = True
                    break

            if not doExclude:
                scriptsSet.add(line)
    _dbx(len(scriptsSet))

    return list(scriptsSet)
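For orientation: the subprocess call above is the equivalent of running git diff --name-only <commitA> <commitB> and collecting the printed paths. A hypothetical invocation:

touched = extractTouchedScripts(commitA="abc123")  # "abc123" is a placeholder commit; commitB defaults to HEAD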
Example #10
def getGitCurrBranchName():
    args = ["git", "branch"]
    outFh, tmpOutFile = tempfile.mkstemp()
    _dbx("using %s to capture output from git branch \nunix-style: %s" %
         (tmpOutFile, tmpOutFile))
    # outFh = open( tmpOutFile, "w" )
    subprocess.run(args, stdout=outFh)

    # _errorExit("test exit")
    gitOutLines = open(tmpOutFile, "r").readlines()
    if len(gitOutLines) == 0:
        _errorExit("No lines found in git branch output file %s" %
                   (tmpOutFile))

    branchName = None
    for line in gitOutLines:
        if line.startswith('*'):
            branchName = line.split()[1]

    return branchName
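A shorter alternative (a sketch, assuming git is on the PATH): git rev-parse --abbrev-ref HEAD prints just the current branch name, so no temp file or line scanning is needed.

def getGitCurrBranchName_v2():
    out = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"],
                         stdout=subprocess.PIPE, universal_newlines=True)
    return out.stdout.strip()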
Example #11
def genUnixDiff(oldPath, newPath, recursive=False):
    """Calls the unix diff command and returns its output to the calling function
    bomb out if any error was detected but only displayed upto 10 lines of the stderr
    """
    diffCmdArgsUnix = ['diff', '-b', oldPath, newPath]
    if recursive: diffCmdArgsUnix.insert(1, '-r')

    # for a in diffCmdArgsUnix: _dbx( a ); _errorExit( 'test' )
    proc = subprocess.Popen(diffCmdArgsUnix,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    unixOutMsgs, errMsgs = proc.communicate()

    if len(errMsgs) > 0:  # got error, return immediately
        _errorExit('got error from diff. Only first 10 lines are shown:\n%s ' %
                   '\n'.join(errMsgs.splitlines()[0:10]))

    _dbx(len(unixOutMsgs))
    return unixOutMsgs
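Typical calls (paths are placeholders):

diffText = genUnixDiff("/tmp/a.sql", "/tmp/b.sql")
treeDiff = genUnixDiff("/tmp/dirA", "/tmp/dirB", recursive=True)  # whole trees via diff -r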
Example #12
def scanEndOfQNotationLiteral(q2and3, str):
    """ this function is called when FSM found a q notation start token, the literal can span many lines.
	We are to return True, <port of literal> if the closing single quote is found within the input. Note that intermediate pairs of of single quotes may occur inside and even at the end of the second return value
	We are to return False, None if the closing single quote is NOT found within the input
	"""
    qNotationCharPairs = {"[": "]", "{": "}", "'": "'", '"': '"', "#": "#"}
    _dbx("q2and3: %s >>>%s" % (q2and3, str))
    q2Closer = qNotationCharPairs[q2and3[0]]
    q3Closer = qNotationCharPairs[q2and3[1]]
    q2and3Closer = q3Closer + q2Closer  # indeed order must be reversed

    part = ""
    endFound = False
    foundAt = str.find(q2and3Closer)
    #_dbx ( "q2and3Closer: %s foundAt %d" % ( q2and3Closer, foundAt ) )
    if foundAt >= 0:
        substrUpTo = foundAt + len(q2and3Closer) + 1
        part += str[0:substrUpTo]
        _dbx("part: %s" % (part))
        endFound = True

    if endFound:
        rv1, rv2 = True, part
    else:
        rv1, rv2 = False, None
    _dbx("rv1:%s, rv2>>%s" % (rv1, rv2))
    return rv1, rv2
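Illustration for a q'[...]'-style literal (hypothetical input): q2and3 holds the two characters after the leading q, here "'[", so the scanner searches for the reversed closer "]'". Note that the +1 above makes the returned part include one character beyond the closer:

ok, part = scanEndOfQNotationLiteral("'[", "abc]';")
# ok is True, part is "abc]';" -- the closer plus the following semicolon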
def parseCmdLine():
    import argparse

    global g_inpFilePath, g_outFilePath, g_inpLines, g_fsmInitStatusCode

    parser = argparse.ArgumentParser()
    # lowercase shortkeys
    parser.add_argument('-i',
                        '--inFile',
                        help='input file, could also be sent as STDIN',
                        required=False)
    parser.add_argument('-o', '--outFile', help='output file', required=False)
    parser.add_argument('-f',
                        '--fsmStartStatus',
                        help='finite machine start status',
                        required=False)

    result = parser.parse_args()

    if result.inFile != None:
        g_inpFilePath = result.inFile
        g_inpLines = open(g_inpFilePath, "r").readlines()
    else:
        g_inpLines = sys.stdin.readlines()

    _dbx(len(g_inpLines))
    # _dbx( "\n".join( g_inpLines[:3] ) )

    if result.outFile != None:
        pass

    if result.fsmStartStatus != None:
        g_fsmInitStatusCode = result.fsmStartStatus
    else:
        g_fsmInitStatusCode = None

    return result
Example #14
def CopyFilesForObjectListForEnv(envCode, objectList, staleMinutesOk=20):
    dbName = g_mapDbNameOfEnvCode[envCode]
    _dbx("db %s" % (dbName))
    now = time.time()  # returns seconds since epoch

    originFilePathsInDiffArea = []
    formattedFilePaths = []

    for obj in objectList:
        orginScriptPath = getDdlScriptPath(object=obj, dbName=dbName)
        if not os.path.exists(orginScriptPath):
            # check existence before getmtime, which would raise on a missing file
            _infoTs("File %s does not seem to exist!" % (orginScriptPath))
        else:
            fileModTime = os.path.getmtime(orginScriptPath)
            _dbx("now: %s mtime: %s" % (now, fileModTime))
            elaMinutes = (now - fileModTime) / 60
            _dbx("elaMinutes %s" % elaMinutes)
            if elaMinutes > staleMinutesOk:
                raise ValueError("file %s is %s minutes old!" %
                                 (orginScriptPath, elaMinutes))
            prefix, fileExt = os.path.splitext(
                os.path.basename(orginScriptPath))

            # also copy the original into the diff area, leaving the source file in place for the user's convenience
            newBaseName = prefix + '-orgF' + fileExt
            newPathOfOriginFile = os.path.join(g_diffLocation, newBaseName)
            shutil.copy(orginScriptPath, newPathOfOriginFile)
            _dbx("newPathOfOriginFile %s" % (newPathOfOriginFile))
            originFilePathsInDiffArea.append(newPathOfOriginFile)

            # create formatted copy and MOVE it to diff area
            formattedOutPath = uglyFormat(inputFilePath=orginScriptPath)

            newBaseName = prefix + '-ugly' + fileExt
            newPathOfFormattedFile = os.path.join(g_diffLocation, newBaseName)

            shutil.move(formattedOutPath, newPathOfFormattedFile)
            _infoTs("Formatted file to be found as %s " %
                    (newPathOfFormattedFile))
            formattedFilePaths.append(newPathOfFormattedFile)

    # _errorExit( "originFilePathsInDiffArea len %s, formattedFilePaths len %s" % ( len( originFilePathsInDiffArea), len( formattedFilePaths) ) )

    return originFilePathsInDiffArea, formattedFilePaths
Example #15
def uglyFormat(inputFilePath):
    """ Read in lines of the input SQL file, format it with the simple/ugly formatter,
  * does some QA
  * dump the format result into a tempfile 
  * return the temppath 
  """
    inputLines = open(inputFilePath, "r").readlines()
    _dbx("read %d lines from %s" % (len(inputLines), inputFilePath))
    tree = fsm.plsqlTokenize(inputLines)
    formattedLines = tree.simpleFormatSemicolonAware()

    if True or "want to" == "QA":
        textWordCounter_a = charCounter.WordCounter(name="sql input",
                                                    lines=inputLines,
                                                    shortCode="sqlInput")
        textWordCounter_a.scan()
        wordCountResultLines_a = textWordCounter_a.report(printToStdout=False)
        forWordCountCheck_a = tempfile.mktemp()
        _dbx("forWordCountCheck_a: %s" % (forWordCountCheck_a))
        open(forWordCountCheck_a, "w").write("\n".join(wordCountResultLines_a))

        textWordCounter_b = charCounter.WordCounter(name="formatted result",
                                                    lines=formattedLines,
                                                    shortCode="sqlFormatted")
        textWordCounter_b.scan()
        wordCountResultLines_b = textWordCounter_b.report(printToStdout=False)
        forWordCountCheck_b = tempfile.mktemp()
        _dbx("forWordCountCheck_b: %s" % (forWordCountCheck_b))
        open(forWordCountCheck_b, "w").write("\n".join(wordCountResultLines_b))

        if "want see result of wordCount diff " == "which is barely usseful":
            _infoTs(" ************ DIFFing WordCounts ... ")
            diffWordCountResult = genUnixDiff(forWordCountCheck_a,
                                              forWordCountCheck_b)

            diffLinesToShow = 10
            _infoTs(
                " ************ result of DIFFing WORD Counts, first %d lines only "
                % diffLinesToShow)
            print("\n".join(
                diffWordCountResult.split("\n")[0:diffLinesToShow]))

    inputFileBaseName = os.path.basename(inputFilePath)
    outPath = persistAndPrintName(textName="formatted %s" % inputFilePath,
                                  textContent=formattedLines,
                                  baseNamePrefix=inputFileBaseName + '-')

    return outPath
def action_createFileTree(files, targetLocation):
    """zip the given files:
  1. if at least 1 file starts with root, find a common root of all. In worst case it is the root. 
     For example /a/b/file1.txt and /a/foo/bar.py  would have /a as common root 
  2. remove the common root from all 
     The 2 files above become b/file1.txt foo/bar.py 
  3. put the files into the zip with the remaining relative paths
  """
    if len(files) == 0:
        raise ValueError("list of files is empty")

    if files[0].startswith("/"):
        # if any file path starts with root, we strip off the common prefix
        commonRoot = os.path.commonprefix(files)
        _dbx("commonRoot %s" % commonRoot)
        pathsUsed = [
            os.path.relpath(file, commonRoot).rstrip("\n") for file in files
        ]
    else:
        pathsUsed = [file.rstrip("\n") for file in files]
    _dbx("pathsUsed %s" % pathsUsed)

    zipArcPath = tempfile.mkstemp(suffix=".zip")[1]
    _dbx("zipArcPath  type %s" % zipArcPath)

    with zipfile.ZipFile(zipArcPath, 'w') as zipWriter:
        for filePath in pathsUsed:
            if os.path.exists(filePath):
                zipWriter.write(filePath)
            else:
                _infoTs("File at path %s does NOT exist!" % filePath)

    # for more efficiency, unzip it to the target location
    with zipfile.ZipFile(zipArcPath, 'r') as zipReader:
        _infoTs("creating file tree in %s ... " % targetLocation)
        zipReader.extractall(path=targetLocation,
                             members=None)  # implies all members

    return zipArcPath
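One caveat worth noting: os.path.commonprefix compares character by character, not by path components, so the "common root" can be a non-directory string. os.path.commonpath is the component-wise variant; a small sketch:

import os
files = ["/a/bar/file1.txt", "/a/baz/file2.py"]
print(os.path.commonprefix(files))  # '/a/ba' -- character-wise, not a real directory
print(os.path.commonpath(files))    # '/a'    -- component-wise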
Example #17
def getObjectList(objectCsv):
    """ convert CSV string in expected format to list of DBObject instances 
  """
    retVal = []
    for ix, obj in enumerate(objectCsv.split(",")):
        _dbx("%d. obj %s " % (ix, obj))
        tokens = obj.split(".")
        if len(tokens) != 3:
            _errorExit(
                "object %s does not conform to expected format of schema.name.type_code"
                % (obj))
        (owner, name, typeCode) = tokens[:]
        _dbx("owner %s typ %s" % (obj, typeCode))
        typeOfficial = oraUtils.g_mapFileExtDbmsMetaDataTypeToad[typeCode]
        _dbx("typeOfficial %s" % (typeOfficial))

        retVal.append(
            oraUtils.DBObject(name=name,
                              owner=owner,
                              type=typeOfficial,
                              fileExt=typeCode))

    return retVal
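Expected CSV format, illustrated with hypothetical objects (the type code must be a key of oraUtils.g_mapFileExtDbmsMetaDataTypeToad, e.g. pkb/pks as used elsewhere in this listing):

objs = getObjectList("SK.SK_INVOICE_LSI.pkb,SK.SK_INVOICE_LSI.pks")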
Example #18
def spoolScriptWithSqlplusDbmsMetadata ( spoolDestRoot, dirSep, dbObjects, connectQuadruple= None, connDataObj= None ): 
  """ Use sqlplus 
    connectQuadruple is colon(:) separated list of host, port, service, user
  """
  if connectQuadruple == None and connDataObj == None:
    raise ValueError( "either connectQuadruple or connDataObj has to be provided")
  if connectQuadruple != None and connDataObj != None:
    raise ValueError( "either connectQuadruple or connDataObj has to be provided")

  if connectQuadruple != None:
      host, port, service, username = connectQuadruple.split( ":" )
  elif connDataObj != None:
      host, port, service, username = connDataObj.host, connDataObj.port, connDataObj.service, connDataObj.username 
 
  spoolScriptHeader = """
CONNECT {v_ez_connect}
WHENEVER SQLERROR EXIT 

column db_name new_val db_name
column spool_path_current new_val spool_path_current

ALTER SESSION SET NLS_LANGUAGE=GERMAN
;
set termout ON 
SELECT sys_context( 'userenv', 'db_name' ) AS db_name 
  , user connect_as 
FROM dual 
;
"""
  # FIXME: we need to align the mapping of object type in the SQL script template and mapping defined in dict object! 
  scriptBlockFor1Object = """
set termout OFF 

WITH prep_ AS 
( SELECT 
   UPPER( '{v_object_name}' ) || '-'||'&db_name' as obj_name_and_db_name
  , CASE upper('{v_object_type}') 
    WHEN 'PACKAGE_BODY' THEN '.pkb' 
    WHEN 'PACKAGE_SPEC' THEN '.pks' 
    WHEN 'TRIGGER' THEN '.trg' 
    WHEN 'TYPE_BODY' THEN '.tpb' 
    WHEN 'TYPE_SPEC' THEN '.tps' 
    WHEN 'FUNCTION' THEN '.fnc' 
    WHEN 'PROCEDURE' THEN '.prc' 
    WHEN 'VIEW' THEN '.vw' 
    ELSE '.sql' 
    END AS file_ext 
  FROM DUAL 
) 
SELECT '{spool_dest_root}'|| '{dir_sep}' || '&db_name' || '{dir_sep}' ||obj_name_and_db_name ||file_ext 
  AS  spool_path_current 
FROM prep_ 
;

set echo off feedback off head off linesize 1000 longchunksize 9999999 long 9999999 pagesize 49999 termout off trimspool on

spool &spool_path_current

SELECT dbms_metadata.get_ddl( upper('{v_object_type}'), upper( '{v_object_name}' ), upper( '{v_schema}' ) ) 
FROM DUAL
;

spool off
"""

  spoolScriptTrailer = """
EXIT
"""

  password = getOraPassword ( oraUser= username, oraPasswordEnvVar= 'ORA_SECRET', batchMode= False )

  ezConnect = """%s/"%s"@(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=%s)(PORT=%s)))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=%s)))""" % ( username, password, host, port, service )
  spoolPath = tempfile.mktemp()

  if "want to fight" == "the DOS vs gitbash vs unix platform gap":
    sqlTermoutPath = tempfile.mktemp()
    _dbx( "sqlTermoutPath %s" )
    sqlpTermoutFh = open( sqlTermoutPath, "w" )

    sqlpJob = subprocess.Popen( [ "sqlplus", "/nolog" ], stdin = subprocess.PIPE, stdout= sqlpTermoutFh )
    sqlpJob.stdin.write( spoolScript.encode('utf-8') )
    sqlpJob.communicate( )
  
    sqlpTermoutFh = open( sqlTermoutPath, "r" )
    _dbx( sqlpTermoutFh.readlines() )
  else: # build one script block per DBObject
    
    scriptBlocks= []
    for obj in dbObjects:
      scriptBlocks.append( scriptBlockFor1Object.format( spool_dest_root= spoolDestRoot, v_schema= obj.owner, v_object_type= obj.type, v_object_name= obj.name, dir_sep= dirSep ) )

    _dbx( "len( scriptBlocks ) : %d " % len( scriptBlocks ) )

    headerFormatted = spoolScriptHeader.format( v_ez_connect= ezConnect )
    spoolScript = "%s \n%s \n%s" % ( headerFormatted, "\n".join( scriptBlocks), spoolScriptTrailer )
    sqlplusScriptPath =  tempfile.mktemp() + '.sql'
    _dbx( "sqlplusScriptPath: %s" % ( sqlplusScriptPath ) ) 
    open( sqlplusScriptPath, "w").write( spoolScript )

    return sqlplusScriptPath
Example #19
def fetchOneScriptFromDatabase( objectSchema, objectType, objectName, sqlRunner ) :
  """Extract one DDL scripts from the given sqlRunner handle and return it as list of lines
  """
  _dbx( type( sqlRunner ) )

  extractorQuery = ' '.join( open( "./parameterized_extractor.sql", "r").readlines() )
  _dbx(  extractorQuery [ : 200] ) 
  _dbx(  len( extractorQuery ) )
  if "need to " == "use all_objects":
    extractorQuery = extractorQuery.replace( '/*replace_start*/ dba_objects /*replace_end*/', ' all_objects ' ) 
    _dbx(  len( extractorQuery ) )

  bindVar1 = ",".join( objectSchema );   _dbx( bindVar1 )
  bindVar2 = ",".join( objectType );   _dbx( bindVar2 )
  bindVar3 = ",".join( objectName );   _dbx( bindVar3 )
  # _errorExit( "test" )
  sqlRunner.execute( extractorQuery, [ bindVar1, bindVar2, bindVar3 ] )

  dbName, scriptContent = sqlRunner.fetchone()[0:2]  # first two columns of the result row
  _dbx( type( scriptContent ) )
  _dbx( len( scriptContent ) )
  
  return dbName, scriptContent 
Example #20
    sqlpJob.communicate( )
  
    sqlpTermoutFh = open( sqlTermoutPath, "r" )
    _dbx( sqlpTermoutFh.readlines() )
  else: # build one script block per DBObject
    _infoTs( "The connecting user will need access to global temporary table %s !" % clobTempTable )
    scriptBlocks= []
    for obj in dbObjects:
      scriptBlocks.append( scriptBlockFor1Object.format( spool_dest_root= spoolDestRoot, 
        lv_schema= obj.owner, lv_object_type= obj.type, lv_object_name= obj.name, dir_sep= dirSep,
        clobTempTable = clobTempTable ) )

    _dbx( "len( scriptBlocks ) : %d " % len( scriptBlocks ) )

    headerFormatted = spoolScriptHeader.format( v_ez_connect= ezConnect )
    spoolScript = "%s \n%s \n%s" % ( headerFormatted, "\n".join( scriptBlocks), spoolScriptTrailer )
    sqlplusScriptPath =  tempfile.mktemp() + '.sql'
    _dbx( "sqlplusScriptPath: %s" % ( sqlplusScriptPath ) ) 
    open( sqlplusScriptPath, "w").write( spoolScript )

    return sqlplusScriptPath




if __name__ == "__main__" : 
  
  setDebug( True )
  conns = loadOraConnectionData()
  _dbx( len( conns ) )
  
Example #21
def plsqlTokenize(inpLines):
    lnCnt = len(inpLines)
    _dbx(lnCnt)
    lineNo = 0

    nodeStack = TokenStack(name='plsqlTokenize')
    curTreeId = None

    tokBuf = ""
    interceptBufferLines = []
    (interceptStartLineNo, interceptStartColNo) = (-1, -1)
    # just for clarity. First reference is when we hit block_comment_begin
    # match for alphanumString  OR dblQuotedAlphanumString OR assignment OR singleEqual OR doubleEqual OR dotOperator
    #    match 	macros
    #    match 	block_comment_begin OR lineComment
    #    match 	 single quote, assignment operator
    #    match comparison operators, named param operator
    #    match arithmetic operators, left or right parenthesis, comma, semicolon
    #    match Q notation begin in various flavours
    eng = re.compile(
        """^(\s*)(\$\$plsql_unit|\$\$plsql_line|q\{"|[\$#a-z0-9_]+|"[\$#a-z0-9_]+"|:=|>=|<=|>|<|!=|=>|=|/\*|--|\|\||\.\.|\.|%|\(|\)|\+|-|\*|/|,|;|@)(\s*)""",
        re.IGNORECASE)

    curSta = FsmState.start
    for line in inpLines[:29999]:
        colNo = 1
        lineNo += 1
        lnBuf = line
        eoLine = False
        if None != re.search('^(\s*)$', line):  # match empty line
            pass  # _dbx( 'Line %d is empty' % lineNo )
        else:
            pass  # _dbx( "line %d len: %d. Line content >>>>>>>>>%s" % ( lineNo, len( line ), line.rstrip("\n") ) )
        i = 0
        # do we need eoLine indeed or can we just bump colNo accordingly?
        while (i < 999 and colNo < len(line) and not eoLine
               ):  # process line with safety belt against infinite loop
            i += 1
            #_dbx( "Ln/col %d/%d curSta:  '%s'" % ( lineNo, colNo, curSta ) )
            if curSta == FsmState.find_block_comment_end:
                m = re.search('^(.*)(\*/)', lnBuf)  # match end of block comment
                if m == None:
                    #_dbx( "need to cache block comment" )
                    interceptBufferLines.append(lnBuf)
                    eoLine = True
                    continue
                else:  # found end of block comment
                    interceptBufferLines.append(m.group(1) + m.group(2))
                    _dbx("group1>>%s, group2>>%s, lnBuf>>>>%s" %
                         (m.group(1), m.group(2), lnBuf))
                    lenUptoStarSlash = len(m.group(1)) + len(m.group(2))
                    _dbx("lenUptoStarSlash:%d" % (lenUptoStarSlash))
                    colNo += lenUptoStarSlash
                    _dbx("found block comment end at col %d" % colNo)
                    lnBuf = lnBuf[lenUptoStarSlash:]
                    _dbx("stuff at comment is >>>%s" % (lnBuf.rstrip("\n")))
                    curSta = FsmState.start
                    node = TokenNode(text="".join(interceptBufferLines),
                                     type=TokenType.block_comment_begin,
                                     staAtCreation=curSta,
                                     lineNo=interceptStartLineNo,
                                     colNo=interceptStartColNo,
                                     parentId=curTreeId)
                    nodeStack.push(node)

                    continue  # while not EOL

            elif curSta == FsmState.in_single_quoted_literal:
                #_dbx( "scanning for end single quote in >>> %s " % lnBuf )
                endOfLitFound, partOfLit = scanEndOfSQLiteral(lnBuf)
                if not endOfLitFound:  # line break is part of string literal
                    interceptBufferLines.append(lnBuf)
                    eoLine = True  # line is done
                else:  # found end of literal in line, possibly with rest not belonging to literal
                    curSta = FsmState.start
                    interceptBufferLines.append(partOfLit)
                    literalText = "".join(interceptBufferLines)
                    node = TokenNode(
                        text=literalText,
                        type=TokenType.single_quoted_literal_begin,
                        staAtCreation=curSta,
                        lineNo=interceptStartLineNo,
                        colNo=interceptStartColNo,
                        parentId=curTreeId)
                    nodeStack.push(node)
                    colNo += len(partOfLit)
                    lnBuf = line[colNo - 1:]
                    #_dbx( "lnBuf>>>%s" % lnBuf )
                continue
            elif curSta == FsmState.in_q_notation_begin:
                #_dbx( "scanning for end q notation literal in >>> %s " % lnBuf )
                endOfLitFound, partOfLit = scanEndOfQNotationLiteral(
                    q2and3, lnBuf)
                if not endOfLitFound:  # line break is part of string literal
                    interceptBufferLines.append(lnBuf)
                    eoLine = True  # line is done
                else:  # found end of literal in line, possibly with rest not belonging to literal
                    curSta = FsmState.start
                    interceptBufferLines.append(partOfLit)
                    literalText = "".join(interceptBufferLines)
                    # NB: q-notation literals are pushed with the single-quoted-literal token type
                    node = TokenNode(
                        text=literalText,
                        type=TokenType.single_quoted_literal_begin,
                        staAtCreation=curSta,
                        lineNo=interceptStartLineNo,
                        colNo=interceptStartColNo,
                        parentId=curTreeId)
                    nodeStack.push(node)
                    colNo += len(partOfLit)
                    lnBuf = line[colNo - 1:]
                    # _dbx( "lnBuf>>>%s" % lnBuf )
                continue

            m = re.search('^(\s*)$', lnBuf)  # match empty line
            if m != None:
                eoLine = True

            if eoLine:
                continue

            # process other types of token
            m = eng.match(lnBuf)  # _dbx( type( m ) )
            #_dbx( 'lnBuf being parsed        >>>>>> %s' % lnBuf.rstrip("\n") )
            if m == None:

                # the special scan for single quoted literal is no longer needed since we can use the triple single quotes!
                m = re.search("^(\s*)(')", lnBuf)  # match single quote
                if m != None:  # found single quote
                    # stateStack.push( curSta, curTreeId  )
                    curSta = FsmState.in_single_quoted_literal
                    interceptBufferLines = []
                    (interceptStartLineNo, interceptStartColNo) = (lineNo,
                                                                   colNo)
                    interceptBufferLines.append(m.group(2))
                    colNo += len(m.group(1) + m.group(2))
                    lnBuf = line[colNo - 1:]
                    #_dbx( colNo )

                    continue  # we must skip the fine-grained FSM
                else:
                    _infoTs(
                        "Rest of line %d could not be tokenized. Line content follows \n%s"
                        % (lineNo, lnBuf))
                    return nodeStack

            else:
                # second re group i.e. token
                tok = m.group(2)

                # third re group i.e. optional whitespaces
                #if len( m.group(3) ) > 0: # found trailing whitespaces

                colNo += len(m.group(1)) + len(m.group(2)) + len(m.group(3))
                # _dbx( "colNo: %d" % colNo )

                #_dbx( "Ln/col %d/%d raw tok:  '%s'" % ( lineNo, colNo, tok ) )
                lnBuf = line[colNo - 1:]
                # _dbx( "rest of line: %s" % lnBuf.rstrip("\n") )

                tokTyp, normed = gettokentype(tok)
                #_dbx( "tokTyp:  %s normed: '%s'" % ( tokTyp, normed  ) )

                if tokTyp == TokenType.block_comment_begin:
                    if curSta == FsmState.find_block_comment_end:
                        _errorExit("Encountered tokTyp %s while in state %s!" %
                                   (tokTyp, curSta))
                    else:  # found block_comment in the middle of somewhere; switch the parser to specifically search for end of comment
                        curSta = FsmState.find_block_comment_end
                        interceptBufferLines = []
                        (interceptStartLineNo, interceptStartColNo) = (lineNo,
                                                                       colNo)
                        interceptBufferLines.append(tok)
                        #_dbx( "we must skip the fine-grained FSM ")
                        continue  # we must skip the fine-grained FSM

                elif tokTyp == TokenType.single_line_comment_begin:  # found double minus
                    #_dbx( foo )
                    if curSta == FsmState.find_block_comment_end:
                        _errorExit("Encountered tokTyp %s while in state %s!" %
                                   (tokTyp, curSta))
                    else:  # not in wrong status, just push line comment node, no change of state
                        node = TokenNode(
                            text=normed + lnBuf.rstrip("\n"),
                            type=TokenType.single_line_comment_begin,
                            staAtCreation=curSta,
                            lineNo=lineNo,
                            colNo=colNo - len(normed),
                            parentId=curTreeId)
                        nodeStack.push(node)
                        eoLine = True
                        continue
                elif tokTyp == TokenType.q_notation_begin:
                    #_dbx( foo )
                    if curSta == FsmState.in_q_notation_begin:
                        _errorExit("Encountered tokTyp %s while in state %s!" %
                                   (tokTyp, curSta))
                    else:  # not in wrong status, just push line comment node, no change of state
                        # stateStack.push( curSta, curTreeId  )
                        q2and3 = normed[
                            1:
                            3]  # should be the open bracket and single or double quote, in any order
                        _dbx("normed>>>%s lnBuf>>> %s" % (normed, lnBuf))
                        curSta = FsmState.in_q_notation_begin
                        interceptBufferLines = []
                        (interceptStartLineNo, interceptStartColNo) = (lineNo,
                                                                       colNo)
                        interceptBufferLines.append(m.group(1) + m.group(2))

                    continue  # we must skip the fine-grained FSM
                else:
                    pass  #_dbx( "lineNo/colNo: %d/%d lnBuf >>>%s" % ( lineNo, colNo, lnBuf ))
                #
                #
                node = TokenNode(text=normed,
                                 type=tokTyp,
                                 staAtCreation=None,
                                 lineNo=lineNo,
                                 colNo=colNo - len(normed),
                                 parentId=curTreeId)
                nodeStack.push(node)

    return nodeStack  # plsqlTokenize
Example #22
    text = message.as_string()

    # Log in to server using secure context and send email
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(senderInfo.host, senderInfo.port,
                          context=context) as server:
        server.login(senderInfo.username, senderInfo.secret)
        server.sendmail(senderInfo.username, recipients, text)


if __name__ == "__main__":

    argConfig = parseCmdLine()

    senderInfoJson = "".join(open(argConfig.senderInfoFile, "r").readlines())
    _dbx(senderInfoJson)
    senderInfo = getSenderInfo(infoJsonString=senderInfoJson)

    sendMail(senderInfo= senderInfo, recipients= argConfig.recipients \
      , subject= argConfig.subject, plainText = argConfig.messagePlainText \
      , htmlFile = argConfig.htmlTextFile, binAttachFile = argConfig.attachmentFile
    )
    if "want to " == "create json sender info ":
        testDict = {
            'host': 'messenger',
            'port': "123",
            'username': '******',
            'secret': "100"
        }
        print(json.dumps(testDict))
Example #23
def fsmMain(preTokStack, startStatus=FsmState.start):
    """ make ASSUMPTION that comments tokens are in a different stack and in the main stack 
	we only have non-comment tokens. when later on significant tokens have been linked properpy
	we will have another pass to insert the comment tokens based on lineNo/ColNo
	"""
    retTokStack = TokenStack(name="fsmMain")
    stateStack = StateStack(name="main_state")
    preTokStackLen = len(preTokStack.arr)
    curTreeId = None
    curSta = startStatus
    thenComesFromStack = StateStack(name="thenComesFrom")

    while preTokStack.peekAhead() != None:
        curTokens = preTokStack.popComplexAware()
        tokId, normed, tokTyp = (curTokens[0].id, curTokens[0].text,
                                 curTokens[0].type)
        # lineNo, colNo = (curTokens[0].lineNo, curTokens[0].colNo )
        _dbx("curSta %s curTokens len %d, 1st id:%s type:%s >>>%s" %
             (curSta, len(curTokens), tokId, tokTyp, normed))

        if curSta == FsmState.start:
            if tokTyp == TokenType.relevant_keyword and normed == "CREATE":
                stateStack.push(curSta, curTreeId)
                newSta = FsmState.in_compilation_unit_header
                curTokens[0].state = staAtCreation = newSta
                newTreeId = curTokens[0].id
            else:
                _errorExit("Unknown token id %s type %s in state %s " %
                           (tokId, tokTyp, curSta))
        elif curSta == FsmState.in_compilation_unit_header:
            if tokTyp == TokenType.relevant_keyword and normed == "AS":
                _dbx(foo)
                newSta = FsmState.in_declaration
                curTokens[0].state = staAtCreation = newSta
                newTreeId = curTokens[0].id
            elif tokTyp == TokenType.semicolon:  # forward declaration of function/procedure
                _dbx(foo)
                newSta, newTreeId = stateStack.pop()
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
        elif curSta == FsmState.in_declaration:
            if tokTyp == TokenType.relevant_keyword and normed in ["BEGIN"]:
                _dbx(foo)
                newSta = FsmState.in_body
                newTreeId = curTokens[0].id
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
                newSta = FsmState.started_declaration_entry
                newTreeId = curTokens[0].id
        elif curSta == FsmState.started_declaration_entry:
            if tokTyp == TokenType.semicolon:
                _dbx(foo)
                newSta = FsmState.in_declaration
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
        elif curSta == FsmState.in_body:
            if tokTyp in [TokenType.aggEndIdentSemic, TokenType.aggEndSemic]:
                _dbx(foo)
                newSta, newTreeId = stateStack.pop()
            elif normed == "BEGIN":
                _dbx(foo)
                newSta = FsmState.in_body
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
            elif normed in ['IF', 'WHILE']:
                newSta = FsmState.expect_bool_expression
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
                if normed == "IF":
                    thenComesFromStack.push(FsmState.if_or_case_statement_open,
                                            None)
            elif normed == "CASE" and preTokStack.peekAhead().text == "WHEN":
                _dbx(foo)
                for nextTok in preTokStack.popComplexAware():
                    curTokens.append(nextTok)
                newSta = FsmState.expect_bool_expression
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
                thenComesFromStack.push(FsmState.if_or_case_statement_open,
                                        None)
            elif normed == "CASE" and preTokStack.peekAhead().text != "WHEN":
                # here we must not pop the peeked token, it must go thru normal FSM
                newSta = FsmState.expect_expression
                newTreeId = curTokens[0].id
                #no pop expected!  stateStack.push( curSta, curTreeId)
                thenComesFromStack.push(FsmState.if_or_case_statement_open, None)
            elif normed in ['DECLARE']:
                newSta = FsmState.in_declaration
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
                stateStack.push(curSta, curTreeId)
                newSta = FsmState.expect_expression
                newTreeId = curTokens[0].id
        elif curSta in [FsmState.expect_expression]:
            if tokTyp in [
                    TokenType.semicolon, TokenType.aggEndSemic,
                    TokenType.aggEndIfSemic, TokenType.aggEndCaseSemic,
                    TokenType.aggEndLoopSemic
            ]:
                _dbx(foo)
                newSta, newTreeId = stateStack.pop()

            elif normed in ['THEN']:  # this is for "CASE WHEN .. THEN .."
                _dbx(foo)
                peekThenComesFrom = thenComesFromStack.peek()[
                    0]  # we don't care about the parentId
                if peekThenComesFrom == FsmState.case_bool_expression_open:
                    newSta = FsmState.in_body
                    newTreeId = curTokens[0].id
                else:
                    _errorExit(
                        "Found THEN at %s without opening CASE token in thenComesFromStack"
                        % tokId)
                thenComesFromStack.pop()  # ignore return values
            elif normed == "ELSE":
                _dbx(foo)
                newSta = FsmState.in_body
                newTreeId = curTokens[0].id
            elif normed == "CASE" and preTokStack.peekAhead().text == "WHEN":
                _dbx(foo)
                for nextTok in preTokStack.popComplexAware():
                    curTokens.append(nextTok)
                newSta = FsmState.expect_bool_expression
                newTreeId = curTokens[0].id
                #do not expect pop!  stateStack.push( curSta, curTreeId)
                thenComesFromStack.push(FsmState.case_bool_expression_open,
                                        None)
            elif normed == "CASE" and preTokStack.peekAhead().text != "WHEN":
                _dbx(foo)
                # here we must not pop the peeked token, it must go thru normal FSM
                newSta = FsmState.expect_expression
                newTreeId = curTokens[0].id
                # stateStack.push( curSta, curTreeId)
                thenComesFromStack.push(FsmState.case_bool_expression_open,
                                        None)
            elif normed in ['LOOP']:
                # this is for "FOR rec IN ( select * from xyz ) LOOP or similar constructs"
                newSta = FsmState.in_body
            elif tokTyp == TokenType.left_bracket:
                _dbx(foo)
                stateStack.push(curSta, curTreeId)
                newTreeId = curTokens[0].id
            elif tokTyp == TokenType.right_bracket:
                newSta, newTreeId = stateStack.pop()
            else:
                _dbx("other token type %s in state %s " % (tokTyp, curSta))
        elif curSta in [FsmState.expect_bool_expression]:
            _dbx(foo)
            if tokTyp in [TokenType.aggEndSemic
                          ]:  # this is for "CASE ... END;"
                _dbx(foo)
                newSta, newTreeId = stateStack.pop()
            elif normed in [
                    'THEN'
            ]:  # this is for "IF x THEN .. ELSE " or "WHILE y LOOP" or "CASE WHEN .. THEN .."
                _dbx(foo)
                peekThenComesFrom = thenComesFromStack.peek()[
                    0]  # we don't care about the parentId
                if peekThenComesFrom == FsmState.if_or_case_statement_open:
                    newSta = FsmState.in_body
                    newTreeId = curTokens[0].id
                elif peekThenComesFrom == FsmState.case_bool_expression_open:
                    newSta = FsmState.expect_expression
                    newTreeId = curTokens[0].id
                else:
                    _errorExit("No matching OPENER for THEN at %s" % tokId)
                thenComesFromStack.pop()  # ignore return values

            elif normed in [
                    'ELSE', 'ELSIF', 'LOOP'
            ]:  # this is for "IF x THEN .. ELSE " or "WHILE y LOOP" or "CASE WHEN .. THEN .."
                _dbx(foo)
                newSta = FsmState.in_body
                newTreeId = curTokens[0].id

            elif tokTyp == TokenType.left_bracket:
                _dbx(foo)
                stateStack.push(curSta, curTreeId)
                newTreeId = curTokens[0].id
            elif tokTyp == TokenType.right_bracket:
                newSta, newTreeId = stateStack.pop()
            elif normed == "CASE" and preTokStack.peekAhead().text == "WHEN":
                for nextTok in preTokStack.popComplexAware():
                    curTokens.append(nextTok)
                newSta = FsmState.expect_bool_expression
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
            elif normed == "CASE" and preTokStack.peekAhead().text != "WHEN":
                # here we must not pop the peeked token, it must go thru normal FSM
                newSta = FsmState.expect_expression
                newTreeId = curTokens[0].id
                stateStack.push(curSta, curTreeId)
        else:
            _errorExit("No handler for state %s with input %s " %
                       (curSta, tokTyp))

        for ix, curTok in enumerate(curTokens):
            # _dbx( "ix: %d type %s" % (ix, type( curTok)) )
            newNode = TokenNode(text=curTok.text,
                                type=curTok.type,
                                staAtCreation=curSta,
                                lineNo=curTok.lineNo,
                                colNo=curTok.colNo,
                                parentId=curTreeId)
            retTokStack.push(newNode)
            #_dbx( "ret stack len %d" % (len( retTokStack.arr ) ) )

        _dbx("cur sta %s new sta %s" % (curSta, newSta))
        curSta, curTreeId = newSta, newTreeId
    _infoTs("final sta was %s" % (newSta))
    if preTokStackLen != len(retTokStack.arr):
        _errorExit(
            "OOPPS preTokStackLen is  %d and len( retTokStack.arr ) is %d" %
            (preTokStackLen, len(retTokStack.arr)))
    return retTokStack

def createSchemataInstallScripts(sqlScriptTemplatePath,
                                 baseCommit,
                                 lastCommit,
                                 featureName=None,
                                 fileSuffix=None,
                                 storeReleaseMetadata=True):
    """ Create SQL and BAT install scripts for the schemata with deployable scripts
  Deployable scripts are: 1) file is not at top level of the schema 
  and 2) extension is not blacklisted 
  """
    global g_internalSepator, g_listIndexedBySchemaType, g_schemataFound

    insertSqlStmtTemplate = """
--------------------------------------------------------------------------------
-- Store software release information: at this position we also record attempted 
-- deployment  
--------------------------------------------------------------------------------
DECLARE
  lv_rel_id NUMBER;
BEGIN 
  SELECT basic_data.APPL_RELEASE_SQ.nextval INTO lv_rel_id FROM dual;
  INSERT INTO basic_data.t_applied_releases( id, release_no, creation_dt ) VALUES( lv_rel_id, q'[{featureName}]', sysdate );
  INSERT INTO basic_data.t_applied_files( id, release_id, filename ) 
  SELECT appl_files_sq.nextval, lv_rel_id, q'[{basenameSqlScript}, git-branch: {featureName}, baseline-commit:{baselineCommit}, last-commit:{lastCommit}]' 
  FROM dual;

  COMMIT;
END;
/
  """

    suffixUsed = "-" + fileSufix if fileSufix else ""
    sentinelPatPrefix = "REM place_here_scripts_for:"
    fh = open(sqlScriptTemplatePath, mode="r")
    inpTemplateLines = fh.readlines()
    _dbx("got %d lines from template" % (len(inpTemplateLines)))
    scriptTemplateText = "".join(inpTemplateLines)

    tmpDir = tempfile.mkdtemp()
    _infoTs("install scripts will be placed under %s" % (tmpDir))

    batchScriptTemplate = """
SET NLS_LANG=GERMAN_GERMANY.WE8MSWIN1252

SQLPLUS /nolog @{sqlScriptBaseName}
"""

    readmeContentHeader = """
Order to run install scripts:
"""
    readmeContentFooter = """
All processes in groups xxx, yyy must be stopped
"""

    batchScripts = []
    for schema in g_schemataFound:
        _dbx("schema %s\n" % (schema))

        script4Schema = scriptTemplateText
        script4Schema = script4Schema.replace("<TARGET_SCHEMA>", schema)
        for schemaType in g_listIndexedBySchemaType.keys():
            if schemaType.startswith(schema):
                typeOnly = schemaType.split(g_internalSepator)[1]
                if typeOnly.upper() not in [
                        '.SQL'
                ]:  # dirty fix to filter out top-level sql script
                    sentinelPattern = "%s%s" % (sentinelPatPrefix,
                                                typeOnly.upper())
                    _dbx("schemaType %s, sentinel %s" %
                         (schemaType, sentinelPattern))
                    listOfScripts = g_listIndexedBySchemaType[schemaType]
                    _dbx("cnt scripts %s" % (len(listOfScripts)))
                    # aggregate scripts of schemaType into one string
                    stringToAppend = "\n".join(listOfScripts)
                    _dbx(stringToAppend)
                    found = script4Schema.find(sentinelPattern)
                    if found >= 0:
                        _dbx("found pattern")
                        script4Schema = script4Schema.replace(
                            sentinelPattern,
                            "\n%s\n%s" % (sentinelPattern, stringToAppend))
                    else:
                        _errorExit("Sentinel '%s' not found in template!" %
                                   (sentinelPattern))  # , isWarning = True
        # print( script4Schema )

        # now remove the sentinels
        tempScript = script4Schema
        newLines = []
        for line in tempScript.split("\n"):
            if not line.startswith(sentinelPatPrefix):
                newLines.append(line)
        script4Schema = "\n".join(newLines)

        schemaDir = os.path.join(tmpDir, schema)
        os.mkdir(schemaDir)

        basenameSqlScript = "install_%s%s.sql" % (schema, suffixUsed)
        _dbx(basenameSqlScript)
        scriptPathSql = os.path.join(schemaDir, basenameSqlScript)

        if storeReleaseMetadata:
            # for INSERT of applied release information
            insertSqlStmt = insertSqlStmtTemplate.format( featureName= featureName \
              , baselineCommit= baseCommit, lastCommit= lastCommit, basenameSqlScript= basenameSqlScript )
            _dbx(insertSqlStmt)
            script4Schema = script4Schema.format(
                placeHolderStoreReleaseMetadata=insertSqlStmt,
                baselineCommit=baseCommit,
                featureName=featureName)
        else:
            script4Schema = script4Schema.format(
                placeHolderStoreReleaseMetadata="",
                baselineCommit=baseCommit,
                featureName=featureName)

        batScriptBaseName = "install_%s%s.bat" % (schema, suffixUsed)

        scriptPathBat = os.path.join(schemaDir, batScriptBaseName)
        fh = open(scriptPathSql, mode="w")
        fh.write(script4Schema)
        fh.close()
        sqlScriptBaseName = os.path.basename(scriptPathSql)

        batchScriptContent = batchScriptTemplate.format(
            sqlScriptBaseName=sqlScriptBaseName)
        fh = open(scriptPathBat, mode="w")
        fh.write(batchScriptContent)
        fh.close()
        _infoTs("output SQL script unix-style >>>>> %s \nDOS style >>>: %s" %
                (dosPath2Unix(scriptPathSql), scriptPathSql))
        _infoTs("output BAT script unix-style >>>>> %s \nDOS style >>>: %s" %
                (dosPath2Unix(scriptPathBat), scriptPathBat))
        #_errorExit( "in test - stopped after 1 schema!! " );
        batchScripts.append(os.path.basename(scriptPathBat))

    # create install readme
    readmeFile = os.path.join(tmpDir, "install%s-readme.txt" % (suffixUsed))
    items = []
    for batchScript in batchScripts:
        items.append("?. %s" % (batchScript))
        _dbx(batchScript)
    itemsText = "\n".join(items)
    readmeText = "%s\n%s\n%s\n" % (readmeContentHeader, itemsText,
                                   readmeContentFooter)

    fh = open(readmeFile, mode="w")
    fh.write(readmeText)
    fh.close()
    _infoTs("readme file unix-style >>>>> %s \nDOS style >>>: %s" %
            (dosPath2Unix(readmeFile), readmeFile))
def main():
    global g_fsmInitStatusCode
    argParserResult = parseCmdLine()

    if True:
        tree = fsm.plsqlTokenize(g_inpLines)
        formattedLines = tree.simpleFormatSemicolonAware()
        # print( "\n".join( formattedLines ) )

    if False or "want to" == "compare output manually":
        #print( "*"*20 + "input sql" + "*"*20 )
        #print( "".join( g_inpLines))

        print("*" * 20 + "formatted" + "*" * 20)
        print("\n".join(formattedLines))

    if "want to compare" == "char count":
        forCharCountCheck_A = tempfile.mktemp()
        _dbx("forCharCountCheck_A: %s" % (forCharCountCheck_A))
        charCounter_A = charCounter.TextCharStatsIgnoreCase(
            textName="sql input", txt=g_inpLines)
        charCountResultLines_A = charCounter_A.report(printToStdout=False)
        open(forCharCountCheck_A, "w").write("\n".join(charCountResultLines_A))

        forCharCountCheck_B = tempfile.mktemp()
        _dbx("forCharCountCheck_B: %s" % (forCharCountCheck_B))
        charCounter_B = charCounter.TextCharStatsIgnoreCase(
            textName="formatted output", txt=formattedLines)
        charCountResultLines_B = charCounter_B.report(printToStdout=False)
        open(forCharCountCheck_B, "w").write("\n".join(charCountResultLines_B))

        _infoTs(" ************ DIFFing CharCounts ... ")
        diffCharCountResult = genUnixDiff(forCharCountCheck_A,
                                          forCharCountCheck_B)

        _infoTs(" ************ result of DIFFing CharCounts")
        print(diffCharCountResult)

    if True:
        textWordCounter_a = charCounter.WordCounter(name="sql input",
                                                    lines=g_inpLines,
                                                    shortCode="sqlInput")
        textWordCounter_a.scan()
        wordCountResultLines_a = textWordCounter_a.report(printToStdout=False)
        forWordCountCheck_a = tempfile.mktemp()
        _dbx("forWordCountCheck_a: %s" % (forWordCountCheck_a))
        open(forWordCountCheck_a, "w").write("\n".join(wordCountResultLines_a))

        textWordCounter_b = charCounter.WordCounter(name="formatted output",
                                                    lines=formattedLines,
                                                    shortCode="formattedOutput")
        textWordCounter_b.scan()
        wordCountResultLines_b = textWordCounter_b.report(printToStdout=False)
        forWordCountCheck_b = tempfile.mktemp()
        _dbx("forWordCountCheck_b: %s" % (forWordCountCheck_b))
        open(forWordCountCheck_b, "w").write("\n".join(wordCountResultLines_b))

        _infoTs(" ************ DIFFing WordCounts ... ")
        diffWordCountResult = genUnixDiff(forWordCountCheck_a,
                                          forWordCountCheck_b)

        _infoTs(" ************ result of DIFFing WORD Counts")
        print(diffWordCountResult)

        persistAndPrintName(textName="formatted %s" % argParserResult.inFile,
                            textContent=formattedLines,
                            baseNamePrefix=argParserResult.inFile + '-')

    if "want to " == "use fsmMain":
        commentStack, signifStack = plstopa.separateCommentsFromSignficants(
            tree)

        #print( "*"*80 ); 		commentStack.simpleDump()
        #print( "*"*80 ); 		signifStack.simpleDump()

        signifStack.assembleComplexTokens()
        #signifStack.simpleDump( markComplexIdents= True )

        useStatus = fsm.kickStartStatusByCode[
            g_fsmInitStatusCode] if g_fsmInitStatusCode is not None else plstopa.FsmState.start
        parsedTree = fsm.fsmMain(signifStack, startStatus=useStatus)
        # parsedTree.simpleDump()

        # eunitedTree = plstopa.mergeTokenTrees( commentStack, parsedTree )
        reunitedTree = plstopa.mergeSignifcantAndCommentTrees(
            signifTree=parsedTree, commentTree=commentStack)
        _dbx("reunitedTree len %d" % (len(reunitedTree.arr)))
        print("*" * 30 + "reunited " + "*" * 20)
        #eunitedTree.simpleDump( markComplexIdents = True )

        # reunitedTree.finalizeStats()
        # for node in reunitedTree.arr: node.showInfo()
        print(reunitedTree.formatTokenText())

    if False:
        tree.assembleComplexTokens()
        # tree.simpleDump( markComplexIdents= False )
        tree.simpleDump(markComplexIdents=False)
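# genUnixDiff is called in main() above but defined elsewhere in the project.
# A minimal stand-in sketch, assuming the unix "diff" binary is on the PATH
# (an assumption, not necessarily the author's implementation):
import subprocess

def genUnixDiff(pathA, pathB):
    """ run unix diff on the two files and return its stdout as one string """
    job = subprocess.Popen(["diff", pathA, pathB],
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout, _ = job.communicate()  # diff exits 1 when files differ; output is still usable
    return stdout.decode("utf-8")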
Exemple #26
0
def spoolScriptWithSqlplusTempClob ( spoolDestRoot, dirSep, dbObjects, conn= None, envCode= None, clobTempTable = 'tt_extract_ddl_clob' ): 
  """ Use sqlplus 
    This method requires a global table accessible by the connecting user to write 
    the source code extracted from DBA_SOURCE line by line as CLOB 
  """
  host, port, service, username = conn.host, conn.port, conn.service, conn.username 
 
  spoolScriptHeader = """
WHENEVER SQLERROR EXIT 
WHENEVER OSERROR EXIT 

CONNECT {v_ez_connect}

column db_name new_val db_name
column spool_path_current new_val spool_path_current 


ALTER SESSION SET NLS_LANGUAGE=GERMAN
;
set termout ON 
SELECT sys_context( 'userenv', 'db_name' ) AS db_name 
  , user connect_as 
FROM dual 
;

set linesize 1000 longchunksize 9999999 long 9999999 pagesize 49999
"""
  scriptBlockFor1Object = """
WITH prep_ AS 
( SELECT '{spool_dest_root}{dir_sep}&db_name{dir_sep}' as base_folder
  , UPPER( '{lv_object_name}' ) || '-'||'&db_name' as obj_name_and_db_name
  , CASE upper('{lv_object_type}') 
    WHEN 'PACKAGE_BODY' THEN '.pkb' 
    WHEN 'PACKAGE_SPEC' THEN '.pks' 
    WHEN 'TRIGGER' THEN '.trg' 
    WHEN 'TYPE_BODY' THEN '.tpb' 
    WHEN 'TYPE_SPEC' THEN '.tps' 
    WHEN 'FUNCTION' THEN '.fnc' 
    WHEN 'PROCEDURE' THEN '.prc' 
    WHEN 'VIEW' THEN '.vw' 
    ELSE '.sql' 
    END AS file_ext 
  FROM DUAL 
) 
SELECT base_folder||obj_name_and_db_name||file_ext as  spool_path_current 
FROM prep_ 
;

PROMPT spool_path_current set to &spool_path_current


--CREATE global TEMPORARY TABLE tt_extract_ddl_clob ( owner varchar2(30), type varchar2(30), name varchar2(30), text clob ) on COMMIT preserve rows;

SET ECHO OFF VERIFY OFF 

DECLARE 
  lv_schema VARCHAR2(30) :=  UPPER('{lv_schema}');
  lv_object_type VARCHAR2(30) :=  UPPER('{lv_object_type}');
  lv_type_to_filter  VARCHAR2(30) ;
  lv_object_name VARCHAR2(30) :=  UPPER('{lv_object_name}');
  lv_clob  CLOB := 'CREATE OR REPLACE ';
  lv_text  LONG;
BEGIN
  lv_type_to_filter := 
    CASE lv_object_type 
    WHEN 'PACKAGE_SPEC' THEN 'PACKAGE'
    WHEN 'PACKAGE_BODY' THEN 'PACKAGE BODY'
    WHEN 'TYPE_SPEC' THEN 'TYPE'
    WHEN 'TYPE_BODY' THEN 'TYPE BODY'
    ELSE lv_object_type
    END;

  EXECUTE IMMEDIATE 'truncate table {clobTempTable}';
  FOR rec IN (
    SELECT line, text
    FROM dba_source
    WHERE owner = lv_schema
      AND type  = lv_type_to_filter 
      AND name  = lv_object_name
    ORDER BY line 
  ) LOOP
    lv_text := rec.text; 
    dbms_lob.append( lv_clob, lv_text );
    -- dbms_OUTPUT.put_line( 'Ln'||$$plsql_line||': '||lv_offset );
    -- IF mod(rec.line, 13) = 1 THEN       dbms_output.put_line( rec.text );    END IF;
  END LOOP;
  INSERT INTO {clobTempTable} ( text ) VALUES ( lv_clob );
  COMMIT;
END;
/

set termout off trimspool on head off 

spool &spool_path_current


SELECT text FROM {clobTempTable} ;

spool off
"""

  spoolScriptTrailer = """
EXIT
"""

  password = getOraPassword ( oraUser= username, oraPasswordEnvVar= 'ORA_SECRET', batchMode= False, envSuffix= envCode  )

  ezConnect = """%s/"%s"@(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=%s)(PORT=%s)))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=%s)))""" % ( username, password, host, port, service )
  spoolPath = tempfile.mktemp()

  if "want to fight" == "the DOS vs gitbash vs unix platform gap":
    sqlTermoutPath = tempfile.mktemp()
    _dbx( "sqlTermoutPath %s" )
    sqlpTermoutFh = open( sqlTermoutPath, "w" )

    sqlpJob = subprocess.Popen( [ "sqlplus", "/nolog" ], stdin = subprocess.PIPE, stdout= sqlpTermoutFh )
    # NB: this disabled branch would need spoolScript assembled first (see the else branch below)
    sqlpJob.stdin.write( spoolScript.encode('utf-8') )
    sqlpJob.communicate( )
  
    sqlpTermoutFh = open( sqlTermoutPath, "r" )
    _dbx( sqlpTermoutFh.readlines() )
  else: # build one script block per DBObject
    _infoTs( "The connecting user will need access to global temporary table %s !" % clobTempTable )
    scriptBlocks= []
    for obj in dbObjects:
      scriptBlocks.append( scriptBlockFor1Object.format( spool_dest_root= spoolDestRoot, 
        lv_schema= obj.owner, lv_object_type= obj.type, lv_object_name= obj.name, dir_sep= dirSep,
        clobTempTable = clobTempTable ) )

    _dbx( "len( scriptBlocks ) : %d " % len( scriptBlocks ) )

    headerFormatted = spoolScriptHeader.format( v_ez_connect= ezConnect )
    spoolScript = "%s \n%s \n%s" % ( headerFormatted, "\n".join( scriptBlocks), spoolScriptTrailer )
    sqlplusScriptPath =  tempfile.mktemp() + '.sql'
    _dbx( "sqlplusScriptPath: %s" % ( sqlplusScriptPath ) ) 
    open( sqlplusScriptPath, "w").write( spoolScript )

    return sqlplusScriptPath
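# A hedged usage sketch for spoolScriptWithSqlplusTempClob. The stand-in
# connection and object types below are illustrative assumptions, not the
# project's real classes; the global temporary table must exist beforehand
# (its DDL is shown commented inside the script template above):
import collections
import subprocess

_DemoConn = collections.namedtuple( "_DemoConn", "host port service username" )
_DemoDbObject = collections.namedtuple( "_DemoDbObject", "owner type name" )

def demoSpoolViaTempClob():
  conn = _DemoConn( host="dbhost", port=1521, service="ORCLPDB1", username="scott" )
  objs = [ _DemoDbObject( owner="APP_CORE", type="PACKAGE_BODY", name="PKG_DEMO" ) ]
  scriptPath = spoolScriptWithSqlplusTempClob( spoolDestRoot="c:\\temp", dirSep="\\",
    dbObjects= objs, conn= conn, envCode= "DEV" )
  # hand the generated script over to sqlplus; requires sqlplus on the PATH
  subprocess.check_call( [ "sqlplus", "/nolog", "@%s" % scriptPath ] )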
def fill_listIndexedBySchemaType(linesOfTouchedScripts):
    global g_internalSepator, g_listIndexedBySchemaType, g_schemataFound
    # _dbx( "foo" );  return
    _dbx(": %d" % (len(linesOfTouchedScripts)))
    schemaScripts = {}
    for line in linesOfTouchedScripts:
        pathNodes = line.split("/")
        _dbx(" nodes: %d" % (len(pathNodes)))
        schema = pathNodes[0]
        if len(pathNodes) <= 2:  # skip scripts which are on top level, e.g. BASIC_DATA/master.sql
            continue
        if schema not in schemaScripts.keys():
            _dbx(line)
            _dbx(schema)
            schemaScripts[schema] = []

        relPath = "/".join(pathNodes[1:]).rstrip("\n")

        schemaScripts[schema].append(relPath)

    _infoTs("Found touched scripts for schemata:\n  %s" %
            (",".join(schemaScripts.keys())))

    g_listIndexedBySchemaType = {}
    g_schemataFound = schemaScripts.keys()
    for schema in g_schemataFound:

        scriptList = schemaScripts[schema]
        for script in scriptList:
            fileExt = os.path.splitext(script)[1]
            if fileExt.upper() not in g_excludeTouchWithExtensions:
                scriptType = "UnknownScriptType"
                # _dbx( " ext: %s" % (  fileExt) )
                # extract subfolder name
                pathNodes = script.split("/")
                if len(pathNodes) > 1:  # pattern object_type / script_file
                    subFolder = pathNodes[0]
                else:
                    subFolder = None
                scriptType = "%s%s" % (subFolder if subFolder != None else '',
                                       fileExt.upper())

                schemaType = schema + g_internalSepator + scriptType
                # _dbx("dbx script %s --> schemaType %s" % (script, schemaType) )
                if schemaType not in g_listIndexedBySchemaType:
                    g_listIndexedBySchemaType[schemaType] = []
                _dbx(script)
                script = script.replace('/', '\\')
                script = "@@" + script
                _dbx(script)
                g_listIndexedBySchemaType[schemaType].append(script)
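# A quick illustration of fill_listIndexedBySchemaType with hypothetical
# touched-script paths (assumes g_internalSepator and
# g_excludeTouchWithExtensions are initialized elsewhere, as in the project):
def demoFillListIndexedBySchemaType():
    touchedLines = [
        "APP_CORE/packages/pkg_demo.pkb\n",
        "APP_CORE/tables/t_demo.sql\n",
        "BASIC_DATA/master.sql\n",  # top-level script, gets skipped
    ]
    fill_listIndexedBySchemaType(touchedLines)
    # e.g. key "APP_CORE<sep>packages.PKB" -> ["@@packages\\pkg_demo.pkb"]
    for schemaType, scripts in g_listIndexedBySchemaType.items():
        print(schemaType, "->", scripts)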
Exemple #28
0
def getDiffStatsFromContents(contentA, contentB):
    lnCntA = len(contentA)
    lnCntB = len(contentB)
    _dbx("lnCntA %d" % (lnCntA))
    diffOutput = difflib.context_diff(contentA,
                                      contentB,
                                      fromfile="fileA",
                                      tofile="fileB",
                                      n=1)
    newCnt = 0
    delOrChgCnt = 0
    for ln in diffOutput:
        if ln.startswith("! "): delOrChgCnt += 1
        elif ln.startswith("+ "): newCnt += 1
    _dbx("B has %d new lines and %d changed or deleted lines versus B" %
         (newCnt, delOrChgCnt))
    _dbx("newCnt: %d" % (newCnt))
    _dbx("delOrChgCnt: %d" % (delOrChgCnt))

    maxLnCnt = lnCntA if lnCntA > lnCntB else lnCntB
    minLnCnt = lnCntA if lnCntA < lnCntB else lnCntB
    if maxLnCnt == 0: maxLnCnt = 1
    if minLnCnt == 0: minLnCnt = 1
    avgLnCnt = (maxLnCnt + minLnCnt) / 2
    _dbx("minLnCnt: %d" % (minLnCnt))
    _dbx("maxLnCnt: %d" % (maxLnCnt))
    _dbx("avgLnCnt: %d" % (avgLnCnt))
    # diffGrade meaning:
    # 0: no delta at all
    # 1: minor changes
    # 2: substantial changes
    if (maxLnCnt > minLnCnt * 2):
        diffGrade = 2
    elif (avgLnCnt > (newCnt + delOrChgCnt) * 10):
        diffGrade = 1
    elif avgLnCnt < 100 and (newCnt + delOrChgCnt) <= 10:
        diffGrade = 1
    elif ((newCnt + delOrChgCnt) == 0):
        diffGrade = 0
    else:
        _dbx("default diffGrade")
        diffGrade = 2
    _dbx(diffGrade)

    return lnCntA, lnCntB, newCnt, delOrChgCnt, diffGrade
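# A small worked example of the grading above: one changed line in a 20-line
# text should come back as diffGrade 1 (minor changes):
def demoDiffStats():
    contentA = ["line %d\n" % i for i in range(20)]
    contentB = list(contentA)
    contentB[5] = "line five, changed\n"
    lnA, lnB, newCnt, delOrChgCnt, grade = getDiffStatsFromContents(
        contentA, contentB)
    print("A:%d B:%d new:%d delOrChg:%d grade:%d" %
          (lnA, lnB, newCnt, delOrChgCnt, grade))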