def handler(self, col, colNum=''):
    """Run self.subHandler on a single column value, trapping any error.

    Returns subHandler's result, or None when subHandler raised — the
    error is logged and swallowed so one bad column does not abort the load.
    colNum is only used for the log message.
    """
    try:
        return self.subHandler(col=col)
    except Exception as e:
        # prefer the pre-configured error message when one was set
        err = self.errMsg if self.errMsg else ''
        err = "loaderFunctions->ERROR: Column %s, Msg: %s, Error: %s " % (str(colNum), err, str(e))
        p(err, "e")
        p(traceback.format_exc(), "e")
def __setUpdate(self, val):
    """Validate an update-method code against eConn.updateMethod.

    Returns *val* when it is a digit that maps to a known update method
    (-1 drop, 1 update, 2 no-update); otherwise logs a warning and falls
    back to -1 (drop --> create).
    """
    if str(val).isdigit():
        if findEnum(prop=val, obj=eConn.updateMethod):
            return val
        else:
            # BUG FIX: the %s placeholder was never filled in the original
            p("THERE IS %s WHICH IS MAPPED TO UPDATE PROPERTY, MUST HAVE -1(drop), 1(UPDATE), 2(NO_UPDATE), USING -1 DROP--> CREATE METHOD " % (str(val)))
    return -1
def ding(self):
    """Create/align all target objects for the loaded nodes.

    Walks every node: a TARGET without a SOURCE is created straight from
    self.stt; a SOURCE/TARGET pair first translates the source structure
    to target data types; MERGE entries clone the last seen connection;
    CREATE entries delegate to createFrom.
    NOTE(review): control-flow nesting reconstructed from mangled source —
    confirm against version control.
    """
    if self.nodes and len(self.nodes) > 0:
        src = None
        tar = None
        # last source/target seen; a later MERGE entry clones it
        mrgSource = None
        for node in self.nodes:
            for k in node:
                if eJson.SOURCE == k:
                    src = node[k]
                    mrgSource = src
                elif eJson.TARGET == k:
                    tar = node[k]
                    mrgSource = tar
                    if eJson.SOURCE not in node:
                        # target-only node: create directly from the STT mapping
                        tar.create(stt=self.stt, addIndex=self.addIndex)
                        tar.close()
                        tar = None
                    if tar and src:
                        # convert source data type to target data types
                        targetStt = self.updateTargetBySourceAndStt(src=src, tar=tar)
                        if targetStt and len(targetStt) > 0:
                            tar.create(stt=targetStt, addIndex=self.addIndex)
                        else:
                            p("SOURCE: %s STRUCUTRE NOT DEFINED-> CANNOT CREATE TARGET" % (src.connType), "e")
                        mrgSource = tar
                        src.close()
                        tar.close()
                        src = None
                if eJson.MERGE == k and mrgSource:
                    # merge target is a shallow clone of the last connection
                    mrgTarget = copy.copy(mrgSource)
                    mrgSource.connect()
                    mrgTarget.connect()
                    mrgTarget.connTbl = node[k][eJson.merge.TARGET]
                    mrgTarget.connIsTar = True
                    mrgTarget.connIsSrc = False
                    if eConn.updateMethod.UPDATE in node[k]:
                        mrgTarget.update = node[k][eConn.updateMethod.UPDATE]
                    sttMerge = self.updateTargetBySourceAndStt(src=mrgSource, tar=mrgTarget)
                    mrgTarget.create(stt=sttMerge, addIndex=self.addIndex)
                    mrgTarget.close()
                    mrgSource.close()
                    mrgSource = None
                if eJson.CREATE == k:
                    node[k].createFrom(stt=self.stt, addIndex=self.addIndex)
def connect(self):
    """Open a MongoDB client on self.connUrl and select the configured DB."""
    # resolve the DB-name property (falls back to self.dbName)
    connDbName = self.setProperties(propKey=eConn.props.DB_NAME, propVal=self.dbName)
    self.connDB = pymongo.MongoClient(self.connUrl)
    if connDbName:
        # 'cursor' here actually holds the Database handle, not a pymongo cursor
        self.cursor = self.connDB[connDbName]
    p("CONNECTED, MONGODB DB:%s, URL:%s" % (connDbName, self.connUrl), "ii")
def convertToTargetDataType(self, sttVal, src, tar):
    """Translate each STT column's data type from the source connector's
    type system to the target connector's.

    sttVal : OrderedDict of column -> {TYPE, ALIACE, ...}
    Returns a new OrderedDict keyed by the target column name (the ALIACE
    wins over the original name) with a translated TYPE.
    """
    # source-side column framing/quoting characters, stripped from names below
    srcPre, srcPos = src.columnFrame[0], src.columnFrame[1]
    newSttVal = OrderedDict()
    if src.connType == tar.connType:
        # same connector family: keep types unchanged, only resolve aliases
        for col in sttVal:
            if eJson.stt.ALIACE in sttVal[col] and sttVal[col][eJson.stt.ALIACE]:
                newSttVal[sttVal[col][eJson.stt.ALIACE]] = {
                    eJson.stt.TYPE: sttVal[col][eJson.stt.TYPE]
                }
            else:
                newSttVal[col] = {
                    eJson.stt.TYPE: sttVal[col][eJson.stt.TYPE]
                }
    else:
        for col in sttVal:
            targetColName = col.replace(srcPre, "").replace(srcPos, "")
            if eJson.stt.ALIACE in sttVal[col] and sttVal[col][eJson.stt.ALIACE]:
                targetColName = sttVal[col][eJson.stt.ALIACE].replace(srcPre, "").replace(srcPos, "")
            # fall back to the target's default data type when TYPE is absent
            colType = sttVal[col][eJson.stt.TYPE] if eJson.stt.TYPE in sttVal[col] and sttVal[col][eJson.stt.TYPE] else tar.defDataType
            # split e.g. "varchar(100)" into base type and size suffix
            fmatch = re.search(r'(.*)(\(.+\))', colType, re.M | re.I)
            if fmatch:
                replaceString = fmatch.group(1)  # --> varchar, int , ...
                postType = fmatch.group(2)  # --> (X), (X,X) , ....
            else:
                replaceString = colType
                postType = ''
            ## Receive list of all dataType in DataTypes Tree
            newDataTypeTree = src.getDataTypeTree(dataType=replaceString.lower(), ret=([]))
            if newDataTypeTree is None:
                p("SOURCE CONNECTION: %s, COLUMN: %s, DATA TYPE: %s ; IS NOT EXISTS, WILL USE DEFAULT VALUE" % (src.connType, col, replaceString), "w")
                tarType = tar.defDataType
            else:
                # translate the source type tree into the target's type names
                targetList = tar.setDataTypeTree(dataTypeTree=newDataTypeTree, allDataTypes=tar.dataTypes, ret=[])
                if len(targetList) > 2:
                    # drop empty entries when the tree is deep
                    targetList = [x for x in targetList if x]
                # deepest match wins; preserve the original size suffix
                tarType = '%s%s' % (targetList[-1], postType) if targetList and len(targetList) > 0 and targetList[-1] is not None else tar.defDataType
            newSttVal[targetColName] = {eJson.stt.TYPE: tarType}
    return newSttVal
def __notVaildProp(self, currentPropDic, enumPropertyClass):
    """Filter a property dict against the legal enum property names.

    Returns a dict keyed by the resolved enum property.  Keys that do not
    resolve are logged and dropped.
    """
    ret = {}
    for k in currentPropDic:
        prop = findEnum(prop=k, obj=enumPropertyClass)
        if not prop:
            p("%s: NOT VALID. LEGAL VALUES: %s -> ignore" % (k, str(getAllProp(enumPropertyClass))), "e")
        else:
            # BUG FIX: invalid keys were previously still stored under the
            # falsy `prop` key (None) despite the "-> ignore" message
            ret[prop] = currentPropDic[k]
    return ret
def test(self):
    """Connectivity smoke test: ping the MongoDB server and log its version."""
    try:
        serverTimeoutMs = 1  # Assume 1ms maximum server selection delay
        client = pymongo.MongoClient(self.connUrl,
                                     serverSelectionTimeoutMS=serverTimeoutMs)
        info = client.server_info()
        p("MONGO TEST: INSTALLED VERSION:%s" % str(info['version']))
    except Exception as e:
        err = "Error connecting MONGODB URL: %s, ERROR: %s\n " % (
            self.connUrl, str(e))
        p(err, "e")
def getCommitId(self, vId):
    """Return the commit id stored for version *vId* in self.versionFile.

    The file is a CSV of "versionId,commitId" lines; matching is
    case-insensitive.  Returns None when the file or the version is missing.
    """
    if not os.path.isfile(self.versionFile):
        p("FILE NOT EXISTS ")
        return
    with io.open(self.versionFile, 'r') as f:
        for line in f:
            # BUG FIX: strip the line so the returned commit id does not
            # carry the trailing newline; guard malformed (one-field) rows
            row = line.strip().split(",")
            if len(row) > 1 and str(row[0]).lower() == str(vId).lower():
                return row[1]
    p("CANNOT FIND VERSION %s" % (vId))
    return
def getRemoteRepo(self, create=True):
    """Ensure the remote GitHub repo exists (creating it when *create*)
    and clone it into self.localPath.

    Requires connectRemote()/connectLocal() to have succeeded; a sample
    file is committed right after creation so the clone is non-empty.
    """
    createDesc = 'Local testi ng repo '  # NOTE(review): typo ("testi ng") kept — runtime string
    if not self.remoteConnected:
        self.connectRemote()
    if not self.localConnected:
        self.connectLocal()
    if not self.remoteObj:
        p("GET REMOTE: REMOTE GIT IS NOT CONNECTED !")
        return
    try:
        if self.remoteRepo:
            p("GET REMOTE: REMOTE REPO %s EXISTS, URL: %s" % (self.repoName, self.remoteRepo.git_url))
        elif not self.remoteRepo and create:
            self.remoteRepo = self.remoteUser.create_repo(
                self.repoName,
                description=createDesc,
                has_wiki=False,
                has_issues=True,
                auto_init=False)
            self.remoteUrl = self.remoteRepo.git_url
            # turn git:// url into an https clone url
            startFrom = self.remoteUrl.find("://")
            self.remoteUrlFull = 'https://%s' % (self.remoteUrl[startFrom + 3:])
            ### Adding sample file to check
            self.remoteRepo.create_file("src/test.txt", "test", "test", branch="master")
            p("GET REMOTE: REPO %s CREATED, URL: %s" % (self.remoteRepo.name, self.remoteUrl))
        # start from a clean local folder before cloning
        self.__deleteFolder(fPath=self.localPath, totalRetry=4)
        if not os.path.isdir(self.localPath):
            ## Clone Repo
            localRepo = git.Repo.clone_from(self.remoteUrlFull, self.localPath)
            localRepo.close()
            p("GET LOCAL: CLONED FROM REMOTE TO FOLER %s " % self.localPath)
            #git.Git(self.repoFolder).clone(self.remoteUrl )
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback, limit=4, file=sys.stdout)
        p("Error: %s" % (str(e)))
def getDBStructure(self, tableName, tableSchema):
    """Return the collection's column structure as an OrderedDict in STT
    format ({TYPE, ALIACE: None} per column).

    Non-empty collections are sampled via find_one(); empty ones are
    inspected through their $jsonSchema validator (when defined).
    """
    tableName = self.setTable(tableName=tableName)
    ret = OrderedDict()
    try:
        collection = self.isExists(tableName=tableName, tableSchema=tableSchema)
        if collection:
            cntRows = self.cntRows()
            ## there are rows - will use current strucutre
            if cntRows > 0:
                schemaObj = self.cursor[tableName].find_one()
                if schemaObj and len(schemaObj) > 0:
                    for col in schemaObj:
                        colName = uniocdeStr(col)
                        # NOTE(review): type(col) is the type of the field NAME
                        # (always str) — presumably type(schemaObj[col]) was
                        # intended; confirm before changing.
                        colType = type(col)
                        ret[colName] = {
                            eJson.jSttValues.TYPE: colType,
                            eJson.jSttValues.ALIACE: None
                        }
            else:
                # empty collection: read structure from the validator schema
                collectionInfo = self.cursor.command({
                    'listCollections': 1,
                    'filter': {
                        'name': collection
                    }
                })
                #collectionInfo = self.cursor.get_collection_infos( filter=[collectionsL[tableName.lower()]] )
                if 'cursor' in collectionInfo:
                    cursorObj = collectionInfo['cursor']
                    if 'firstBatch' in cursorObj:
                        firstBatch = cursorObj['firstBatch']
                        for batch in firstBatch:
                            if 'options' in batch:
                                validator = batch['options']['validator']
                                collectionProperties = validator['$jsonSchema']['properties']
                                for col in collectionProperties:
                                    colType = collectionProperties[col]['bsonType']
                                    ret[uniocdeStr(col)] = {
                                        eJson.jSttValues.TYPE: colType,
                                        eJson.jSttValues.ALIACE: None
                                    }
    except Exception as e:
        p("MONGODB-> %s ERROR:\n %s " % (tableName, str(e)), "e")
    return ret
def __createFrom(self, propVal):
    """Normalize a CREATE node value into an ordered property dict.

    Accepts a connection-name string or a [connection name, object name]
    pair; anything else is logged as an error and an empty dict returned.
    """
    ret = OrderedDict()
    if isinstance(propVal, str):
        ret[eConn.props.TYPE] = propVal
    elif isinstance(propVal, (tuple, list)):
        ret[eConn.props.TYPE], ret[eConn.props.TBL] = propVal[0], propVal[1]
    else:
        p("CREATE VALUES MUST BE STRING (connection name) OR LIST [connection name, object name], NOT VALID VALUES:%s" % str(propVal), "e")
    return ret
def connect(self, fileName=None):
    """Bind this connector to a file (or folder) and report success.

    An explicit *fileName* overrides the configured one.  With no file at
    all, a valid folder is accepted; otherwise ValueError is raised.
    """
    if fileName:
        self.fileFullName = fileName
        return True
    elif not self.fileFullName:
        if self.folder and os.path.isdir(self.folder):
            p("CONNETCTED USING FOLDER %s" % self.folder)
            return True
        else:
            err = u"FILE NOT VALID: %s" % (self.fileFullName)
            raise ValueError(err)
    # fileFullName was already configured
    return True
def isExists(self, tableName, tableSchema=None):
    """Return the real (case-preserved) collection name when it exists,
    else None.  tableSchema is accepted for interface parity but unused.
    """
    tableName = self.setTable(tableName=tableName)
    # NOTE(review): collection_names() is removed in newer pymongo versions
    # (list_collection_names is the replacement) — confirm driver version.
    allCollections = self.cursor.collection_names()
    if allCollections and len(allCollections) > 0:
        for coll in allCollections:
            # case-insensitive match, but return mongo's exact name
            if coll.lower() == tableName.lower():
                p("MONGODB COLLECTION %s EXISTS" % (tableName), "ii")
                return coll
    p("MONGODB COLLECTION %s NOT EXISTS" % (tableName), "ii")
    return None
def close(self):
    """Close the MongoDB client and drop the cached handles."""
    try:
        if self.connDB:
            self.connDB.close()
            self.connDB = None
            self.cursor = None
    except Exception as e:
        # log the failing file/line instead of raising out of close()
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        p("ERROR: file name:" + str(fname) + " line: " +
          str(exc_tb.tb_lineno) + " massage: " + str(exc_obj), "e")
def connect(self, fileName=None):
    """Register the configured file — or every file in self.folder — into
    self.objNames and report whether the connection is usable.

    Returns False only for a missing SOURCE file; a missing TARGET file is
    fine (it will be created later).  fileName parameter is unused here —
    NOTE(review): confirm whether it was meant to override fileFullName.
    """
    if self.fileFullName:
        self.objNames[self.fileName] = {
            eObj.FILE_FULL_PATH: self.fileFullName,
            eObj.FILE_FOLDER: self.folder
        }
        if os.path.isfile(self.fileFullName):
            p(u"FILE EXISTS:%s, DELIMITER %s, HEADER %s " % (self.fileFullName, self.delimiter, self.header), "ii")
            return True
        else:
            if self.connIsSrc:
                # a source file must already exist
                p(u"SOURCE FILE NOT EXISTS:%s, DELIMITER %s, HEADER %s " % (self.fileFullName, self.delimiter, self.header), "e")
                return False
            else:
                # a target file may be created later
                p(u"TARGET FILE NOT EXISTS:%s, DELIMITER %s, HEADER %s " % (self.fileFullName, self.delimiter, self.header), "ii")
                return True
    elif os.path.isdir(self.folder):
        # folder mode: register every (optionally filtered) file inside
        self.isSingleObject = False
        for fileName in os.listdir(self.folder):
            fileFullPath = os.path.join(self.folder, fileName)
            pre, pos = self.__getSplitedFileName(fullPath=fileFullPath)
            if self.connFilter:
                # filter by file extension
                if fileName.split('.')[-1] == self.connFilter:
                    self.objNames[pos] = {
                        eObj.FILE_FULL_PATH: fileFullPath,
                        eObj.FILE_FOLDER: self.folder
                    }
                    p(u"FILE IN FOLDER EXISTS:%s, DELIMITER %s, HEADER %s " % (fileName, self.delimiter, self.header), "ii")
            else:
                self.objNames[pos] = {
                    eObj.FILE_FULL_PATH: fileFullPath,
                    eObj.FILE_FOLDER: self.folder
                }
                p(u"FILE IN FOLDER EXISTS:%s, DELIMITER %s, HEADER %s " % (fileName, self.delimiter, self.header), "ii")
        return True
    return False
def __sttAddMappings(self, stt, propVal):
    """Attach SOURCE mappings from *propVal* onto the STT dict.

    propVal maps target column -> source column.  Matching against
    existing STT columns is case-insensitive; unknown columns are added
    as new STT entries.  Returns the (mutated) stt.
    (First parameter renamed from the original 'selfself' typo; the
    positional calling convention is unchanged.)
    """
    if not isinstance(propVal, dict):
        p("jsonParser->__sttAddMappings: Not valid prop %s, must be dictionary type" % (propVal), "e")
        return stt
    existsColumnsDict = {x.lower(): x for x in stt.keys()}
    for tar in propVal:
        if tar.lower() in existsColumnsDict:
            stt[existsColumnsDict[tar.lower()]][eJson.stt.SOURCE] = propVal[tar]
        else:
            # BUG FIX: a brand-new column has no stt[tar] dict yet; create it
            # instead of raising KeyError on stt[tar][...]
            stt[tar] = {eJson.stt.SOURCE: propVal[tar]}
    return stt
def __deleteFolder(self, fPath, totalRetry=4):
    """Forcefully delete the folder tree *fPath*, retrying up to
    *totalRetry* times.

    All entries are chmod'ed user-writable first so rmtree can remove
    read-only files (e.g. git object files on Windows).
    """
    retry = 0
    if fPath:
        if os.path.isdir(fPath):
            #if not os.access(fPath, os.W_OK):
            for root, dirs, files in os.walk(fPath):
                for momo in dirs:
                    os.chmod(os.path.join(root, momo), stat.S_IWUSR)
                for momo in files:
                    os.chmod(os.path.join(root, momo), stat.S_IWUSR)
            # Is the error an access error ?
            while retry < totalRetry:
                retry += 1
                try:
                    shutil.rmtree(fPath)
                    p("DELETED LOCAL FOLDER %s" % (fPath))
                    # success: push retry past the limit to leave the loop
                    retry = totalRetry + 1
                except Exception as e:
                    p("TRY %s OUT OF %s, ERROR DELETE %s " % (str(retry), str(totalRetry), str(fPath)))
                    p(e)
                    # give the OS a moment to release file handles
                    time.sleep(1)
        else:
            p("%s IS NOT EXISTS OR NOT FOLDER " % (fPath))
def __sourceOrTargetOrQueryConn(self, propFullName, propVal):
    """Normalize a source/target/query node value into a property dict.

    Accepted propVal shapes:
      str                    -> table name
      [tbl]                  -> table name
      [conn, tbl]            -> connection type + table
      [conn, tbl, x]         -> x is an update code (digit) or a filter
      [conn, tbl, filt, upd] -> filter + update code
      dict                   -> validated against eConn.props
    """
    ret = {}
    if isinstance(propVal, str):
        ret[eConn.props.NAME] = propFullName
        ret[eConn.props.TYPE] = propFullName
        ret[eConn.props.TBL] = propVal
    elif isinstance(propVal, list):
        ret[eConn.props.NAME] = propFullName
        ret[eConn.props.TYPE] = propFullName
        if len(propVal) == 1:
            ret[eConn.props.TYPE] = propFullName
            ret[eConn.props.TBL] = propVal[0]
        elif len(propVal) == 2:
            ret[eConn.props.TYPE] = propVal[0]
            ret[eConn.props.TBL] = propVal[1]
        elif len(propVal) == 3:
            ret[eConn.props.TYPE] = propVal[0]
            ret[eConn.props.TBL] = propVal[1]
            # third value: numeric string -> update method, otherwise a filter
            if self.__isDigitStr(propVal[2]):
                ret[eConn.props.UPDATE] = self.__setUpdate(propVal[2])
            else:
                ret[eConn.props.FILTER] = propVal[2]
        elif len(propVal) == 4:
            ret[eConn.props.TYPE] = propVal[0]
            ret[eConn.props.TBL] = propVal[1]
            ret[eConn.props.FILTER] = propVal[2]
            ret[eConn.props.UPDATE] = self.__setUpdate(propVal[3])
        else:
            # NOTE(review): message says "1,2 or 3" but 4 values are accepted
            p("%s: Not valid list valuues, must 1,2 or 3 VALUE IS: \n %s" % (str(propFullName), str(propVal)), "e")
    elif isinstance(propVal, dict):
        ret = self.__notVaildProp(currentPropDic=propVal, enumPropertyClass=eConn.props)
        # mirror NAME <-> TYPE so both keys are present when either is
        if eConn.props.NAME not in ret and eConn.props.TYPE in ret:
            ret[eConn.props.NAME] = ret[eConn.props.TYPE]
        if eConn.props.TYPE not in ret and eConn.props.NAME in ret:
            ret[eConn.props.TYPE] = ret[eConn.props.NAME]
    else:
        p("Not valid values: %s " % (propVal), "e")
        return {}
    # query nodes are flagged as raw SQL
    # NOTE(review): a dict with neither NAME nor TYPE reaches here without
    # ret[NAME] — this line would raise KeyError; confirm.
    if findEnum(prop=ret[eConn.props.NAME], obj=eJson) == eJson.QUERY:
        ret[eConn.props.IS_SQL] = True
    return ret
def __sendSMTP(self, msgSubj, msgHtml=None, msgText=None):
    """Send a multipart mail through the SMTP server configured in config.

    msgText may be a string or a list of lines (joined with newlines);
    msgHtml is attached as the HTML alternative.  Raises ValueError when
    the send fails.
    """
    sender = config.SMTP_SENDER
    receivers = ", ".join(config.SMTP_RECEIVERS)
    receiversList = config.SMTP_RECEIVERS
    serverSMTP = config.SMTP_SERVER
    serverUsr = config.SMTP_SERVER_USER
    serverPass = config.SMTP_SERVER_PASS
    # IMPROVED: validate addresses up-front, before building the message
    if not sender or len(sender) == 0:
        p("SENDER IS NOT DEFINES !!!", "e")
        return
    if not receiversList or len(receiversList) == 0:
        p("THERE IS NO EMAIL RECIVER ", "e")
        return
    msg = MIMEMultipart('alternative')
    msg['Subject'] = msgSubj
    msg['From'] = sender
    msg['To'] = receivers
    if msgText:
        if isinstance(msgText, list):
            textInMail = ''.join(l + "\n" for l in msgText)
        else:
            textInMail = msgText
        msg.attach(MIMEText(textInMail, 'plain'))
    if msgHtml and len(msgHtml) > 0:
        msg.attach(MIMEText(msgHtml, 'html'))
    try:
        server = smtplib.SMTP(serverSMTP)
        try:
            server.ehlo()
            if serverUsr and serverPass:
                server.starttls()
                server.login(serverUsr, serverPass)
            server.sendmail(sender, receiversList, msg.as_string())
        finally:
            # BUG FIX: always close the SMTP session, even when sendmail raises
            server.quit()
    except smtplib.SMTPException:
        err = "gFunc->sendMsg: unable to send email to %s, subject is: %s " % (
            str(receivers), str(msgSubj))
        raise ValueError(err)
def addPropToDict(existsDict, newProp):
    """Merge *newProp* into *existsDict* without overwriting existing
    non-None values; a plain string is stored as the URL property.

    Returns the (mutated) existsDict.
    """
    if newProp and isinstance(newProp, (dict, OrderedDict)):
        for key, value in newProp.items():
            if key in existsDict and isinstance(value, dict):
                # NOTE(review): nested dicts are merged into the TOP-level
                # dict, not into existsDict[key] — confirm this flattening
                # is intentional.
                existsDict = addPropToDict(existsDict, newProp=value)
            elif key not in existsDict or existsDict[key] is None:
                existsDict[key] = value
    elif isinstance(newProp, str):
        existsDict[eConn.props.URL] = newProp
    else:
        p("THERE IS AN ERROR ADDING %s INTO DICTIONARY " % (newProp), "e")
    return existsDict
def end(self, msg=None, pr=True):
    """Record the final summary state and optionally print all states.

    Columns with zero tasks are bumped to 1 so the total reflects at
    least one task per column.
    """
    msg = msg if msg else msgProp.MSG_LAST_STEP
    totalTasks = 0
    for col in self.stateDic:
        if self.stateDic[col][msgProp.TASKS] == 0:
            self.stateDic[col][msgProp.TASKS] = 1
        totalTasks += self.stateDic[col][msgProp.TASKS]
    self.addState(sDesc=msg, totalTasks=totalTasks)
    if pr:
        for col in self.stateDic:
            p(list(self.stateDic[col].values()))
def __execEachLine(connObj, sqlTxt):
    """Split a SQL script into statements and execute each one on connObj.

    NOTE(review): locParams and __split_sql_expressions are resolved from
    the enclosing (module or outer-function) scope — confirm.
    """
    sqlQuery = __split_sql_expressions(sqlTxt)
    isParam = True if len(locParams) > 0 else False
    for sql in sqlQuery:
        # collapse runs of whitespace into single spaces
        sql = re.sub(r"\s+", " ", sql)
        if isParam:
            sql = connObj.setQueryWithParams(query=sql, queryParams=locParams)
        if 'PRINT' in sql:
            # surface T-SQL PRINT statements in our own log
            disp = sql.split("'")[1]
            p('SQL PRINT: ' + disp, "i")
        if len(sql) > 1:
            sql = str(sql) if connObj.isExtractSqlIsOnlySTR else uniocdeStr(sql)
            connObj.exeSQL(sql=sql)
            p(u"FINISH EXEC: %s" % uniocdeStr(sql), "i")
def __setVersionFromFile(self):
    """Read/advance the running version number stored in self.vFileName.

    Creates the file seeded with config.VERSION on first use; otherwise
    increments the last recorded version and appends the new one.
    Returns True on success, False (after logging) on any error.
    """
    try:
        if not os.path.isfile(self.vFileName):
            self.version = config.VERSION
            with open(self.vFileName, 'w') as f:
                f.write('%s\n' % str(self.version))
        else:
            with open(self.vFileName, 'r+') as f:
                lines = f.read().splitlines()
                # BUG FIX: an existing-but-empty file crashed on lines[-1];
                # fall back to the configured base version instead
                if lines:
                    curr_version = lines[-1]
                    self.version = str(int(curr_version) + 1)
                else:
                    self.version = config.VERSION
                # file pointer is at EOF after read(), so this appends
                f.write('%s\n' % (self.version))
        return True
    except Exception as e:
        p("ERROR: %s" % (e))
        return False
def cloneObject(self, stt=None, fullPath=None):
    """Archive the current file before a structure change.

    When the file exists and its header columns differ from *stt*
    (case-insensitive set compare), and DING_TRACK_OBJECT_HISTORY is on,
    the file is copied aside as name_YYMMDD[_n].ext.
    """
    fullPath = fullPath if fullPath else self.fileFullName
    fileName = os.path.basename(fullPath)
    fileDir = os.path.dirname(fullPath)
    fileNameNoExtenseion = os.path.splitext(fileName)[0]
    fimeNameExtension = os.path.splitext(fileName)[1]
    ### check if table exists - if exists, create new table
    isFileExists = os.path.isfile(fullPath)
    toUpdateFile = False
    if isFileExists:
        actulSize = os.stat(fullPath).st_size
        if actulSize < self.fileMinSize:
            p("FILE %s EXISTS WITH SIZE SMALLER THAN %s --> WONT UPDATE ..." % (fullPath, str(actulSize)), "ii")
            toUpdateFile = False
        # compare current header columns against the requested stt
        fileStructure = self.getStructure(fullPath=fullPath)
        fileStructureL = [x.lower() for x in fileStructure]
        # assumes stt is an iterable of column names — TODO confirm (None would raise)
        sttL = [x.lower() for x in stt]
        if set(fileStructureL) != set(sttL):
            toUpdateFile = True
            p("FILE %s EXISTS, SIZE %s STRUCTURE CHANGED !!" % (fullPath, str(actulSize)), "ii")
        else:
            p("FILE %s EXISTS, SIZE %s STRUCURE DID NOT CHANGED !! " % (fullPath, str(actulSize)), "ii")
    if toUpdateFile and config.DING_TRACK_OBJECT_HISTORY:
        oldName = None
        if (os.path.isfile(fullPath)):
            # first candidate archive name: name_YYMMDD.ext
            oldName = fileNameNoExtenseion + "_" + str(
                time.strftime('%y%m%d')) + fimeNameExtension
            oldName = os.path.join(fileDir, oldName)
            if (os.path.isfile(oldName)):
                # already archived today: probe numeric suffixes _1, _2, ...
                num = 1
                oldName = os.path.splitext(oldName)[0] + "_" + str(
                    num) + os.path.splitext(oldName)[1]
                oldName = os.path.join(fileDir, oldName)
                while (os.path.isfile(oldName)):
                    num += 1
                    FileNoExt = os.path.splitext(oldName)[0]
                    FileExt = os.path.splitext(oldName)[1]
                    oldName = FileNoExt[:FileNoExt.rfind('_')] + "_" + str(num) + FileExt
                    oldName = os.path.join(fileDir, oldName)
        if oldName:
            p("FILE HISTORY, FILE %s EXISTS, COPY FILE TO %s " % (str(self.fileName), str(oldName)), "ii")
            shutil.copy(fullPath, oldName)
def mngConnectors(propertyDict, connLoadProp=None): connLoadProp = connLoadProp if connLoadProp else config.CONNECTIONS ## Merge by CONNECTION if eConn.props.TYPE in propertyDict and propertyDict[eConn.props.TYPE] in connLoadProp: connValues = connLoadProp [ propertyDict[eConn.props.TYPE] ] for val in connValues: propertyDict[ val ] = connValues[ val ] if propertyDict and isinstance(propertyDict, dict) and eConn.props.TYPE in propertyDict: cType = propertyDict[eConn.props.TYPE] if cType in CLASS_TO_LOAD: return CLASS_TO_LOAD[cType]( propertyDict=propertyDict ) else: p("CONNECTION %s is NOT DEFINED. PROP: %s" % (str(cType), str(propertyDict)), "e") else: p ("connectorMng->mngConnectors: must have TYPE prop. prop: %s " %(str(propertyDict)), "e")
def getStructure(self, objects=None):
    """Return the parsed structure for one file or for all known files.

    *objects* may be None (use self.objNames), a single object key, or a
    dict of objects.  Single-object connectors read self.fileFullName.
    """
    objDict = objects if objects else self.objNames
    if not isinstance(objDict, (dict, OrderedDict)):
        # a single object key was passed
        if objects in self.objNames:
            return self.__getStructure(
                fullPath=self.objNames[objects][eObj.FILE_FULL_PATH])
        else:
            p("FILE %s IS NOT EXISTS " % str(objects))
            return None
    if self.isSingleObject:
        return self.__getStructure(fullPath=self.fileFullName)
    else:
        # one structure per registered object, keyed by object name
        retDicStructure = OrderedDict()
        for ff in objDict:
            retDicStructure[ff] = self.__getStructure(
                fullPath=objDict[ff][eObj.FILE_FULL_PATH])
        return retDicStructure
def __init__(self, node, connDict=None, versionManager=None):
    """Normalize *node* (dict or list of dicts) and build the execution list."""
    self.stt = None
    self.addSourceColumn = True
    self.addIndex = None
    self.nodes = None
    self.connDict = connDict if connDict else config.CONNECTIONS
    self.versionManager = versionManager
    if isinstance(node, (list, tuple)):
        jsonNodes = node
    elif isinstance(node, (dict, OrderedDict)):
        jsonNodes = [node]
    else:
        jsonNodes = []
        p("NODE IS NOT LIST OR DICTIONARY, IGNORE NODE")
    ## INIT LIST NODES TO EXECUTE
    if len(jsonNodes) > 0:
        self.nodes = self.initNodes(jsonNodes)
def connectRemote(self):
    """Log in to GitHub and bind the remote repo matching self.repoName."""
    try:
        self.remoteObj = Github(self.remoteLoginUser, self.remoteLoginPass)
        # NOTE(review): the password is written to the log here — consider
        # redacting.
        p("INIT: CONNECTED TO GITHUB USING USER: %s, PASS: %s " % (self.remoteLoginUser, self.remoteLoginPass))
        self.remoteUser = self.remoteObj.get_user()
        for repo in self.remoteUser.get_repos():
            if repo.name.lower() == self.repoName.lower():
                self.repoName = repo.name
                self.remoteUrl = repo.git_url
                # self.remoteUrlFull
                # embed credentials into an https clone URL
                startFrom = self.remoteUrl.find("://")
                self.remoteUrlFull = 'https://%s:%s@%s' % (
                    self.remoteLoginUser, self.remoteLoginPass,
                    self.remoteUrl[startFrom + 3:])
                self.remoteRepo = repo
                p("SET: USING REMOTE GITHUB REPO %s, URL: %s" % (self.repoName, self.remoteUrl))
                self.remoteConnected = True
                break
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback, limit=4, file=sys.stdout)
        p("Error: %s" % (str(e)))
        # NOTE(review): marking the connection established after a failure
        # looks wrong — presumably False was intended; confirm before changing.
        self.remoteConnected = True
def __execParallel(priority, ListOftupleFiles, connObj, msg=None):
    """Execute SQL script files, serially or via a multiprocessing pool.

    ListOftupleFiles: iterable of (sqlFiles, locParams) tuples.  A single
    script — or any negative priority — runs in-process; otherwise the
    scripts are fanned out to a Pool of workers.
    """
    multiProcessParam = []
    multiProcessFiles = ''
    for tupleFiles in ListOftupleFiles:
        sqlFiles = tupleFiles[0]
        locParams = tupleFiles[1]
        for sqlScript in sqlFiles:
            multiProcessParam.append(
                (sqlScript, locParams, connObj, config.LOGS_DEBUG))
            multiProcessFiles += "'" + sqlScript + "' ; "
            if msg:
                msg.addStateCnt()
    # single process
    if priority < 0 or len(multiProcessParam) < 2:
        p("SINGLE PROCESS: %s" % (str(multiProcessFiles)), "ii")
        for query in multiProcessParam:
            _execSql(query)
    # multiprocess execution
    else:
        if len(multiProcessParam) > 1:
            p("%s PROCESS RUNING: %s" % (str(len(multiProcessParam)), str(multiProcessFiles)), "ii")
            # Strat runing all processes
            # NOTE(review): DONG_MAX_PARALLEL_THREADS — other config keys use
            # the DING_ prefix; confirm this is not a typo.
            proc = multiprocessing.Pool(config.DONG_MAX_PARALLEL_THREADS).map(
                _execSql, multiProcessParam)
    p("FINISH EXECUTING PRIORITY %s, LOADED FILES: %s >>>> " % (str(priority), str(multiProcessFiles)), "i")
def getSql(self, conn, sqlType, **args):
    """Build and return the SQL for *sqlType* on connection *conn*.

    The matching builder populates self.connQuery / self.default; returns
    the connection-specific SQL, the default SQL as a fallback, or None
    when the type or connection is unknown.
    """
    self.default = None
    for c in self.allConn:
        self.connQuery[c] = None
    # dispatch table replaces the original if/elif ladder (same builders)
    builders = {
        eSql.RENAME: self.setSqlRename,
        eSql.DROP: self.setSqlDrop,
        eSql.TRUNCATE: self.setSqlTruncate,
        eSql.STRUCTURE: self.setSqlTableStructure,
        eSql.MERGE: self.setSqlMerge,
        eSql.ISEXISTS: self.setSqlIsExists,
        eSql.DELETE: self.setSqlDelete,
        eSql.TABLE_COPY_BY_COLUMN: self.tblCopyByColumn,
        eSql.INDEX_EXISTS: self.setSqlExistingIndexes,
        eSql.INDEX: self.setSqlIndex,
        eSql.COLUMN_UPDATE: self.setSqlColumnUpdate,
        eSql.COLUMN_DELETE: self.setSqlColumnDelete,
        eSql.COLUMN_ADD: self.setSqlColumnAdd,
        eSql.CREATE_FROM: self.setSqlCreateFrom,
        eSql.ALL_TABLES: self.setSqlGetAllTables,
    }
    builder = builders.get(sqlType)
    if builder is None:
        p("%s COMMAND IS NOT DEFINED !" % (sqlType.upper()), "e")
        return None
    builder(**args)
    if conn not in self.connQuery:
        p("%s SQL QUERY FOR CONNENTION %s NOT IMPLEMENTED !" % (sqlType.upper(), conn), "e")
        return None
    if not self.connQuery[conn]:
        p("%s SQL QUERY FOR CONNECTION %s USING DEFAULT SQL " % (sqlType.upper(), conn), "ii")
        return self.default
    return self.connQuery[conn]