Code example #1
File: CrateDbLoader.py Project: rcsb/py-rcsb_db
    def __init__(self,
                 schemaDefObj,
                 ioObj=None,
                 dbCon=None,
                 workPath=".",
                 cleanUp=False,
                 warnings="default",
                 verbose=True):
        self.__verbose = verbose
        self.__debug = False
        self.__sD = schemaDefObj
        self.__ioObj = ioObj
        #
        self.__dbCon = dbCon
        self.__workingPath = workPath
        self.__pathList = []
        self.__cleanUp = cleanUp
        #
        # self.__sdp = SchemaDefDataPrep(schemaDefAccessObj=schemaDefObj, ioObj=IoAdapter(), verbose=True)
        #
        self.__warningAction = warnings
        #
        self.__fTypeRow = "skip-max-width"
        dtf = DataTransformFactory(schemaDefAccessObj=self.__sD,
                                   filterType=self.__fTypeRow)
        self.__sdp = SchemaDefDataPrep(schemaDefAccessObj=self.__sD,
                                       dtObj=dtf,
                                       workPath=self.__workingPath,
                                       verbose=self.__verbose)
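
The same DataTransformFactory/SchemaDefDataPrep wiring recurs in every example below. A minimal sketch of it as a standalone helper: the import paths are assumptions (adjust to the installed rcsb.db layout), and makeDataPrep is a hypothetical name, not part of the package.

# Hedged sketch of the wiring used by these loaders; import paths are assumed.
from rcsb.db.processors.DataTransformFactory import DataTransformFactory
from rcsb.db.processors.SchemaDefDataPrep import SchemaDefDataPrep


def makeDataPrep(schemaDefObj, workPath=".", verbose=False):
    # "skip-max-width" mirrors the row filter type hard-wired in the constructors above.
    dtf = DataTransformFactory(schemaDefAccessObj=schemaDefObj, filterType="skip-max-width")
    return SchemaDefDataPrep(schemaDefAccessObj=schemaDefObj, dtObj=dtf, workPath=workPath, verbose=verbose)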
Code example #2
File: CockroachDbLoader.py Project: rcsb/py-rcsb_db
    def __init__(self, schemaDefObj, ioObj=None, dbCon=None, workPath=".", cleanUp=False, warnings="default", verbose=True):
        """Map PDBx/mmCIF instance data to SQL loadable data using external schema definition.

        Args:
            schemaDefObj (object): Description
            ioObj (None, optional): Description
            dbCon (None, optional): Description
            workPath (str, optional): Description
            cleanUp (bool, optional): Description
            warnings (str, optional): Description
            verbose (bool, optional): Description
        """
        self.__verbose = verbose
        self.__debug = False
        self.__sD = schemaDefObj
        self.__ioObj = ioObj
        #
        self.__dbCon = dbCon
        self.__workingPath = workPath
        self.__pathList = []
        self.__cleanUp = cleanUp
        #
        # self.__sdp = SchemaDefDataPrep(schemaDefObj=schemaDefObj, ioObj=IoAdapter(), verbose=True)
        #
        self.__warningAction = warnings
        self.__fTypeRow = "skip-max-width"
        dtf = DataTransformFactory(schemaDefAccessObj=self.__sD, filterType=self.__fTypeRow)
        self.__sdp = SchemaDefDataPrep(schemaDefAccessObj=self.__sD, dtObj=dtf, workPath=self.__workingPath, verbose=self.__verbose)
Code example #3
    def __init__(self, cfgOb, schemaDefObj, cfgSectionName="site_info_configuration", dbCon=None, cachePath=".", workPath=".", cleanUp=False, warnings="default", verbose=True):
        self.__verbose = verbose
        self.__debug = False
        self.__cfgOb = cfgOb
        sectionName = cfgSectionName
        self.__sD = schemaDefObj

        #
        self.__dbCon = dbCon
        self.__cachePath = cachePath
        self.__workPath = workPath
        self.__pathList = []
        self.__cleanUp = cleanUp
        #
        self.__colSep = "&##&\t"
        self.__rowSep = "$##$\n"
        #
        #
        self.__fTypeRow = "skip-max-width"
        self.__fTypeCol = "skip-max-width"
        #
        self.__warningAction = warnings
        dtf = DataTransformFactory(schemaDefAccessObj=self.__sD, filterType=self.__fTypeRow)
        self.__sdp = SchemaDefDataPrep(schemaDefAccessObj=self.__sD, dtObj=dtf, workPath=self.__cachePath, verbose=self.__verbose)
        self.__rpP = RepositoryProvider(cfgOb=self.__cfgOb, cachePath=self.__cachePath)
        #
        schemaName = self.__sD.getName()
        modulePathMap = self.__cfgOb.get("DICT_METHOD_HELPER_MODULE_PATH_MAP", sectionName=sectionName)
        dP = DictionaryApiProviderWrapper(self.__cfgOb, self.__cachePath, useCache=True)
        dictApi = dP.getApiByName(schemaName)
        rP = DictMethodResourceProvider(self.__cfgOb, cachePath=self.__cachePath)
        self.__dmh = DictMethodRunner(dictApi, modulePathMap=modulePathMap, resourceProvider=rP)
Code example #4
    def __simpleSchemaDataPrep(self, contentType, filterType, styleType, mockLength, rejectLength=0, dataSelectors=None, mergeContentTypes=None):
        """Internal method for preparing file-based data NOT requiring dynamic methods, slicing, or key injection.

        Args:
            contentType (str): Content type name
            filterType (str): List of data processing options (separated by '|') (e.g. "drop-empty-attributes|drop-empty-tables|skip-max-width|...")
            styleType (str): organization of the output document (e.g. rowwise-by-name)
            mockLength (int): Expected length of the test data for the input content type
            rejectLength (int, optional): number of input data sets rejected by the data selection criteria. Defaults to 0.
            dataSelectors (list of str, optional): data selection criteria. Defaults to None.
            mergeContentTypes (list of str, optional): list of content types to merge with the input data set. Defaults to None. (e.g. ['vrpt'])
        """
        try:
            dataSelectors = dataSelectors if dataSelectors else ["PUBLIC_RELEASE"]
            dD = self.__schP.makeSchemaDef(contentType, dataTyping="ANY", saveSchema=True)
            _ = SchemaDefAccess(dD)
            inputPathList = self.__rpP.getLocatorObjList(contentType=contentType, mergeContentTypes=mergeContentTypes)
            sd, _, _, _ = self.__schP.getSchemaInfo(databaseName=contentType, dataTyping="ANY")
            dtf = DataTransformFactory(schemaDefAccessObj=sd, filterType=filterType)
            sdp = SchemaDefDataPrep(schemaDefAccessObj=sd, dtObj=dtf, workPath=self.__cachePath, verbose=self.__verbose)
            #

            logger.debug("For %s mock length %d length of path list %d\n", contentType, mockLength, len(inputPathList))
            self.assertEqual(len(inputPathList), mockLength)
            tableDataDictList, containerNameList, rejectList = sdp.fetchDocuments(inputPathList, styleType=styleType, filterType=filterType, dataSelectors=dataSelectors)
            logger.debug("For %s mock length %d reject length %d length of tddl list %d\n", contentType, mockLength, rejectLength, len(tableDataDictList))
            self.assertEqual(len(tableDataDictList), mockLength - rejectLength)
            self.assertEqual(len(containerNameList), mockLength - rejectLength)

            if rejectList:
                logger.debug("For %s rejecting components %r", contentType, rejectList)
            #
            self.assertEqual(len(rejectList), rejectLength)
            fName = "simple-prep-%s-%s.json" % (contentType, styleType)
            if self.__exportFlag:
                fPath = os.path.join(self.__outputPath, fName)
                self.__mU.doExport(fPath, tableDataDictList, fmt="json", indent=3)
            if self.__diffFlag:
                fPath = os.path.join(self.__savedOutputPath, fName)
                refDocList = self.__mU.doImport(fPath, fmt="json")
                self.assertEqual(len(refDocList), len(tableDataDictList))
                #
                jD = diff(refDocList, tableDataDictList, syntax="explicit", marshal=True)
                if jD:
                    _, fn = os.path.split(fPath)
                    bn, _ = os.path.splitext(fn)
                    fPath = os.path.join(self.__outputPath, bn + "-diff.json")
                    logger.debug("jsondiff for %s %s = \n%s", contentType, styleType, pprint.pformat(jD, indent=3, width=100))
                    self.__mU.doExport(fPath, jD, fmt="json", indent=3)
                self.assertEqual(len(jD), 0)

        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()
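
As the Args above note, filterType is a single '|'-separated option string rather than a list; a small runnable illustration:

# filterType packs the processing options into one '|'-separated string;
# the individual options are recovered with a plain split.
filterType = "drop-empty-attributes|drop-empty-tables|skip-max-width"
options = filterType.split("|")
assert "skip-max-width" in options and len(options) == 3
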
Code example #5
    def __testPrepDocumentsFromContainers(self, inputPathList, databaseName, collectionName, styleType="rowwise_by_name_with_cardinality", mergeContentTypes=None):
        """Test case - create loadable PDBx data from repository files."""
        try:

            sd, _, _, _ = self.__schP.getSchemaInfo(databaseName)
            #
            dP = DictionaryApiProviderWrapper(self.__cfgOb, self.__cachePath, useCache=False)
            dictApi = dP.getApiByName(databaseName)
            rP = DictMethodResourceProvider(self.__cfgOb, configName=self.__configName, cachePath=self.__cachePath, siftsAbbreviated="TEST")
            dmh = DictMethodRunner(dictApi, modulePathMap=self.__modulePathMap, resourceProvider=rP)
            #
            dtf = DataTransformFactory(schemaDefAccessObj=sd, filterType=self.__fTypeRow)
            sdp = SchemaDefDataPrep(schemaDefAccessObj=sd, dtObj=dtf, workPath=self.__cachePath, verbose=self.__verbose)
            containerList = self.__rpP.getContainerList(inputPathList)
            for container in containerList:
                cName = container.getName()
                logger.debug("Processing container %s", cName)
                dmh.apply(container)
                if self.__export:
                    savePath = os.path.join(HERE, "test-output", cName + "-with-method.cif")
                    # self.__mU.doExport(savePath, [container], fmt="mmcif")
            #
            tableIdExcludeList = sd.getCollectionExcluded(collectionName)
            tableIdIncludeList = sd.getCollectionSelected(collectionName)
            sliceFilter = sd.getCollectionSliceFilter(collectionName)
            sdp.setSchemaIdExcludeList(tableIdExcludeList)
            sdp.setSchemaIdIncludeList(tableIdIncludeList)
            #
            docList, containerNameList, _ = sdp.processDocuments(
                containerList, styleType=styleType, filterType=self.__fTypeRow, dataSelectors=["PUBLIC_RELEASE"], sliceFilter=sliceFilter, collectionName=collectionName
            )

            docList = sdp.addDocumentPrivateAttributes(docList, collectionName)
            docList = sdp.addDocumentSubCategoryAggregates(docList, collectionName)
            #
            mergeS = "-".join(mergeContentTypes) if mergeContentTypes else ""
            if self.__export and docList:
                # for ii, doc in enumerate(docList[:1]):
                for ii, doc in enumerate(docList):
                    cn = containerNameList[ii]
                    fp = os.path.join(HERE, "test-output", "prep-%s-%s-%s-%s.json" % (cn, databaseName, collectionName, mergeS))
                    self.__mU.doExport(fp, [doc], fmt="json", indent=3)
                    logger.debug("Exported %r", fp)
            #
            return docList, containerNameList

        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()
Code example #6
File: CockroachDbLoader.py Project: rcsb/py-rcsb_db
class CockroachDbLoader(object):

    """Map PDBx/mmCIF instance data to SQL loadable data using external schema definition."""

    def __init__(self, schemaDefObj, ioObj=None, dbCon=None, workPath=".", cleanUp=False, warnings="default", verbose=True):
        """Map PDBx/mmCIF instance data to SQL loadable data using external schema definition.

        Args:
            schemaDefObj (object): Description
            ioObj (None, optional): Description
            dbCon (None, optional): Description
            workPath (str, optional): Description
            cleanUp (bool, optional): Description
            warnings (str, optional): Description
            verbose (bool, optional): Description
        """
        self.__verbose = verbose
        self.__debug = False
        self.__sD = schemaDefObj
        self.__ioObj = ioObj
        #
        self.__dbCon = dbCon
        self.__workingPath = workPath
        self.__pathList = []
        self.__cleanUp = cleanUp
        #
        # self.__sdp = SchemaDefDataPrep(schemaDefObj=schemaDefObj, ioObj=IoAdapter(), verbose=True)
        #
        self.__warningAction = warnings
        self.__fTypeRow = "skip-max-width"
        dtf = DataTransformFactory(schemaDefAccessObj=self.__sD, filterType=self.__fTypeRow)
        self.__sdp = SchemaDefDataPrep(schemaDefAccessObj=self.__sD, dtObj=dtf, workPath=self.__workingPath, verbose=self.__verbose)
        #

    def load(self, inputPathList=None, containerList=None, loadType="batch-file", deleteOpt=None, tableIdSkipD=None):
        """Load data for each table defined in the current schema definition object.
        Data are extracted from the input file or container list.

        Data source options:
              inputPathList = [<full path of target input file>, ....]

            or

              containerList = [ data container, ...]


            loadType  =  ['cockroach-insert' | 'cockroach-insert-many']
            deleteOpt = 'selected' | 'all'

            tableIdSkipD - searchable container of tableIds to be skipped on loading

            Loading is performed using the current database server connection.

            Intermediate data files for 'batch-file' loading are created in the current working path.

            Returns True for success or False otherwise.

        Args:
            inputPathList (list, optional): Description
            containerList (list, optional): Description
            loadType (str, optional): Description
            deleteOpt (None, optional): Description
            tableIdSkipD (None, optional): Description

        Returns:
            TYPE: Description

        """
        tableIdSkipD = tableIdSkipD if tableIdSkipD is not None else {}
        if inputPathList is not None:
            tableDataDict, containerNameList = self.__sdp.fetch(inputPathList)
        elif containerList is not None:
            tableDataDict, containerNameList = self.__sdp.process(containerList)
        else:
            # Neither a path list nor a container list was provided.
            return False
        #

        if loadType in ["cockroach-insert", "cockroach-insert-many"]:
            sqlMode = "single"
            if loadType in ["cockroach-insert-many"]:
                sqlMode = "many"
            for tableId, rowList in tableDataDict.items():
                if tableId in tableIdSkipD:
                    continue
                if deleteOpt in ["all", "truncate", "selected"] or rowList:
                    self.__cockroachInsertImport(tableId, rowList=rowList, containerNameList=containerNameList, deleteOpt=deleteOpt, sqlMode=sqlMode)
            return True

        return False
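
For reference, the loadType-to-sqlMode mapping used by load() above, restated as a runnable snippet:

# Standalone restatement of the sqlMode selection in load() above.
def sqlModeFor(loadType):
    return "many" if loadType == "cockroach-insert-many" else "single"

assert sqlModeFor("cockroach-insert") == "single"
assert sqlModeFor("cockroach-insert-many") == "many"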

    def __cockroachInsertImport(self, tableId, rowList=None, containerNameList=None, deleteOpt="selected", sqlMode="many"):
        """Load the input table using sql cockroach templated inserts of the input rowlist of dictionaries (i.e. d[attributeId]=value).

        The containerNameList corresponding to the data within loadable data in rowList can be provided
        if 'selected' deletions are to performed prior to the the batch data inserts.

        deleteOpt = ['selected','all'] where 'selected' deletes rows corresponding to the input container
                    list before insert.   The 'all' options truncates the table prior to insert.

                    Deletions are performed in the absence of loadable data.

        Args:
            tableId (TYPE): Description
            rowList (None, optional): Description
            containerNameList (None, optional): Description
            deleteOpt (str, optional): Description
            sqlMode (str, optional): Description

        Returns:
            TYPE: Description

        """
        startTime = time.time()
        crQ = CockroachDbQuery(dbcon=self.__dbCon, verbose=self.__verbose)
        sqlGen = SqlGenAdmin(self.__verbose)
        #
        databaseName = self.__sD.getVersionedDatabaseName()
        tableDefObj = self.__sD.getTable(tableId)
        tableName = tableDefObj.getName()
        tableAttributeIdList = tableDefObj.getAttributeIdList()
        tableAttributeNameList = tableDefObj.getAttributeNameList()
        #
        sqlDeleteList = None
        if deleteOpt in ["selected", "delete"] and containerNameList is not None:
            deleteAttributeName = tableDefObj.getDeleteAttributeName()
            logger.debug("tableName %s delete attribute %s", tableName, deleteAttributeName)
            sqlDeleteList = sqlGen.deleteFromListSQL(databaseName, tableName, deleteAttributeName, containerNameList, chunkSize=10)
            # logger.debug("Delete SQL for %s : %r" % (tableId, sqlDeleteList))
        elif deleteOpt in ["all", "truncate"]:
            sqlDeleteList = [sqlGen.truncateTableSQL(databaseName, tableName)]
        #
        lenC = len(rowList) if rowList else 0
        if sqlDeleteList:
            logger.debug("Deleting from table %s length %d", tableName, lenC)
            crQ.sqlCommandList(sqlDeleteList)
            logger.debug("Delete commands %s", sqlDeleteList)
        endTime1 = time.time()
        logger.debug("Deletion completed for table %s %d rows at %s (%.3f seconds)", tableName, lenC, time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime1 - startTime)

        if not rowList:
            logger.debug("Skipping insert for table %s (%d containers)", tableName, len(containerNameList) if containerNameList else 0)
            return True
        #
        logger.debug("Insert begins for table %s with row length %d", tableName, len(rowList))
        sqlInsertList = []
        tupL = list(zip(tableAttributeIdList, tableAttributeNameList))
        if sqlMode == "many":
            aList = []
            for tId, nm in tupL:
                aList.append(tId)
            #
            vLists = []
            for row in rowList:
                vList = []
                for tId, nm in tupL:
                    if row[tId] and row[tId] != r"\N":
                        vList.append(row[tId])
                    else:
                        vList.append(None)
                vLists.append(vList)
            #
            ret = crQ.sqlTemplateCommandMany(sqlTemplate=sqlGen.idInsertTemplateSQL(databaseName, tableDefObj, aList), valueLists=vLists)
            endTime = time.time()
            if ret:
                logger.debug("Insert succeeds for table %s %d rows at %s (%.3f seconds)", tableName, lenC, time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - endTime1)
            else:
                logger.error("Insert fails for table %s %d rows at %s (%.3f seconds)", tableName, lenC, time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - endTime1)
        else:
            aList = [nm for _, nm in tupL]
            #
            for row in rowList:
                vList = []
                for tId, nm in tupL:
                    if row[tId] is not None and row[tId] != r"\N":
                        vList.append(row[tId])
                    else:
                        vList.append(None)
                sqlInsertList.append((sqlGen.insertTemplateSQL(databaseName, tableName, aList), vList))
            #
            lenT = len(sqlInsertList)
            lenR = crQ.sqlTemplateCommandList(sqlInsertList)
            #
            ret = lenR == lenT
            endTime = time.time()
            if ret:
                logger.debug(
                    "Insert succeeds for table %s %d of %d rows at %s (%.3f seconds)",
                    tableName,
                    lenR,
                    lenT,
                    time.strftime("%Y %m %d %H:%M:%S", time.localtime()),
                    endTime - endTime1,
                )
            else:
                logger.error(
                    "Insert fails for table %s %d of %d rows at %s (%.3f seconds)", tableName, lenR, lenT, time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - endTime1
                )

        return ret
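
The deleteOpt conventions shared by these loaders, condensed into a runnable check:

# deleteOpt handling as implemented above: 'selected'/'delete' remove rows
# for the named containers before insert; 'all'/'truncate' truncate the table.
def deleteKind(deleteOpt):
    if deleteOpt in ("selected", "delete"):
        return "delete-by-container"
    if deleteOpt in ("all", "truncate"):
        return "truncate"
    return "none"

assert deleteKind("selected") == "delete-by-container"
assert deleteKind("truncate") == "truncate"
assert deleteKind(None) == "none"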
Code example #7
class SchemaDefLoader(object):
    """Map PDBx/mmCIF instance data to SQL loadable data using external schema definition."""
    def __init__(
        self,
        cfgOb,
        schemaDefObj,
        cfgSectionName="site_info_configuration",
        dbCon=None,
        cachePath=".",
        workPath=".",
        cleanUp=False,
        warnings="default",
        verbose=True,
        restoreUseStash=True,
        restoreUseGit=True,
        providerTypeExclude=True,
    ):
        self.__verbose = verbose
        self.__debug = False
        self.__cfgOb = cfgOb
        sectionName = cfgSectionName
        self.__sD = schemaDefObj

        #
        self.__dbCon = dbCon
        self.__cachePath = cachePath
        self.__workPath = workPath
        self.__pathList = []
        self.__cleanUp = cleanUp
        #
        self.__colSep = "&##&\t"
        self.__rowSep = "$##$\n"
        #
        #
        self.__fTypeRow = "skip-max-width"
        self.__fTypeCol = "skip-max-width"
        #
        self.__warningAction = warnings
        dtf = DataTransformFactory(schemaDefAccessObj=self.__sD,
                                   filterType=self.__fTypeRow)
        self.__sdp = SchemaDefDataPrep(schemaDefAccessObj=self.__sD,
                                       dtObj=dtf,
                                       workPath=self.__cachePath,
                                       verbose=self.__verbose)
        self.__rpP = RepositoryProvider(cfgOb=self.__cfgOb,
                                        cachePath=self.__cachePath)
        #
        schemaName = self.__sD.getName()
        modulePathMap = self.__cfgOb.get("DICT_METHOD_HELPER_MODULE_PATH_MAP",
                                         sectionName=sectionName)
        dP = DictionaryApiProviderWrapper(self.__cachePath,
                                          cfgOb=self.__cfgOb,
                                          configName=sectionName,
                                          useCache=True)
        dictApi = dP.getApiByName(schemaName)
        rP = DictMethodResourceProvider(
            self.__cfgOb,
            cachePath=self.__cachePath,
            restoreUseStash=restoreUseStash,
            restoreUseGit=restoreUseGit,
            providerTypeExclude=providerTypeExclude)
        self.__dmh = DictMethodRunner(dictApi,
                                      modulePathMap=modulePathMap,
                                      resourceProvider=rP)

    def setWarning(self, action):
        if action in ["error", "ignore", "default"]:
            self.__warningAction = action
            return True
        else:
            self.__warningAction = "default"
            return False

    def setDelimiters(self, colSep=None, rowSep=None):
        """Set column and row delimiters for intermediate data files used for
        batch-file loading operations.
        """
        self.__colSep = colSep if colSep is not None else "&##&\t"
        self.__rowSep = rowSep if rowSep is not None else "$##$\n"
        return True
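
With the default delimiters above, each record written by __exportTdd (defined further below) has the following shape; the attribute values here are illustrative:

# Runnable illustration of the intermediate "tdd" record format.
colSep, rowSep = "&##&\t", "$##$\n"
row = ["4HHB", "HEMOGLOBIN", "1.74"]          # illustrative attribute values
record = colSep.join(row) + rowSep
assert record == "4HHB&##&\tHEMOGLOBIN&##&\t1.74$##$\n"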

    def load(self,
             inputPathList=None,
             containerList=None,
             loadType="batch-file",
             deleteOpt=None,
             tableIdSkipD=None):
        """Load data for each table defined in the current schema definition object.
        Data are extracted from the input file list.

        Data source options:

          inputPathList = [<full path of target input file>, ....]

        or

          containerList = [ data container, ...]


        loadType  =  ['batch-file' | 'batch-file-append' | 'batch-insert']
        deleteOpt = 'selected' | 'all'

        tableIdSkipD - searchable container of tableIds to be skipped on loading

        Loading is performed using the current database server connection.

        Intermediate data files for 'batch-file' loading are created in the current working path.

        Returns True for success or False otherwise.

        """
        tableIdSkipD = tableIdSkipD if tableIdSkipD is not None else {}
        if inputPathList is not None:
            cL = self.__rpP.getContainerList(inputPathList)
            #
            # Apply dynamic methods here -
            #
            for cA in cL:
                self.__dmh.apply(cA)
            tableDataDict, containerNameList = self.__sdp.process(cL)
        elif containerList is not None:
            tableDataDict, containerNameList = self.__sdp.process(
                containerList)
        else:
            # Neither a path list nor a container list was provided.
            return False
        #
        if loadType in ["batch-file", "batch-file-append"]:
            append = loadType == "batch-file-append"
            exportList = self.__exportTdd(tableDataDict,
                                          colSep=self.__colSep,
                                          rowSep=self.__rowSep,
                                          append=append)
            for tableId, loadPath in exportList:
                if tableId in tableIdSkipD:
                    continue
                self.__batchFileImport(tableId,
                                       loadPath,
                                       sqlFilePath=None,
                                       containerNameList=containerNameList,
                                       deleteOpt=deleteOpt)
                if self.__cleanUp:
                    self.__cleanUpFile(loadPath)
            return True
        elif loadType == "batch-insert":
            for tableId, rowList in tableDataDict.items():
                if tableId in tableIdSkipD:
                    continue
                if deleteOpt in ["all", "selected"] or rowList:
                    self.__batchInsertImport(
                        tableId,
                        rowList=rowList,
                        containerNameList=containerNameList,
                        deleteOpt=deleteOpt)
            return True

        return False
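
tableIdSkipD is only ever tested for membership, so any dict or set keyed by tableId works; a runnable illustration of the skip logic in load() (table names are illustrative):

# Membership-only use of tableIdSkipD, as in the loops above.
tableIdSkipD = {"PDBX_DATABASE_STATUS": True}
tableDataDict = {"ENTITY": [{"id": "1"}], "PDBX_DATABASE_STATUS": [{"id": "2"}]}
loaded = [tId for tId in tableDataDict if tId not in tableIdSkipD]
assert loaded == ["ENTITY"]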

    def __cleanUpFile(self, filePath):
        try:
            os.remove(filePath)
        except Exception:
            pass

    def makeLoadFilesMulti(self, dataList, procName, optionsD, workingDir):
        """Create a loadable data file for each table defined in the current schema
        definition object.   Data is extracted from the input file list.

        Load files are created in the current working path.

        Return the containerNames for the input path list, and path list for load files that are created.

        """
        _ = workingDir
        try:
            pn = procName.split("-")[-1]
        except Exception:
            pn = procName

        exportFormat = optionsD.get("exportFormat", "tdd")
        r1, r2 = self.makeLoadFiles(inputPathList=dataList,
                                    partName=pn,
                                    exportFormat=exportFormat)
        return dataList, r1, r2, []

    def makeLoadFiles(self,
                      inputPathList,
                      append=False,
                      partName="1",
                      exportFormat="tdd"):
        """Create a loadable data file for each table defined in the current schema
        definition object.   Data is extracted from the input file list.

        Load files are created in the current working path.

        Return the containerNames for the input path list, and path list for load files that are created.

        """
        cL = self.__rpP.getContainerList(inputPathList)
        for cA in cL:
            self.__dmh.apply(cA)
        tableDataDict, containerNameList = self.__sdp.process(cL)
        if exportFormat == "tdd":
            return containerNameList, self.__exportTdd(tableDataDict,
                                                       colSep=self.__colSep,
                                                       rowSep=self.__rowSep,
                                                       append=append,
                                                       partName=partName)
        elif exportFormat == "csv":
            return containerNameList, self.__exportCsv(tableDataDict,
                                                       append=append,
                                                       partName=partName)
        else:
            return [], []

    def __exportCsv(self, tableDict, append=False, partName="1"):
        """ """
        modeOpt = "a" if append else "w"

        exportList = []
        for tableId, rowList in tableDict.items():
            if not rowList:
                continue
            tObj = self.__sD.getSchemaObject(tableId)
            schemaAttributeIdList = tObj.getAttributeIdList()
            attributeNameList = tObj.getAttributeNameList()
            #
            fn = os.path.join(self.__workPath,
                              tableId + "-" + partName + ".csv")
            with open(fn, modeOpt, newline="", encoding="utf-8") as ofh:
                csvWriter = csv.writer(ofh)
                csvWriter.writerow(attributeNameList)
                for rD in rowList:
                    csvWriter.writerow(
                        [rD[aId] for aId in schemaAttributeIdList])

            exportList.append((tableId, fn))
        return exportList

    def __exportTdd(self,
                    tableDict,
                    colSep="&##&\t",
                    rowSep="$##$\n",
                    append=False,
                    partName="1"):
        modeOpt = "a" if append else "w"

        exportList = []
        for tableId, rowList in tableDict.items():
            tObj = self.__sD.getSchemaObject(tableId)
            schemaAttributeIdList = tObj.getAttributeIdList()
            #
            if rowList:
                fn = os.path.join(self.__workPath,
                                  tableId + "-" + partName + ".tdd")
                with open(fn, modeOpt, encoding="utf-8") as ofh:
                    for rD in rowList:
                        # logger.info("%r" % colSep.join([str(rD[aId]) for aId in schemaAttributeIdList]))
                        ofh.write("%s%s" % (colSep.join(
                            [str(rD[aId])
                             for aId in schemaAttributeIdList]), rowSep))
                exportList.append((tableId, fn))
        return exportList

    def loadBatchFiles(self,
                       loadList=None,
                       containerNameList=None,
                       deleteOpt=None):
        """Load data for each table defined in the current schema definition object using

        Data source options:

          loadList = [(tableId, <full path of load file), ....]
          containerNameList = [ data namecontainer, ...]

        deleteOpt = 'selected' | 'all','truncate'

        Loading is performed using the current database server connection.

        Returns True for success or False otherwise.

        """
        #
        startTime = time.time()
        loadList = loadList if loadList is not None else []
        ok = False
        for tableId, loadPath in loadList:
            ok = self.__batchFileImport(tableId,
                                        loadPath,
                                        sqlFilePath=None,
                                        containerNameList=containerNameList,
                                        deleteOpt=deleteOpt)
            if not ok:
                break
            if self.__cleanUp:
                self.__cleanUpFile(loadPath)
        #
        endTime = time.time()
        logger.debug("Completed with status %r at %s (%.3f seconds)\n", ok,
                     time.strftime("%Y %m %d %H:%M:%S", time.localtime()),
                     endTime - startTime)
        return ok

    def delete(self, tableId, containerNameList=None, deleteOpt="all"):
        #
        startTime = time.time()
        sqlCommandList = self.__getSqlDeleteList(
            tableId, containerNameList=containerNameList, deleteOpt=deleteOpt)

        myQ = MyDbQuery(dbcon=self.__dbCon, verbose=self.__verbose)
        myQ.setWarning(self.__warningAction)
        ret = myQ.sqlCommand(sqlCommandList=sqlCommandList)
        #
        #
        endTime = time.time()

        logger.debug("Delete table %s server returns %r\n", tableId, ret)
        logger.debug("Completed at %s (%.3f seconds)\n",
                     time.strftime("%Y %m %d %H:%M:%S", time.localtime()),
                     endTime - startTime)
        return ret

    def __getSqlDeleteList(self,
                           tableId,
                           containerNameList=None,
                           deleteOpt="all"):
        """Return the SQL delete commands for the input table and container name list."""
        databaseName = self.__sD.getDatabaseName()
        sqlGen = SqlGenAdmin(self.__verbose)
        tableDefObj = self.__sD.getSchemaObject(tableId)
        tableName = tableDefObj.getName()

        sqlDeleteList = []
        if deleteOpt in ["selected", "delete"
                         ] and containerNameList is not None:
            deleteAttributeName = tableDefObj.getDeleteAttributeName()
            sqlDeleteList = sqlGen.deleteFromListSQL(databaseName,
                                                     tableName,
                                                     deleteAttributeName,
                                                     containerNameList,
                                                     chunkSize=50)
        elif deleteOpt in ["all", "truncate"]:
            sqlDeleteList = [sqlGen.truncateTableSQL(databaseName, tableName)]

        logger.debug("Delete SQL for %s : %r\n", tableId, sqlDeleteList)
        return sqlDeleteList

    def __batchFileImport(self,
                          tableId,
                          tableLoadPath,
                          sqlFilePath=None,
                          containerNameList=None,
                          deleteOpt="all"):
        """Batch load the input table using data in the input loadable data file.

        If sqlFilePath is provided, then any generated SQL commands are preserved in this file.

        deleteOpt = None | 'selected' | 'all' | 'truncate'
        """
        startTime = time.time()
        databaseName = self.__sD.getDatabaseName()
        sqlGen = SqlGenAdmin(self.__verbose)
        tableDefObj = self.__sD.getSchemaObject(tableId)
        # tableName = tableDefObj.getName()

        #
        if deleteOpt:
            sqlCommandList = self.__getSqlDeleteList(
                tableId,
                containerNameList=containerNameList,
                deleteOpt=deleteOpt)
        else:
            sqlCommandList = []

        if os.access(tableLoadPath, os.R_OK):
            sqlCommandList.append(
                sqlGen.importTable(databaseName,
                                   tableDefObj,
                                   importPath=tableLoadPath))

            if self.__verbose:
                logger.debug("SQL import command\n%s\n", sqlCommandList)
            #

        if sqlFilePath is not None:
            try:
                with open(sqlFilePath, "w", encoding="utf-8") as ofh:
                    ofh.write("%s" % "\n".join(sqlCommandList))
            except Exception:
                pass
        #
        myQ = MyDbQuery(dbcon=self.__dbCon, verbose=self.__verbose)
        myQ.setWarning(self.__warningAction)
        ret = myQ.sqlCommand(sqlCommandList=sqlCommandList)
        #
        #
        endTime = time.time()
        logger.debug("Table %s server returns %r\n", tableId, ret)
        logger.debug("Completed at %s (%.3f seconds)\n",
                     time.strftime("%Y %m %d %H:%M:%S", time.localtime()),
                     endTime - startTime)
        return ret

    def loadBatchData(self,
                      tableId,
                      rowList=None,
                      containerNameList=None,
                      deleteOpt="selected"):
        return self.__batchInsertImport(tableId,
                                        rowList=rowList,
                                        containerNameList=containerNameList,
                                        deleteOpt=deleteOpt)

    def __batchInsertImport(self,
                            tableId,
                            rowList=None,
                            containerNameList=None,
                            deleteOpt="selected"):
        """Load the input table using batch inserts of the input list of dictionaries (i.e. d[attributeId]=value).

        The containerNameList corresponding to the loadable data in rowList can be provided
        if 'selected' deletions are to be performed prior to the batch data inserts.

        deleteOpt = ['selected','all'] where 'selected' deletes rows corresponding to the input container
                    list before insert.   The 'all' option truncates the table prior to insert.

                    Deletions are performed in the absence of loadable data.

        """
        startTime = time.time()

        myQ = MyDbQuery(dbcon=self.__dbCon, verbose=self.__verbose)
        myQ.setWarning(self.__warningAction)
        sqlGen = SqlGenAdmin(self.__verbose)
        #
        databaseName = self.__sD.getDatabaseName()
        tableDefObj = self.__sD.getSchemaObject(tableId)
        tableName = tableDefObj.getName()
        tableAttributeIdList = tableDefObj.getAttributeIdList()
        tableAttributeNameList = tableDefObj.getAttributeNameList()
        #
        sqlDeleteList = None
        if deleteOpt in ["selected", "delete"
                         ] and containerNameList is not None:
            deleteAttributeName = tableDefObj.getDeleteAttributeName()
            sqlDeleteList = sqlGen.deleteFromListSQL(databaseName,
                                                     tableName,
                                                     deleteAttributeName,
                                                     containerNameList,
                                                     chunkSize=10)
            if self.__verbose:
                logger.debug("Delete SQL for %s : %r\n", tableId,
                             sqlDeleteList)
        elif deleteOpt in ["all", "truncate"]:
            sqlDeleteList = [sqlGen.truncateTableSQL(databaseName, tableName)]

        sqlInsertList = []
        rowList = rowList if rowList is not None else []
        for row in rowList:
            vList = []
            aList = []
            for tid, nm in zip(tableAttributeIdList, tableAttributeNameList):
                # if len(row[id]) > 0 and row[id] != r'\N':
                if row[tid] is not None and row[tid] != r"\N":
                    vList.append(row[tid])
                    aList.append(nm)
            sqlInsertList.append(
                (sqlGen.insertTemplateSQL(databaseName, tableName,
                                          aList), vList))

        ret = myQ.sqlBatchTemplateCommand(sqlInsertList,
                                          prependSqlList=sqlDeleteList)
        if ret:
            logger.debug("Batch insert completed for table %s rows %d\n",
                         tableName, len(sqlInsertList))
        else:
            logger.error("Batch insert fails for table %s length %d\n",
                         tableName, len(sqlInsertList))

        endTime = time.time()
        if self.__verbose:
            logger.debug("Completed at %s (%.3f seconds)\n",
                         time.strftime("%Y %m %d %H:%M:%S", time.localtime()),
                         endTime - startTime)

        return ret

    def __deleteFromTable(self, tableIdList, deleteValue):
        """Delete data from the input table list where the schema table delete attribute
        has the input value "deleteValue".

        """
        databaseName = self.__sD.getDatabaseName()
        sqlList = []
        sqlGen = SqlGenAdmin(self.__verbose)
        for tableId in tableIdList:
            tableName = self.__sD.getSchemaName(tableId)
            tableDefObj = self.__sD.getSchemaObject(tableId)
            atName = tableDefObj.getDeleteAttributeName()
            sqlTemp = sqlGen.deleteTemplateSQL(databaseName, tableName,
                                               [atName])
            sqlList.append(sqlTemp % deleteValue)
        #
        return sqlList
Code example #8
    def __fullSchemaDataPrep(self,
                             contentType,
                             filterType,
                             styleType,
                             mockLength,
                             rejectLength=0,
                             dataSelectors=None,
                             mergeContentTypes=None,
                             excludeExtras=None):
        """Internal method for preparing file-based data requiring dynamic methods, slicing, or key injection.

        Args:
            contentType (str): Content type name
            filterType (str): List of data processing options (separated by '|') (e.g. "drop-empty-attributes|drop-empty-tables|skip-max-width|...")
            styleType (str): organization of the output document (e.g. rowwise-by-name)
            mockLength (int): Expected length of the test data for the input content type
            rejectLength (int, optional): number of input data sets rejected by the data selection criteria. Defaults to 0.
            dataSelectors (list of str, optional): data selection criteria. Defaults to None.
            mergeContentTypes (list of str, optional): list of content types to merge with the input data set. Defaults to None. (e.g. ['vrpt'])
            excludeExtras (list, optional): data items excluded from the prepared documents (e.g. timestamped items removed to allow diffs). Defaults to None.
        """
        try:
            excludeExtras = excludeExtras if excludeExtras else []
            _ = mockLength
            _ = rejectLength
            dD = self.__schP.makeSchemaDef(contentType,
                                           dataTyping="ANY",
                                           saveSchema=True)
            _ = SchemaDefAccess(dD)
            inputPathList = self.__rpP.getLocatorObjList(
                contentType=contentType, mergeContentTypes=mergeContentTypes)
            sd, _, collectionNameList, _ = self.__schP.getSchemaInfo(
                databaseName=contentType, dataTyping="ANY")
            #
            dP = DictionaryApiProviderWrapper(self.__cachePath,
                                              cfgOb=self.__cfgOb,
                                              configName=self.__configName,
                                              useCache=True)
            dictApi = dP.getApiByName(contentType)
            #
            rP = DictMethodResourceProvider(
                self.__cfgOb,
                configName=self.__configName,
                cachePath=self.__cachePath,
                restoreUseStash=False,
                restoreUseGit=True,
                providerTypeExclude=self.__excludeType,
            )
            dmh = DictMethodRunner(dictApi,
                                   modulePathMap=self.__modulePathMap,
                                   resourceProvider=rP)
            #
            dtf = DataTransformFactory(schemaDefAccessObj=sd,
                                       filterType=filterType)
            sdp = SchemaDefDataPrep(schemaDefAccessObj=sd,
                                    dtObj=dtf,
                                    workPath=self.__cachePath,
                                    verbose=self.__verbose)
            containerList = self.__rpP.getContainerList(inputPathList)
            for container in containerList:
                cName = container.getName()
                logger.debug("Processing container %s", cName)
                dmh.apply(container)
            #
            for collectionName in collectionNameList:
                tableIdExcludeList = sd.getCollectionExcluded(collectionName)
                tableIdIncludeList = sd.getCollectionSelected(collectionName)
                sliceFilter = sd.getCollectionSliceFilter(collectionName)
                sdp.setSchemaIdExcludeList(tableIdExcludeList)
                sdp.setSchemaIdIncludeList(tableIdIncludeList)
                #
                docList, _, _ = sdp.processDocuments(
                    containerList,
                    styleType=styleType,
                    sliceFilter=sliceFilter,
                    filterType=filterType,
                    dataSelectors=dataSelectors,
                    collectionName=collectionName)

                docList = sdp.addDocumentPrivateAttributes(
                    docList, collectionName)
                docList = sdp.addDocumentSubCategoryAggregates(
                    docList, collectionName)

                # Special exclusions for the test harness. (removes timestamped data items to allow diffs.)
                self.__filterDocuments(docList, excludeExtras)
                mergeS = "-".join(
                    mergeContentTypes) if mergeContentTypes else ""
                fName = "full-prep-%s-%s-%s-%s.json" % (
                    contentType, collectionName, mergeS, styleType)
                if self.__exportFlag:
                    self.__logDocumentOrder(docList)
                    fPath = os.path.join(self.__outputPath, fName)
                    self.__mU.doExport(fPath, docList, fmt="json", indent=3)
                    logger.debug("Exported %r", fPath)
                #
                if self.__diffFlag:
                    fPath = os.path.join(self.__savedOutputPath, fName)
                    refDocList = self.__mU.doImport(fPath, fmt="json")
                    self.assertEqual(len(refDocList), len(docList))
                    logger.debug("For %s %s len refDocList %d", contentType,
                                 collectionName, len(refDocList))
                    logger.debug("For %s %s len docList %d", contentType,
                                 collectionName, len(docList))
                    jD = diff(refDocList,
                              docList,
                              syntax="explicit",
                              marshal=True)
                    if jD:
                        _, fn = os.path.split(fPath)
                        bn, _ = os.path.splitext(fn)
                        fPath = os.path.join(self.__outputPath,
                                             bn + "-diff.json")
                        logger.debug("jsondiff for %s %s = \n%s", contentType,
                                     collectionName,
                                     pprint.pformat(jD, indent=3, width=100))
                        self.__mU.doExport(fPath, jD, fmt="json", indent=3)
                    self.assertEqual(len(jD), 0)

        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()
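
The diff-based regression check above appears to use the jsondiff package; a minimal runnable example of the same call pattern, where an empty result means the documents match:

# Minimal jsondiff usage mirroring the comparison above.
from jsondiff import diff

ref = [{"entry_id": "1ABC", "resolution": 2.0}]  # illustrative documents
new = [{"entry_id": "1ABC", "resolution": 2.0}]
assert diff(ref, new, syntax="explicit", marshal=True) == {}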