class FelixParams(ExternalParams):

    format = 'Felix'

    def __init__(self, file, **kw):

        self.dataFile = file
        self.head = 4 * 4096

        ExternalParams.__init__(self, **kw)

    # ExternalParams requires this to be defined
    def parseFile(self):

        try:
            fp = open(self.dataFile, 'rb')
        except IOError, e:
            raise ApiError(str(e))

        s = fp.read(self.head)
        if len(s) < self.head:
            raise ApiError('file shorter than expected length (%d bytes) of header (never mind data)'
                           % self.head)
        fp.close()

        # read the same header bytes as both ints and floats
        x = array.array('i')
        y = array.array('f')
        x.fromstring(s)
        y.fromstring(s)

        matrix_type = x[1]
        if matrix_type != 1:
            self.swap = True
            x.byteswap()
            y.byteswap()
            if x[1] != 1:
                raise ApiError('normal byte ordered matrix type (word 1) = %d and swapped byte ordered matrix type = %d, one of these should be 1'
                               % (matrix_type, x[1]))

        ndim = self.ndim = x[0]
        self.initDims()

        for i in range(ndim):
            self.npts[i] = x[20 + 1 * ndim + i]
            self.block[i] = x[20 + 4 * ndim + i]
            self.sf[i] = y[20 + 6 * ndim + i]
            self.sw[i] = y[20 + 7 * ndim + i]
            self.refpt[i] = y[20 + 8 * ndim + i]
            self.refppm[i] = y[20 + 9 * ndim + i] / self.sf[i]

            # nucleus name is stored one character per 32-bit word
            nuc = ''
            j = 0
            while j < 8 and x[220 + 8 * i + j]:
                nuc = nuc + chr(x[220 + 8 * i + j])
                j = j + 1
            self.nuc[i] = self.standardNucleusName(nuc)
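# Illustrative sketch, not part of the original source: the byte-order test in
# FelixParams.parseFile works because the same header bytes are read as both
# ints and floats, and both arrays are byteswapped when word 1 (the Felix
# matrix type) is not 1. A minimal standalone demonstration, using a
# hypothetical two-word header:

import array
import struct

def demoSwapDetection(headerBytes):
    x = array.array('i')
    x.fromstring(headerBytes)  # Python 2 API; frombytes() on Python 3
    if x[1] != 1:
        x.byteswap()  # header was written on an opposite-endian machine
    return x[1] == 1

# two 4-byte ints (ndim=2, matrix_type=1) packed big-endian; on a
# little-endian host the byteswap recovers the correct values
print demoSwapDetection(struct.pack('>2i', 2, 1))  # True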
def getDataPlane(dataSource):
    """ get plane of data.
    Currently returns acquisition dimension as fastest varying
    Currently always returns entire plane
    """
    from numpy import array

    # data type in file - may vary between formats
    btype = 'i'

    dataStore = dataSource.dataStore

    # tests for not-yet-implemented:
    # may be handled in the future, using new optional function parameters
    if dataSource.numDim != 2:
        # Assumes 2D
        raise ApiError('%s: num dims %s not supported'
                       % (dataSource, dataSource.numDim))

    if dataStore.blockSizes != dataStore.numPoints:
        # assumes unblocked data
        raise ApiError('%s: file is blocked. numPoints:%s, blockSizes:%s'
                       % (dataSource, dataStore.numPoints, dataStore.blockSizes))

    if True in dataStore.isComplex:
        # assumes all-real data
        raise ApiError('%s: data have complex dimension - isComplex:%s'
                       % (dataSource, dataStore.isComplex))

    # active code starts

    # check number of points
    points1 = dataStore.numPoints
    points2 = tuple(x.numPoints for x in dataSource.sortedDataDims())
    if points1 != points2:
        raise ApiError("Number of points differ between file (%s) and spectrum (%s)"
                       % (points1, points2))

    # get and check file
    filePath = getDataSourceFileName(dataSource)
    if filePath is None:
        raise ApiError('No data file found for %s' % dataSource)

    # read data from file
    buf = open(filePath, 'rb').read()
    nPoints = len(buf) / struct.calcsize(btype)
    data = array(struct.unpack(str(nPoints) + btype, buf))

    # convert data to array
    data = data.reshape(points1[1], points1[0])

    # return data
    return data
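# Minimal sketch of the unpack-and-reshape step used by getDataPlane above,
# on synthetic data (assumes 4-byte ints and unblocked 2D data, as the
# function itself does):

import struct
from numpy import array

npts = (3, 2)  # (fastest, slowest) varying points, as in dataStore.numPoints
buf = struct.pack('6i', *range(6))  # stand-in for the file contents
nPoints = len(buf) / struct.calcsize('i')
data = array(struct.unpack(str(nPoints) + 'i', buf)).reshape(npts[1], npts[0])
print data.shape  # (2, 3): slowest dimension first, acquisition dim fastest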
def setSampledDim(self, dim, values):

    if dim < 0 or dim >= self.ndim:
        raise ApiError('dim = %d, must be between 0 and %d' % (dim, self.ndim-1))

    if len(values) != self.npts[dim]:
        raise ApiError('number of values = %d, must be %d'
                       % (len(values), self.npts[dim]))

    self.dimType[dim] = self.sampledDimType
    self.pointValues[dim] = values
    self.nuc[dim] = None
def fullSanityCheck(self):

    headerLength = 2048

    rawDimLengths = []
    for i in range(0, self.ndim):
        realDimBytes = self.npts[i]
        blocks = ceil(realDimBytes / float(self.block[i]))
        rawDimBytes = blocks * self.block[i]
        rawDimLengths.append(rawDimBytes)
        if __debug__:
            print i, self.npts[i], self.block[i], realDimBytes, rawDimBytes, blocks

    dataSize = 1
    for rawDimLength in rawDimLengths:
        dataSize *= rawDimLength

    expectedSize = (dataSize * 4) + headerLength
    if __debug__:
        print 'expected bytes', expectedSize

    fileSize = stat(self.dataFile).st_size
    if fileSize < expectedSize:
        raise ApiError('nmrview file %s\n is not the correct size (%d bytes; expected %d bytes)'
                       % (self.dataFile, fileSize, expectedSize))
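# Sketch of the expected-size arithmetic in fullSanityCheck, with hypothetical
# numbers: each dimension is padded up to a whole number of blocks before the
# sizes are multiplied together.

from math import ceil

npts, block = 300, 128
blocks = ceil(npts / float(block))  # 3.0: 300 points need 3 blocks of 128
rawDimBytes = blocks * block        # 384 points stored for 300 real ones
print 2048 + int(rawDimBytes) * 4   # header + 4 bytes per point, 1D case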
def renameProject(project, newProjectName):
    """ Rename project.
    """
    # change project name
    if newProjectName == project.name:
        return
    else:
        print '### renaming', project.name, newProjectName

    project.override = True  # TBD: for now name is frozen so change this way
    try:
        # below constraint is not checked in setName() if override is True
        # so repeat here
        isValid = newProjectName.isalnum()  # superfluous but faster in most cases
        if not isValid:
            for cc in newProjectName:
                if cc != '_' and not cc.isalnum():
                    isValid = False
                    break
            else:
                isValid = True

        if not isValid:
            raise ApiError('project name must only have characters that are alphanumeric or underscore')

        # below checks for length of name as well
        project.name = newProjectName

    finally:
        project.override = False
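# Sketch of the name rule enforced by renameProject: only alphanumeric
# characters and underscores are accepted (hypothetical names):
for name in ('myProject_2', 'bad name!'):
    print name, all(cc == '_' or cc.isalnum() for cc in name)
# myProject_2 True
# bad name! False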
def __init__(self, oldVersionStr, newRoot=None, oldRoot=None,
             newVersionStr=None, oldTags=None):
    """old and new root, old and new versionStr are pre and post upgrade
    memopsRoot and version string, respectively.
    globalMapping is the top level I/O map to use - normally not passed
    but set here.
    oldTags are the names of the links from oldRoot to all package
    topObjects, in import order.
    """
    self.oldRoot = oldRoot
    self.newRoot = newRoot

    self.oldVersionStr = oldVersionStr
    if newVersionStr is None:
        newVersionStr = currentVersionStr
    elif newVersionStr != currentVersionStr:
        raise ApiError("DataUpgrader to non-current version %s not implemented"
                       % newVersionStr)
    self.newVersionStr = newVersionStr

    self.oldTags = oldTags

    # Version-specific compatibility code module.
    self.compatibilityModule = getCompatibilityModule(oldVersionStr)
def parseFile(self):

    try:
        procparParams = parseProcparFile(self.procpar_file)
        dataFileParams = readDataFileHeader(self.data_file)
    except IOError, e:
        raise ApiError(str(e))
def setNmrExpPrototypeLink(obj, tag, topObjByGuid, delayDataDict, linkmapper):
    """ redirect certain NmrExpPrototype links to other experiments
    """
    doGet = delayDataDict.get
    objDataDict = doGet(obj)
    inDataList = objDataDict.get(tag)
    if inDataList:
        keyList = inDataList[0]
        linkmapper(keyList)
        guid = keyList[0]

        # set link
        oo = topObjByGuid.get(guid)
        clazz = keyList[-1]['class']
        if oo is None:
            # NB naughty - _packageName is a private attribute.
            # But getPackageName is not static
            obj.root.refreshTopObjects(clazz._packageName)
            try:
                oo = topObjByGuid[guid]
            except KeyError:
                raise ApiError("%s.%s: NmrExpPrototype with guid %s not found or loaded"
                               % (clazz.__name__, tag, guid))

        obj.__dict__[tag] = clazz.getByKey(oo, keyList[1:-1])
        del objDataDict[tag]
def parseFile(self):

    try:
        procParData = AzaraProcessingParsFile(self.parFile)
        procParData.read()
    except IOError, e:
        raise ApiError(str(e))
def getProjectionData(dataSources):
    """get list of shape names excluding acquisition dimension
    and spectrum,shapename scalingFactor matrix
    """
    allShapeNames = getIndirectShapeNames(dataSources)

    shapeNameSet = set()
    scalingFacs = []
    for dataSource in dataSources:
        dd = {}
        scalingFacs.append(dd)
        for dataDim in dataSource.sortedDataDims()[1:]:
            for dsc in projectionDimScalings(dataDim):
                sname = dsc.expDimRef.displayName
                if sname in dd:
                    raise ApiError("%s ExpDimRef has duplicate displayName" % dsc)
                shapeNameSet.add(sname)
                dd[sname] = dsc.scalingFactors[0]

    # create defs matrix. NBNB currently must be int - may change later?
    # first get shapeNames in actual use in predetermined order
    shapeNames = [x for x in allShapeNames if x in shapeNameSet]
    defsMatrix = []
    nShapes = len(shapeNames)
    for dd in scalingFacs:
        ll = [int(dd.get(sname, 0.0)) for sname in shapeNames]
        defsMatrix.append(ll)

    # return
    return shapeNames, defsMatrix
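# Sketch of the shapeNames/defsMatrix relationship built by getProjectionData:
# one row per dataSource, one column per shape name, zero where a shape is
# unused (hypothetical shapes and scaling factors):
shapeNames = ['C', 'N']
scalingFacs = [{'C': 1.0}, {'C': -1.0, 'N': 1.0}]
print [[int(dd.get(sname, 0.0)) for sname in shapeNames] for dd in scalingFacs]
# [[1, 0], [-1, 1]]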
def checkParFileAxisIndex(self, parFile, lineIndex, fields):

    axis = atoi(fields[1])
    if axis > self.ndim or axis < 1:
        badAxisError = ('line %d in nmrview .par %s, has a bad axis index: %d. '
                        'Permissible axis indices are 1 to %d (line: %s)\n')
        raise ApiError(badAxisError
                       % (lineIndex + 1, parFile, axis, self.ndim, ' '.join(fields)))
def save(repositoryPath, topObject, mapping=None, comment=None,
         simplified=True, compact=True, expanded=False):

    fileLocation = xmlUtil.findTopObjectPath(repositoryPath, topObject)

    if os.path.exists(fileLocation):
        # File that fits guid already exists - overwrite it
        pass
    else:
        # file does not exist - make sure the directories are there.
        dirName = os.path.dirname(fileLocation)
        if not os.path.exists(dirName):
            os.makedirs(dirName)
        elif not os.path.isdir(dirName):
            raise ApiError("%s exists and is not a directory" % dirName)

    stream = open(fileLocation, 'w')
    try:
        saveToStream(stream, topObject, mapping, comment, simplified, compact)
    finally:
        stream.close()
def fixImplementation(topObj, delayDataDict):
    """ Add generalData repository, and a packageLocator for AnalysisProfile
    with its repositories link set.
    NB when this is called, PackageLocators and Repositories have already
    had their child links and crosslinks set.
    """
    from memops.universal import Io as uniIo
    from memops.api.Implementation import Url
    import os.path

    emptyDict = {}
    emptyList = []
    doGet = delayDataDict.get

    urlPath = uniIo.normalisePath(os.path.expanduser('~/.ccpn/data'))

    repositories = doGet(topObj).get('repositories')
    for refData in repositories:
        if refData.name == 'refData':
            break
    else:
        raise ApiError("refData repository not found")

    genData = topObj.newRepository(name='generalData', url=Url(path=urlPath))
    topObj.__dict__['repositories']['generalData'] = genData

    profileLocator = topObj.newPackageLocator(targetName='ccpnmr.AnalysisProfile',
                                              repositories=(genData, refData))
    topObj.__dict__['packageLocators']['ccpnmr.AnalysisProfile'] = profileLocator
def parseFile(self):

    # read file and get Decomposition node
    try:
        elementTree = ET.parse(self.parFile)
    except IOError, e:
        raise ApiError(str(e))
def parseFile(self):

    self.nvParToNvNvFile()

    try:
        fp = open(self.dataFile, 'rb')
    except IOError, e:
        raise ApiError('nmrview file %s is not accessible: %s'
                       % (self.dataFile, str(e)))
def parseFile(self):

    try:
        help = BrukerParHelp('help')
        procParData = BrukerProcessingParData(self.procs_file, help.tags)
        procParData.get()
    except IOError, e:
        raise ApiError(str(e))
def setDataSourceFileName(dataSource, fileName):

    dataStore = dataSource.dataStore
    if dataStore is None:
        raise ApiError('dataStore is None')

    setDataStoreFileName(dataStore, fileName)
def makeNewParams(self):

    try:
        self.init()
        self.parseFile()
        self.checkValid()
    except ApiError, e:
        raise ApiError('%s: make sure you selected the correct file for the %s format'
                       % (e.error_msg, self.format))
def getLinearChemCompData(project, molType, ccpCode, linking):
    """Descrn: Implementation function, specific for makeLinearSequence()
       Inputs: Project object, and desired molType, ccpCode, linking (all strings)
       Output: (dd,ll) tuple where dd is a dictionary for passing to the
               MolResidue creation (as **dd), and ll is a list of the
               linkCodes that are different from 'next' and 'prev'
    """
    chemComp = getChemComp(project, molType, ccpCode)
    if chemComp is None:
        raise ApiError("No chemComp for %s residue %s" % (molType, ccpCode))

    chemCompVar = (chemComp.findFirstChemCompVar(linking=linking, isDefaultVar=True) or
                   chemComp.findFirstChemCompVar(linking=linking))
    # Note requiring a default var is too strict - not always set for
    # imports from mol2/PDB etc

    if chemCompVar is None:
        raise ApiError("No ChemCompVar found for %s:%s linking %s"
                       % (molType, ccpCode, linking))

    molResData = {'chemComp': chemComp, 'linking': linking,
                  'descriptor': chemCompVar.descriptor}

    seqLinks = []
    otherLinkCodes = []
    for linkEnd in chemCompVar.linkEnds:
        code = linkEnd.linkCode
        if code in ('next', 'prev'):
            seqLinks.append(code)
        else:
            otherLinkCodes.append(code)

    if linking == 'start':
        if seqLinks != ['next']:
            raise ApiError("Linking 'start' must have just 'next' linkEnd")
    elif linking == 'end':
        if seqLinks != ['prev']:
            raise ApiError("Linking 'end' must have just 'prev' linkEnd")
    elif linking != 'middle' or seqLinks not in (['next', 'prev'], ['prev', 'next']):
        raise ApiError("Illegal linking %s with seqLinks %s" % (linking, seqLinks))

    return (molResData, otherLinkCodes)
def checkMagic(self, bytes):

    numberMagicBytes = len(self.magicBytes)
    inMagicBytes = list(["%02X" % ord(byte) for byte in bytes[0:numberMagicBytes]])
    inMagicBytes.sort()

    if inMagicBytes != self.magicBytes:
        inMagicBytesString = ', '.join(inMagicBytes)
        magicBytesString = ', '.join(self.magicBytes)
        raise ApiError('nmrview file %s\n does not start with the expected magic bytes (%s; in any order); got %s instead'
                       % (self.dataFile, magicBytesString, inMagicBytesString))
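# Sketch of the order-insensitive magic-number comparison in checkMagic, with
# hypothetical magic bytes; both sides are sorted lists of hex strings, so the
# byte order in the file does not matter:
magicBytes = sorted(['34', '18', 'AB', 'CD'])
fileStart = '\xcd\xab\x34\x18'
inMagicBytes = sorted(["%02X" % ord(byte) for byte in fileStart[:len(magicBytes)]])
print inMagicBytes == magicBytes  # True: same bytes, any order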
def fixExoLinkMap(newGuid, action, prefix, globalMapping, typeName):
    """ Fix exoLink maps
    if action == 'rename' enter newGuid map under (prefix, typeName) name
    if action == 'skip', enter skip record for guid under (prefix, typeName) name
    """
    from memops.format.xml import XmlGen
    exoTag = XmlGen.xmlTag(prefix, typeName, var='exo')

    # set up
    exolinks = globalMapping[prefix]['exolinks']
    loadMaps = globalMapping['loadMaps']

    # check if slots are occupied
    if typeName in exolinks:
        raise ApiError("%s exolink: name %s already in use" % (exoTag, typeName))
    if exoTag in loadMaps:
        raise ApiError("%s exolink: name already in use" % exoTag)

    if action == 'skip':
        # we are not replacing anything, but skipping
        newMap = {
            'type': 'exo',
            'guid': newGuid,
            'eType': 'cplx',
            'proc': 'skip'
        }
    else:
        # replacing an existing map
        newClassMap = globalMapping['mapsByGuid'].get(newGuid)
        newExoTag = XmlGen.xmlTag(prefix, newClassMap['class'].__name__, var='exo')
        #newMap = globalMapping['mapsByGuid'].get(newGuid)
        newMap = loadMaps[newExoTag]
        del exolinks[newMap['class'].__name__]
        #del loadMaps[newMap['tag']]

    # set new map
    exolinks[typeName] = newMap
    loadMaps[exoTag] = newMap
class AzaraParams(ExternalParams):

    format = 'Azara'

    def __init__(self, parFile, externalParams=None):

        self.parFile = parFile
        self.dim = -1

        ExternalParams.__init__(self, externalParams)

    # ExternalParams requires this to be defined
    def parseFile(self):

        try:
            fp = open(self.parFile)
        except IOError, e:
            raise ApiError(str(e))

        lines = fp.readlines()
        fp.close()

        lineNo = 0
        for line in lines:
            lineNo = lineNo + 1

            # strip comments ('!' to end of line) and whitespace
            n = line.find('!')
            if n >= 0:
                line = line[:n]
            line = line.strip()

            if line:
                fields = line.split()
                try:
                    self.parseFields(fields)
                except ApiError, e:
                    raise ApiError('line number %d: %s' % (lineNo, e.error_msg))
                except Exception, e:
                    raise ApiError('line number %d: %s' % (lineNo, e))
def getCompatibilityModule(fromVersion):
    """ import relevant compatibility module
    """
    compModules = ['Major', 'Minor', 'General', 'MapInfo']

    from memops.general.Version import cmpVersionStrings
    newVersion = currentVersionStr

    compar = cmpVersionStrings(fromVersion, newVersion)
    if compar < 0:
        # fromVersion is older
        srcDir = 'upgrade'
    elif compar > 0:
        # fromVersion is newer
        srcDir = 'downgrade'
    else:
        # versions identical
        return

    ss = fromVersion.replace('.', '_')
    moduleDir = "memops.format.compatibility.%s.v_%s" % (srcDir, ss)
    try:
        result = __import__(moduleDir, {}, {}, compModules)
    except ImportError:
        if compar < 0:
            raise ApiError("Could not import %s\nBackwards compatibility from %s to %s missing."
                           % (moduleDir, fromVersion, newVersion))
        else:
            raise ApiError("Could not import %s\nForwards compatibility from %s to %s missing.\nTrying to load new-version data with old-version code?"
                           % (moduleDir, fromVersion, newVersion))

    for ss in compModules:
        if not hasattr(result, ss):
            raise ApiError("Compatibility error: No module %s found in %s"
                           % (ss, moduleDir))

    #
    return result
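# Sketch of the module-path construction in getCompatibilityModule: a version
# string such as '2.0.5' selects ...upgrade.v_2_0_5 when it is older than the
# current version, or ...downgrade.v_2_0_5 when it is newer ('upgrade' is
# assumed here for illustration):
fromVersion = '2.0.5'
srcDir = 'upgrade'
print "memops.format.compatibility.%s.v_%s" % (srcDir, fromVersion.replace('.', '_'))
# memops.format.compatibility.upgrade.v_2_0_5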
def parseParFile(self):

    try:
        parFile = splitext(self.dataFile)[0] + '.par'
        if isfile(parFile):
            try:
                fp = open(parFile, 'rb')
            except IOError, e:
                raise ApiError('can\'t open nmrview .par file %s: \n' % parFile + str(e))

            for i, line in enumerate(fp):
                fields = line.strip().split()
                if i == 0:
                    if fields[0] != 'header':
                        parFileFormatError = 'file %s doesn\'t appear to be an nmrview .par file: the first line doesn\'t start with \'header\' (line: %s)'
                        raise ApiError(parFileFormatError % (parFile, line))
                elif fields[0] == 'sw':
                    axisIndex = atoi(fields[1]) - 1
                    self.checkParFileNumberOfFields(parFile, i, fields, 3)
                    self.checkParFileAxisIndex(parFile, i, fields)
                    self.sw[axisIndex] = atof(fields[2])
                elif fields[0] == 'sf':
                    axisIndex = atoi(fields[1]) - 1
                    self.checkParFileNumberOfFields(parFile, i, fields, 3)
                    self.checkParFileAxisIndex(parFile, i, fields)
                    self.sf[axisIndex] = atof(fields[2])
                elif fields[0] == 'ref':
                    axisIndex = atoi(fields[1]) - 1
                    self.checkParFileNumberOfFields(parFile, i, fields, 4)
                    self.checkParFileAxisIndex(parFile, i, fields)
                    self.refpt[axisIndex] = atof(fields[3])
                    self.refppm[axisIndex] = atof(fields[2])

    except Exception, e:
        print e
        traceback.print_exc(file=sys.stdout)
def parseFile(self):

    (s, x, self.big_endian, self.swap) = getHeader(self.dataFile)

    ndim = self.ndim = int(x[ndim_index])
    if ndim < 1 or ndim > 4:
        raise ApiError('ndim = %d, should be between 1 and 4' % ndim)

    self.initDims()

    for i in range(ndim):
        c = int(x[complex_index[i]])
        if c == 0:
            raise ApiError('data is complex in dim %d, can only cope with real data so far'
                           % (i + 1))

        self.npts[i] = int(x[npts_index[i]])
        if i == 0:
            self.block[i] = self.npts[i]
        else:
            self.block[i] = 1

        j = int(x[order_index[i]]) - 1
        self.sw[i] = x[sw_index[j]]
        if self.sw[i] == 0:
            self.sw[i] = 1000  # TBD: TEMP hack
        self.sf[i] = x[sf_index[j]]
        o = x[origin_index[j]]
        self.refppm[i] = (self.sw[i] + o) / self.sf[i]
        self.refpt[i] = 0

        n = 4 * nuc_index[j]
        nuc = s[n:n + 4].strip()
        # get rid of null termination
        m = nuc.find(chr(0))
        if m >= 0:
            nuc = nuc[:m]
        if nuc == 'ID':
            # TBD: do not know if this is necessary or sufficient
            self.nuc[i] = None
        else:
            self.nuc[i] = self.standardNucleusName(nuc)
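# Sketch of the referencing arithmetic above: with refpt fixed at 0, the ppm
# value at the reference point follows from spectral width, origin and
# spectrometer frequency (hypothetical numbers; sw and origin in Hz, sf in MHz):
sw, origin, sf = 8000.0, 3000.0, 600.13
print (sw + origin) / sf  # refppm at refpt 0, ~18.3 ppm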
def getTopObjIdFromFileName(fileName, mustBeMultipart=None):
    """Get project name or TopObject guid from file name (relative or absolute)
    Note: TopObject ID is constrained to not need decoding
    """
    basename = os.path.basename(fileName)
    ll = basename.split(separatorFileNameChar)

    if mustBeMultipart is None:
        # no check on number of fields
        pass
    elif mustBeMultipart:
        # must be multi-field (normal TopObject)
        if len(ll) == 1:
            raise ApiError("TopObject fileName %s lacks field separators %s"
                           % (fileName, separatorFileNameChar))
    elif len(ll) != 1:
        # must be single field (Implementation)
        raise ApiError("TopObject fileName %s has field separators %s"
                       % (fileName, separatorFileNameChar))

    return ll[-1][:-lenFileSuffix]
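# Sketch of the fileName convention assumed by getTopObjIdFromFileName;
# separatorFileNameChar and lenFileSuffix come from the surrounding module,
# the '+' separator and '.xml' suffix used here are illustrative values only:
import os.path

separatorFileNameChar, lenFileSuffix = '+', len('.xml')
fileName = '/tmp/myProject/ccp+molecule+guid123.xml'
ll = os.path.basename(fileName).split(separatorFileNameChar)
print ll[-1][:-lenFileSuffix]  # guid123 - the TopObject guid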
def modifyPackageLocators(project, repositoryName, repositoryPath, packageNames,
                          resetPackageLocator=True, resetRepository=False):
    """ Resets package locators for specified packages to specified repository.
    Use as, for example:

    modifyPackageLocators(project, 'newChemComps', '/mydir/data/chemComps/',
                          ('ccp.molecule.ChemComp', 'ccp.molecule.ChemCompCoord'))

    Additional args:

    - resetPackageLocator: True will reset the package locator completely,
      removing old info. False will add the repository to the package locator.

    - resetRepository: True will reset the url for the repository, even if it
      already exists. False will not reset the url for the repository if it
      already exists.

    Returns the relevant repository.
    """
    repository = project.findFirstRepository(name=repositoryName)
    ss = normalisePath(repositoryPath)

    if not repository:
        repository = project.newRepository(name=repositoryName,
                                           url=Implementation.Url(path=ss))
    elif resetRepository and repository.url.path != repositoryPath:
        repository.url = Implementation.Url(path=ss)

    for packageName in packageNames:
        packageLocator = project.findFirstPackageLocator(targetName=packageName)
        if not packageLocator:
            raise ApiError("Cannot modify repository 'any' for package %s"
                           % packageName)

        if resetPackageLocator:
            packageLocator.repositories = (repository,)
        elif not repository in packageLocator.repositories:
            packageLocator.addRepository(repository)

    return repository
def getSampledValues(directory, template):
    """Get sampled values from NMRPipe 2D headers for files specified
    by template in directory
    """
    values = []
    z = 0
    while 1:
        try:
            file = getDataFileName(template, z)
        except:
            raise ApiError('template not valid')
        fullfile = os.path.join(directory, file)
        if os.path.exists(fullfile) and os.path.isfile(fullfile):
            value = getSampledValue(fullfile)
            values.append(value)
        else:
            break
        z = z + 1

    return values
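# Sketch of the plane-file iteration in getSampledValues, assuming a
# printf-style template (a hypothetical convention - the real
# getDataFileName may differ):
template = 'test%03d.ft2'
print [template % z for z in range(3)]
# ['test000.ft2', 'test001.ft2', 'test002.ft2']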
def getCompatibilityModule(fromVersion):
    """ import relevant compatibility module
    """
    compModules = ['Major', 'Minor', 'General', 'MapInfo']

    from memops.general.Version import cmpVersionStrings
    newVersion = currentVersionStr

    compar = cmpVersionStrings(fromVersion, newVersion)
    if compar < 0:
        # fromVersion is older
        srcDir = 'upgrade'
    elif compar > 0:
        # fromVersion is newer
        srcDir = 'downgrade'
    else:
        # versions identical
        return

    ss = fromVersion.replace('.', '_')
    moduleDir = "memops.format.compatibility.%s.v_%s" % (srcDir, ss)
    try:
        result = __import__(moduleDir, {}, {}, compModules)
    except ImportError:
        print("No compatibility module found for version %s" % fromVersion)
        raise

    for ss in compModules:
        if not hasattr(result, ss):
            raise ApiError("Compatibility error: No module %s found in %s"
                           % (ss, moduleDir))

    #
    return result
def setNmrExpPrototypeLink(obj, tag, topObjByGuid, delayDataDict):
    """ redirect certain NmrExpPrototype links to other experiments
    """
    doGet = delayDataDict.get
    objDataDict = doGet(obj)
    inDataList = objDataDict.get(tag)
    if inDataList:
        keyList = inDataList[0]
        guid = keyList[0]

        # map to different keys
        if guid == 'cam_wb104_2008-01-15-16-06-40_00004':
            # 'H[N[CA[CB]]]' defunct
            guid = 'cam_wb104_2008-01-15-16-06-40_00024'
            keyList[1] = keyList[1] - 2  # refExperiment serial
        elif guid == 'cam_wb104_2008-01-15-16-06-40_00033':
            # 'H[N[CO[C[C]]]]' defunct
            guid = 'cam_wb104_2008-01-15-16-06-40_00035'
            refExpMap = {4: 1, 6: 9, 7: 10, 8: 11}
            keyList[1] = refExpMap.get(keyList[1], 0)  # refExperiment serial

        # set link
        oo = topObjByGuid.get(guid)
        clazz = keyList[-1]['class']
        if oo is None:
            # NB naughty - _packageName is a private attribute.
            # But getPackageName is not static
            obj.root.refreshTopObjects(clazz._packageName)
            try:
                oo = topObjByGuid[guid]
            except KeyError:
                raise ApiError("%s.%s: NmrExpPrototype with guid %s not found or loaded"
                               % (clazz.__name__, tag, guid))

        obj.__dict__[tag] = clazz.getByKey(oo, keyList[1:-1])