Example #1
from mu import Mu
import sys

def check_transform(obj, level):
    print("    " * level + obj.transform.name + (" c" if hasattr(obj, "collider") else ""))

def check_obj(obj, level = 0):
    check_transform(obj, level)
    for o in obj.children:
        check_obj(o, level + 1)

for fname in sys.argv[1:]:
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        raise
    check_obj(mu.obj)
Example #2
def find_props(fname, props, anims):
    mu = Mu()
    mu.objects = {}
    if not mu.read(fname):
        print("could not read: " + fname)
        raise
    check_obj(mu.obj, props, anims, "", mu)
    return mu
Example #3
def dump(fname):
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        raise
    print(mu.version)
    dump_textures(mu)
    dump_materials(mu)
    dump_object(mu, mu.obj)
Example #4
def dump(fname):
	mu = Mu()
	if not mu.read(fname):
		print("could not read: " + fname)
		raise
	print(mu.version)
	dump_textures(mu)
	dump_materials(mu)
	dump_object(mu, mu.obj)
Example #5
def makecfg(fname):
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        raise
    cfg = ConfigNode()
    add_textures(mu, cfg)
    add_materials(mu, cfg)
    add_object(mu, mu.obj, cfg)
    print(cfg.ToString())
Example #6
def readJSON(filepath):
    mu = Mu()
    if not mu.read(filepath):
        print('File not found: ' + filepath)
        return

    js = MuJS(mu.obj)
    print(js.getJSON())
    with open('test.json', 'w+') as jsf:
        json.dump(json.loads(js.getJSON()), jsf, indent=4)
Example #7
def makecfg(fname):
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        raise
    cfg = ConfigNode()
    add_textures(mu, cfg)
    add_materials(mu, cfg)
    add_object(mu, mu.obj, cfg)
    print(cfg.ToString())
Example #8
def find_lights(fname):
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        raise
    sys.stdout.write("checking " + fname)
    if check_obj(mu.obj):
        mu.write(fname+".new")
        print(" fixed")
    else:
        print(" ok")
Example #9
def main():
    if len(sys.argv) != 3:
        print("hull.py <in-name> <out-name>")
        sys.exit(1)
    fname = sys.argv[1]
    oname = sys.argv[2]
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        sys.exit(1)
    find_colliders(mu.obj)
    mu.write(oname)
Example #10
def main():
    if len(sys.argv) != 3:
        print("hull.py <in-name> <out-name>")
        sys.exit(1)
    fname = sys.argv[1]
    oname = sys.argv[2]
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        sys.exit(1)
    find_colliders(mu.obj)
    mu.write(oname)
Example #11
def main():
    wheel_mu = sys.argv[1]
    mu = Mu()
    if not mu.read(wheel_mu):
        print("could not read: " + fname)
        raise
    find_wheels(mu.obj)
    if len(sys.argv) > 2:
        node = ConfigNode.loadfile(sys.argv[2])
        wheel = node.GetNode('Wheel')
        if not wheel:
            print("could not find Wheel")
            sys.exit(1)
        adjust_wheel(wheel)
        mu.write("wheelout.mu")
    else:
        for w in wheel_colliders.keys():
            node = wheel_cfg(w, wheel_colliders[w])
            print("Wheel " + node.ToString())
Example #12
    def inferBestCMu(self, samples, currentGraph):
        #import threading
        import time
        start_time = time.time()
        fitness = Fitness()
        threads = []
        #can we run the method for each sample in parallel? This would speed up the process by a lot. The samples are not dependent on each other!
        for sampleInd in range(1, len(samples)):  #Skip the first 'dummy' precursor sample
            self.inferBestCMuPerSample(samples, sampleInd, currentGraph, fitness)
            #t = threading.Thread(target=self.inferBestCMuPerSample, args=[samples,sampleInd,currentGraph, fitness])
            #threads.append(t)
            #t.start()
        #for thread in threads:
        #	thread.join()

        #After we have inferred the best C and mu for each sample, we can update the best C and mu in the samples.
        for sample in range(0, len(samples)):

            samples[sample].originalCMu = samples[sample].bestCMu
            if samples[sample].bestCMu is None:
                measurementLength = len(samples[0].measurements.measurements)
                samples[sample].Mu = Mu(0)  #assume 100% tumor
                #Set a default bestCMu in this case, we don't know the solution.
                print "sample ", sample, " setting CMu to 2"
                samples[sample].bestCMu = [
                    CMuCombination(C([2, 2]), Mu(0), self.eventDistances)
                ] * measurementLength
            else:
                if samples[sample].bestCMu[0] is not None:
                    print "setting mu to: ", samples[sample].bestCMu[
                        0].mu.mu, " in sample: ", samples[sample].name
                    samples[sample].Mu = samples[sample].bestCMu[0].mu
                else:  #without a successfully inferred C and mu the sample is so complex it is most likely 100% tumor or contains a lot of subclones.
                    print sample, " Assuming 100% tumor"

                    samples[sample].Mu = Mu(0)  #assume 100% tumor

        print("--- %s seconds for all samples of 1 patient ---" %
              (time.time() - start_time))
        return samples
Example #13
def main():
    wheel_mu = sys.argv[1]
    mu = Mu()
    if not mu.read(wheel_mu):
        print("could not read: " + fname)
        raise
    find_wheels(mu.obj)
    if len(sys.argv) > 2:
        text = open(sys.argv[2], "rt").read()
        node = ConfigNode.load(text)
        wheel = node.GetNode('Wheel')
        if not wheel:
            print("could not find Wheel")
            sys.exit(1)
        adjust_wheel(wheel)
        mu.write("wheelout.mu")
    else:
        for w in wheel_colliders.keys():
            node = wheel_cfg(w, wheel_colliders[w])
            print("Wheel "+ node.ToString())
Example #14
	def computePermutationErrors(self, pCMatrix, pAMatrix, pSamples, pTreeList, permutedC, permutedA):
		
		#Import a class specific for computing permutation errors
		#Use it to compute the errors below
		simulationErrorHandler = SimulationErrorHandler()
		
		#Obtain the muData from the samples (also provide the real mu!)
		realSamples = self.simulationData.samples
		pMuTumor = []
		realMuTumor = []
		
		for sample in range(0, len(pSamples)):
			pSampleMuT = pSamples[sample].bestCMu[0].mu.mu[1]
			#sampleMuT = realSamples[sample].bestCMu[0].mu.mu[1]
			
			#print "current saved mu: ", self.savedMu[sample]
			#print "normal mu: ", 1-(100*self.savedMu[sample])
			tumorMuConverted = int(100*self.savedMu[sample])
			realMuTumor.append(Mu(100-tumorMuConverted))
			#sampleMuT = realSamples[sample].bestCMu[0].mu
			pMuTumor.append(pSampleMuT)
			#savedMu.append(sampleMuT)
			
		
		[ambiguityScore, totalScore] = simulationErrorHandler.computeAmbiguityScore(permutedA, pAMatrix, self.savedMu, pMuTumor)
		
		pCMatrixFloat = pCMatrix.astype(float)
		error = simulationErrorHandler.computeCError(permutedC, pCMatrixFloat)
		pCError = error / float(permutedC.size)
		
		
		aData = simulationErrorHandler.computeAError(permutedA, pAMatrix)
		pAError = aData[0] / float(permutedA.size)
		
	
		muData = simulationErrorHandler.computeMuError(pSamples, realMuTumor)
		pMuError = muData[0]
		
		print self.simulationData.realTree.edgeList
		pTreeScore = simulationErrorHandler.computeTreeError(pTreeList, self.simulationData.realTree)
		
		print pCError
		print pAError
		print pMuError
		print pTreeScore
		print ambiguityScore
		print totalScore
		
		#return the scores
		return [pCError, pAError, pMuError, pTreeScore, ambiguityScore, totalScore]
Example #15
def computeAmbiguousPositions():

    #We can generate the allele list with the event distances function
    kmin = 1  #the kmin and kmax used in the simulations
    kmax = 6
    eventDistance = EventDistances(kmin, kmax)

    #get the allele list
    alleleList = eventDistance.alleleList

    #make sure that alleles are not duplicated
    LAFAndCombinations = dict()
    normalAlleles = Alleles(1, 1)
    for allele in alleleList:
        AOccurrences = [m.start() for m in re.finditer('A', allele)]
        ACount = len(AOccurrences)
        BOccurrences = [m.start() for m in re.finditer('B', allele)]
        BCount = len(BOccurrences)

        alleleObj = Alleles(ACount, BCount)
        if BCount >= ACount:
            alleleCombination = AlleleCombination([normalAlleles, alleleObj])

            for muIndex in range(0, 101):

                LAF = alleleCombination.computeLAF(Mu(muIndex))  #compute the LAF that each combination would generate
                if LAF not in LAFAndCombinations.keys():
                    #LAFAndCombinations[LAF] = []
                    LAFAndCombinations[LAF] = 0

                #LAFAndCombinations[LAF].append((alleleObj.getAllelesAsString(), muIndex))
                LAFAndCombinations[LAF] += 1

    #print LAFAndCombinations
    #For every mu, we should check which LAF the combination with normal would generate

    #With this dictionary, we can check if a LAF has more than one solution. If it does, we can check whether the position was inferred correctly, and from that compute a score showing the
    #number of ambiguous positions that we were able to infer correctly.
    return LAFAndCombinations
Example #16
    def __init__(self):
        self.kmin = settings.general['kmin']
        self.kmax = settings.general['kmax']

        #Pre-compute the event distances used by the C+mu combinations below
        self.eventDistances = EventDistances(self.kmin, self.kmax)

        #Initialize C+mu combinations and store in a matrix, objects do not need to be remade in loop
        #These are the combinations that we will explore and score for each LAF in the method
        self.combinationMatrix = np.empty([101, self.kmax], dtype='object')
        for muIndex in range(0, 101):  #we allow a maximum of 0-100, so it is fine to use these settings here.
            muClass = Mu(muIndex)
            for k in range(self.kmin, self.kmax + 1):
                c = C([2, k])
                cMuCombination = CMuCombination(c, muClass,
                                                self.eventDistances)
                self.combinationMatrix[muIndex][k - 1] = cMuCombination

        self.cCombinations = CCombinations(self.kmin, self.kmax)  #re-make this, we use iterators, otherwise the combinations need to be stored in memory.
Example #17
def thread_func(parms):
    name = parms
    input = Mu()
    input.file = open(name + ".bin", "rb")
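    # the .bin file holds a vertex block, several face lists, and a single point index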
    verts = read_vertices(input)
    faces = read_facelist(input)
    final_faces = read_facelist(input)
    point = input.read_int()
    lit_faces = read_facelist(input)
    new_faces = read_facelist(input)
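    # rebuild the data as a Mu model: one child mesh per face list, plus empties marking the points of interest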
    output = Mu()
    output.materials = []
    output.textures = []
    output.obj = make_empty(name)
    output.obj.children.append(make_mesh("faces", verts, faces))
    output.obj.children.append(make_mesh("final_faces", verts, final_faces))
    output.obj.children.append(make_mesh("lit_faces", verts, lit_faces))
    output.obj.children.append(make_mesh("new_faces", verts, new_faces))
    if point >= 0:
        p = make_empty(f"point-{point}")
        p.transform.localPosition = verts[point]
        output.obj.children.append(p)
    for ep in extra_points:
        p = make_empty(f"epoint-{ep}")
        p.transform.localPosition = verts[ep]
        output.obj.children.append(p)
    output.write(name + ".mu")
    print(name)
Example #18
pkl_file = open(simulationFolder + '/simulationData.pkl', 'rb')
simulationData = pickle.load(pkl_file)
pkl_file.close()

#Read the real data
cMatrix = simulationData.cMatrix
aMatrix = simulationData.aMatrix
realTree = simulationData.realTree

#Obtain the mu from the files
savedMu = []
with open(simulationFolder + '/RealMu_0.txt', "r") as muFile:

    for line in muFile:
        mu = float(line)
        savedMu.append(Mu(mu))

samples = simulationData.samples

#Run TargetClone on samples

pkl_file = open(simulationSettings.files['targetCloneInstance'], 'rb')
targetClone = pickle.load(pkl_file)
pkl_file.close()

#Set the segmentation
segmentation = Segmentation()
segmentation.setSegmentationFromFile(
    simulationSettings.files['segmentationFile'])

targetClone.segmentation = segmentation
Example #19
precursorAlleleACount = int(settings.general['precursorAlleleACount'])
precursorAlleleBCount = int(settings.general['precursorAlleleBCount'])

#Check if the ploidy is correct
totalPloidy = precursorAlleleACount + precursorAlleleBCount

precursorTumorFrequency = 100  #Check if the ploidy is different from 2 (or the alleles are unbalanced). If so, the precursor is not a healthy cell and we have 100% tumor
if precursorAlleleACount != 1 or precursorAlleleBCount != 1 or precursorPloidy != 2:
    precursorTumorFrequency = 0  #In this case we have 100% tumor in the precursor. Only if the precursor is normal is the tumor fraction 0%.

#Initialize the 'healthy' sample, this can now also be a precursor
healthySample = Sample(None, None)
healthySample.C = [C([2, precursorPloidy])] * measurementLength
healthySample.A = [Alleles(precursorAlleleACount, precursorAlleleBCount)] * measurementLength
healthySample.Mu = [Mu(precursorTumorFrequency)]
#obtain the chromosome, start and end information from the other samples
healthySample.measurements = LAF([0.5] * measurementLength,
                                 tmpSamples[0].measurements.chromosomes,
                                 tmpSamples[0].measurements.starts,
                                 tmpSamples[0].measurements.ends)
healthySample.somaticVariants = [0] * somVarNum
healthySample.somaticVariantsInd = tmpSamples[0].somaticVariantsInd
healthySample.setParent(None)
healthySample.name = 'Precursor'  #do not call it healthy, it may also be a 4N precursor.

#Make a dummy bestCMu for the healthy sample
eventDistances = targetClone.eventDistances
bestCMuHealthy = CMuCombination(C([2, precursorPloidy]),
                                Mu(precursorTumorFrequency), eventDistances)
Example #20
from mu import Mu
import sys


def check_transform(obj, level):
    print("    " * level + obj.transform.name)


def check_obj(obj, level=0):
    check_transform(obj, level)
    for o in obj.children:
        check_obj(o, level + 1)


for fname in sys.argv[1:]:
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        raise
    check_obj(mu.obj)
Example #21
from mu import Mu
import sys


def check_mesh(obj, level):
    if hasattr(obj, "shared_mesh") and not hasattr(obj, "renderer"):
        print(obj.transform.name)
        obj.shared_mesh = None
        return True
    return False


def check_obj(obj, level=0):
    changed = check_mesh(obj, level)
    for o in obj.children:
        changed = check_obj(o, level + 1) or changed
    return changed


for fname in sys.argv[1:]:
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        break
    if check_obj(mu.obj):
        mu.write(fname + ".out")
Example #22
    "coll_torus1",
    "coll_torus2",
    "coll_torus3",
    "coll_torus4",
    "coll_torus5",
    "coll_torus6",
    "coll_torus7",
    "coll_torus8",
]

def check_transform(obj):
    if obj.transform.name in broken_xforms:
        print("zeroing lp for " + obj.transform.name)
        obj.transform.localPosition = 0, 0, 0

def check_obj(obj):
    for o in obj.children:
        check_obj(o)
    check_transform(obj)
    if hasattr(obj, "animation"):
        for clip in obj.animation.clips:
            check_clip(clip)

fname = "centrifuge.mu"
mu = Mu()
if not mu.read(fname):
    print("could not read: " + fname)
    raise
check_obj(mu.obj)
mu.write("output.mu")
Example #23
def computeCorrectAmbiguityScore(LAFAndCombinations, simulationFolder):
    ambiguityScores = []
    ambiguities = []
    correctAmbiguityPositions = 0
    totalAmbiguousPositions = 0
    totalSize = 0
    #We need to read the actual A matrix values and also the mu
    normalAlleles = Alleles(1, 1)
    #1. read the simulated A matrix

    allPerColAmbiguities = dict()

    for subdir, dirs, files in os.walk(simulationFolder):
        if subdir == simulationFolder:  #we are not interested in the root folder
            continue
        for file in files:
            if re.match('RealA', file):  #read the file and obtain the a matrix
                realAMatrix = np.loadtxt(subdir + '/' + file, dtype=str)
            if re.match('RealMu', file):  #also read the real mu
                realMu = collectErrorsFromFile(file, subdir)

            #Then load the inferred A and mu
            if re.match('EstimatedA', file):  #read the file and obtain the a matrix
                estimatedAMatrix = np.loadtxt(subdir + '/' + file, dtype=str)
            if re.match('EstimatedMu', file):  #also read the real mu
                estimatedMu = collectErrorsFromFile(file, subdir)

        #Compute the LAF that each measurement in the real data would generate
        perColAmbiguityCount = dict()
        for row in range(0, realAMatrix.shape[0]):
            for col in range(0, realAMatrix.shape[1]):

                if col not in perColAmbiguityCount:
                    perColAmbiguityCount[col] = realAMatrix.shape[0]

                totalSize += 1
                #generate allele object
                allele = realAMatrix[row][col]
                AOccurrences = [m.start() for m in re.finditer('A', allele)]
                ACount = len(AOccurrences)
                BOccurrences = [m.start() for m in re.finditer('B', allele)]
                BCount = len(BOccurrences)

                alleleObj = Alleles(ACount, BCount)
                alleleCombination = AlleleCombination(
                    [normalAlleles, alleleObj])

                #Compute the LAF this combination would generate
                muNormal = 1 - (realMu[col])
                realMuObj = Mu(int(muNormal * 100))  #this function only takes integer indices!
                realLAF = alleleCombination.computeLAF(realMuObj)

                #Check if this LAF is ambiguous y/n.
                ambiguousCount = LAFAndCombinations[realLAF]

                #If the ambiguous count > 1 and we are correct, we make a note of that.
                if ambiguousCount > 1:
                    totalAmbiguousPositions += 1

                    if realAMatrix[row][col] == estimatedAMatrix[row][col]:
                        correctAmbiguityPositions += 1
                        perColAmbiguityCount[col] -= 1  #Determine how many positions are wrong.

        #Divide the ambiguity score by the total number of positions.
        #print correctAmbiguityPositions
        #print correctAmbiguityPositions / float(totalAmbiguousPositions)
        #print totalAmbiguousPositions / float(totalSize)
        #ambiguityScores.append(correctAmbiguityPositions / float(totalAmbiguousPositions)) #Reporting as % of ambiuguities
        #Reporting the ambiguity scores as the fraction of the total
        ambiguityScores.append(correctAmbiguityPositions / float(totalSize))

        ambiguities.append(totalAmbiguousPositions / float(totalSize))
        allPerColAmbiguities[subdir] = perColAmbiguityCount
    #Compute an average for every noise level.

    #convert to z-scores

    averageAmbiguityScore = sum(ambiguityScores) / float(len(ambiguityScores))
    averageAmbiguities = sum(ambiguities) / float(len(ambiguities))

    return [
        averageAmbiguities, averageAmbiguityScore, ambiguityScores,
        allPerColAmbiguities
    ]
Example #24
def find_props(fname, props):
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        raise
    check_obj(mu.obj, props, "")
Example #25
    "coll_torus1",
    "coll_torus2",
    "coll_torus3",
    "coll_torus4",
    "coll_torus5",
    "coll_torus6",
    "coll_torus7",
    "coll_torus8",
]

def check_transform(obj):
    if obj.transform.name in broken_xforms:
        print("zeroing lp for " + obj.transform.name)
        obj.transform.localPosition = 0, 0, 0

def check_obj(obj):
    for o in obj.children:
        check_obj(o)
    check_transform(obj)
    if hasattr(obj, "animation"):
        for clip in obj.animation.clips:
            check_clip(clip)

fname = "centrifuge.mu"
mu = Mu()
if not mu.read(fname):
    print("could not read: " + fname)
    raise
check_obj(mu.obj)
mu.write("output.mu")
Example #26
def find_skins(fname):
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        raise
    check_obj(mu.obj)
Example #27
from mu import Mu
import sys

def check_mesh(obj, level):
    if hasattr(obj, "shared_mesh") and not hasattr(obj, "renderer"):
        print(obj.transform.name)
        obj.shared_mesh = None
        return True
    return False

def check_obj(obj, level = 0):
    changed = check_mesh(obj, level)
    for o in obj.children:
        changed = check_obj(o, level + 1) or changed
    return changed

for fname in sys.argv[1:]:
    mu = Mu()
    if not mu.read(fname):
        print("could not read: " + fname)
        break
    if check_obj(mu.obj):
        mu.write(fname+".out")