Example #1
File: main.py Project: dlin8/my3D
def main():
    #screenOne = screen.createScreen(500,500)
    #draw.circle(edgeMatrix, 250, 250, 0, 50, .00001)
    #matrix.drawEdges(screenOne, edgeMatrix, green)

    # screen.display(screenOne)
    screenOne = screen.createScreen(500, 500)
    parser.parseFile('script3', screenOne, green, edgeMatrix, transformMatrix)
    matrix.drawEdges(screenOne, edgeMatrix, green)
    screen.display(screenOne)
Example #2
    def __init__(self):
        parser.parseFile()

        self.neuron = Neuron(self.inputCount())
        self.neuron.randomize(-1.0, 1.0)
        self.teachingStep = 0
        self.prevResponse = 0
        self.prevError = 0
        self.curResponse = 0
        self.curError = 0
Example #3
def run(k):
    # 1) Load data files
    x_training = parseFile("../data/X_original.txt")
    y_training = parseFile("../data/Y_original.txt")
    x_testing = parseFile("../data/X_test.txt")
    y_testing = parseFile("../data/Y_test.txt")
    
    # 2) Get the maximums from the training data 
    #    (for normalization of _all_ data)
    x_maximums = maxColumns(x_training)
    y_maximums = maxColumns(y_training)
    
    # 3) Normalize data
    x_training_normal = normalize(x_maximums, x_training)
    y_training_normal = normalize(y_maximums, y_training)
    x_testing_normal = normalize(x_maximums, x_testing)
    y_testing_normal = normalize(y_maximums, y_testing)

    # 4) Compute error for each row in the Training Data.
    training_errors = []
    for row_index in range(len(x_training_normal)):
        # A) Get K nearest neighbor indexes for the row.
        #    When k = 1, we should just return the corresponding row index,
        #    since the error will be 0.  We only do this for the training data, though.
        if k == 1:
            neighbors = [row_index]
        else:
            neighbors = kNearestNeighbors(k, x_training_normal[row_index], x_training_normal)
        
        # B) Record the absolute value of the error.
        training_errors.append( abs(y_training_normal[row_index][0] - averageNeighborOutput(neighbors, y_training_normal)) )
        
    # 5) Get the MAE for the Training Data.
    training_mae = sum(training_errors)/len(training_errors)
    
    # 6) Get the R_MSE for the Training Data.
    training_r_mse = sqrt(sum( [pow(error,2) for error in training_errors] )/len(training_errors))
    
    # 7) Compute the error for each row of the Test Data.
    testing_errors = []
    for row_index in range(len(x_testing_normal)):
        # A) Get K nearest neighbor indexes for the row.
        neighbors = kNearestNeighbors(k, x_testing_normal[row_index], x_training_normal)
        
        # B) Record the absolute value of the error.
        testing_errors.append( abs( y_testing_normal[row_index][0] - averageNeighborOutput(neighbors, y_training_normal)) )
    
    # 8) Get the MAE for the Testing Data.
    testing_mae = sum(testing_errors)/len(testing_errors)
    
    # 9) Get the R_MSE for the Testing Data.
    testing_r_mse = sqrt(sum( [pow(error, 2) for error in testing_errors] )/len(testing_errors))
    
    return (training_mae, training_r_mse, testing_mae, testing_r_mse)
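The example above relies on several helpers that are not shown. A minimal sketch of implementations consistent with how they are called (hypothetical; the project's real versions may differ):

from math import sqrt

def maxColumns(rows):
    # Column-wise maximums of a list-of-lists (assumes non-empty numeric data).
    return [max(col) for col in zip(*rows)]

def normalize(maximums, rows):
    # Scale every value by its column maximum.
    return [[value / m for value, m in zip(row, maximums)] for row in rows]

def euclidean(a, b):
    return sqrt(sum((p - q) ** 2 for p, q in zip(a, b)))

def kNearestNeighbors(k, query, rows):
    # Indexes of the k rows closest to the query point.
    order = sorted(range(len(rows)), key=lambda i: euclidean(query, rows[i]))
    return order[:k]

def averageNeighborOutput(neighbors, y_rows):
    # Mean of the first output column over the neighbor rows.
    return sum(y_rows[i][0] for i in neighbors) / len(neighbors)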
Example #4
File: recipemd.py Project: ltgrant/opt
def main():
    parser = argparse.ArgumentParser(
        description='Work with recipe markdown files')
    parser.add_argument(
        '-i',
        metavar='mdfile',
        type=argparse.FileType('r'),
        help='name of the markdown file to process, default: use stdin',
        default=sys.stdin)
    parser.add_argument('-o',
                        metavar='output',
                        type=argparse.FileType('wb'),
                        help='name of the output file, default: use stdout',
                        default=sys.stdout.buffer)
    parser.add_argument(
        '-t',
        metavar='outtype',
        choices=['xml', 'json'],
        help='output type to generate (xml, json), default: xml',
        default='xml')
    parser.add_argument(
        '--xslt',
        metavar='xslt',
        help='xslt to reference in PI of generated XMLs, ignored if outtype is not "xml"',
        default=None)
    args = parser.parse_args()

    recipes = parseFile(args.i)

    if args.t == 'xml':
        xs.dump(args.o, recipes, args.xslt)
    else:
        js.dump(args.o, recipes)
Example #5
def run_bowtie(bowtie_index, contents, frequency_tree):
    reads_to_be_analyzed = 2500
    outputs = []
    for i in range(len(contents)):
        # print("############# BEGINNING SEQUENCING " + str(i + 1) + " OF " + str(len(contents)) + " #############", file=sys.stderr)
        # Note: the original command ended in ">> temp.sam", but shell redirection
        # does not pass through Popen without shell=True, so stdout is written to
        # the file explicitly instead.
        args = shlex.split("/software/bowtie2/bowtie2 --quiet -x " +
                           bowtie_index + " --sra-acc " + contents[i][1] +
                           " --sample-sra " + str(reads_to_be_analyzed) +
                           " --threads 4 --no-head")
        print("ARGS = " + str(args))
        with open("temp.sam", "ab") as sam_out:
            p = subprocess.Popen(args, stdout=sam_out)
            p.wait()
        # print("############# FINISHED SEQUENCING " + str(i + 1) + " OF " + str(len(contents)) + " #############", file=sys.stderr)
        try:
            data = parseFile("temp.sam", frequency_tree)
            outputs.append(data)
        except Exception:
            continue
        finally:
            # Remove the temp file even when parsing fails, so the next iteration
            # does not append to stale alignments.
            if os.path.exists("temp.sam"):
                os.remove("temp.sam")
    return outputs
Example #6
File: common.py Project: ltgrant/opt
def process(obj_id, target, xslt = None):
  """ get file from git, process, write to target folder

  Arguments:
  obj_id -- git object id of the file (as string)
  target -- target folder path (as string, with trailing slash)
  xslt   -- xslt file path if an xml-stylesheet PI should be included;
            no PI is included when it is None (the default)
  """
  stream = io.TextIOWrapper(git.blob_file_handle(obj_id), encoding='utf8')
  r = parseFile(stream)
  dump(target, r, xslt)
Example #7
def process(obj_id, target):
    """ get file from git, process, write to target folder

    Arguments:
    obj_id -- git object id of the file (as string)
    target -- target folder path (as string, with trailing slash)
    """
    stream = io.TextIOWrapper(git.blob_file_handle(obj_id), encoding='utf8')
    r = parseFile(stream)
    rec = serializeRecipes(r)
    rec.addprevious(etree.ProcessingInstruction(
        'xml-stylesheet', 'type="text/xsl" href="' + settings.XSLT + '"'))
    et = etree.ElementTree(rec)
    et.write(target, xml_declaration=True, pretty_print=True, encoding='UTF-8')
Example #8
def main():
    global args, facts, goals, rules, dic
    args = parseArg()
    facts, goals, rules, dic = parseFile(args.filename, args.default)

    print(goals)
    if args.verbose:
        print(bc.GREEN + 'Facts:', list(facts.keys()), bc.RES, "\n")
        print(bc.BLUE + 'Goals:', list(goals.keys()), bc.RES, "\n")
        printdic(dic)
        printrules(rules)
    solve = Solver(rules, goals, dic, args)
    solve.solver()
Example #9
def main():
    parser = argparse.ArgumentParser(description='Work with recipe markdown files')
    parser.add_argument('-i', metavar='mdfile', type=argparse.FileType('r'), help='name of the markdown file to process, default: use stdin', default=sys.stdin)
    parser.add_argument('-o', metavar='output', type=argparse.FileType('wb'), help='name of the output file, default: use stdout', default=sys.stdout.buffer)
    parser.add_argument('-t', metavar='outtype', choices=['xml', 'json'], help='output type to generate (xml, json), default: xml', default='xml')
    parser.add_argument('--xslt', metavar='xslt', help='xslt to reference in PI of generated XMLs, ignored if outtype is not "xml"', default=None)
    args = parser.parse_args()

    recipes = parseFile(args.i)

    if args.t == 'xml':
        xs.dump(args.o, recipes, args.xslt)
    else:
        js.dump(args.o, recipes)
Example #10
def kmeans():
    l1, l2 = list(), list()

    X = parser.parseFile(config.csv_file_name)
    for i, row in enumerate(X):
        sublist = [float(row['latitude']), float(row['longitude'])]
        l1.append(sublist)
        l2.append(row)

    ss = StandardScaler(with_mean=False, with_std=False)
    X = ss.fit_transform(l1)
    km = KMeans(n_clusters=100,
                init='k-means++',
                n_init=10,
                max_iter=300,
                tol=0.0001,
                precompute_distances=True,
                verbose=0,
                random_state=None,
                copy_x=True,
                n_jobs=1)
    km.fit(X)

    labels = km.labels_
    cluster_centers = km.cluster_centers_
    labels_unique = numpy.unique(labels)
    n_clusters_ = len(labels_unique) - 1

    #We generate the list of dicts representing the center of our clusters.
    #cluster is the ID of the cluster
    #x and y are the coordinates for the center of this cluster
    #number is the number of points in that cluster
    centers = list()
    for i, row in enumerate(cluster_centers):
        centers.append({
            'cluster': i,
            'x': row[0],
            'y': row[1],
            'number': labels.tolist().count(i)
        })

    points = list()
    for i, row in enumerate(X):
        # -1 would mean the point was rejected from every cluster, so we drop it.
        # KMeans never actually assigns -1 (unlike MeanShift with cluster_all=False),
        # so this check is kept only for symmetry with the meanShift variant.
        if labels[i] != -1:
            l2[i]['cluster'] = labels[i]
            points.append(l2[i])

    return centers, points
Example #11
def meanShift():
    l1, l2 = list(), list()

    X = parser.parseFile(config.csv_file_name)
    for i, row in enumerate(X):
        # careful: mean shift needs a sufficiently large amount of data to work
        sublist = [float(row['latitude']), float(row['longitude'])]
        l1.append(sublist)
        l2.append(row)

    ss = StandardScaler(with_mean=False, with_std=False)
    X = ss.fit_transform(l1)
    bandwidth = estimate_bandwidth(X, quantile=0.01, n_samples=1000)
    ms = MeanShift(bandwidth=bandwidth,
                   bin_seeding=True,
                   cluster_all=False,
                   min_bin_freq=15)
    ms.fit(X)

    labels = ms.labels_
    cluster_centers = ms.cluster_centers_
    labels_unique = numpy.unique(labels)
    n_clusters_ = len(labels_unique) - 1

    #We generate the list of dicts representing the center of our clusters.
    #cluster is the ID of the cluster
    #x and y are the coordinates for the center of this cluster
    #number is the number of points in that cluster
    centers = list()
    for i, row in enumerate(cluster_centers):
        centers.append({
            'cluster': i,
            'x': row[0],
            'y': row[1],
            'number': labels.tolist().count(i)
        })

    points = list()
    for i, row in enumerate(X):
        # -1 means the point doesn't belong to any cluster (it was rejected),
        # so we drop those points instead of returning them.
        if labels[i] != -1:
            l2[i]['cluster'] = labels[i]
            points.append(l2[i])

    return centers, points
Example #12
def meanShift():
    l1, l2 = list(), list()

    X = parser.parseFile(config.csv_file_name)
    for i, row in enumerate(X):
        # careful: mean shift needs a sufficiently large amount of data to work
        sublist = [float(row['latitude']), float(row['longitude'])]
        l1.append(sublist)
        l2.append(row)

    ss = StandardScaler(with_mean=False, with_std=False)
    X = ss.fit_transform(l1)
    bandwidth = estimate_bandwidth(X, quantile=0.01, n_samples=1000)
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True, cluster_all=False, min_bin_freq=15)
    ms.fit(X)

    labels = ms.labels_
    cluster_centers = ms.cluster_centers_
    labels_unique = numpy.unique(labels)
    n_clusters_ = len(labels_unique) - 1

    #We generate the list of dicts representing the center of our clusters.
    #cluster is the ID of the cluster
    #x and y are the coordinates for the center of this cluster
    #number is the number of points in that cluster
    centers = list()
    for i, row in enumerate(cluster_centers):
        centers.append({'cluster': i, 'x': row[0], 'y': row[1], 'number': labels.tolist().count(i)})

    points = list()
    for i, row in enumerate(X):
        # -1 means the point doesn't belong to any cluster (it was rejected),
        # so we drop those points instead of returning them.
        if labels[i] != -1:
            l2[i]['cluster'] = labels[i]
            points.append(l2[i])

    return centers, points
Example #13
def main(filePath):
    graph, sequence = parseFile(filePath)

    # Get all the possible Euler circuits in given graph.
    eulerCircuits = graphutils.getEulerianCircuits(graph)
    longestCommonSStrings = []
    circuitLabels = []

    # For each of the circuits found, get the circuit label string and also the longest common substring.
    for circuit in eulerCircuits:
        circuitLabel = graphutils.getLabelsForPath(graph, circuit)
        circuitLabels.append(circuitLabel)
        longestCommonSStrings.append(
            graphutils.longestCommonSubstring(sequence, circuitLabel))

    # Out of all the substrings, find the longest
    longestIndex = 0
    longestString = 0
    for i in range(len(longestCommonSStrings)):
        if len(longestCommonSStrings[i]) > longestString:
            longestString = len(longestCommonSStrings[i])
            longestIndex = i

    # The following is all for nice output
    print 'Eulerian Circuit: %s' % eulerCircuits[longestIndex]
    print 'Symbols on path: %s' % circuitLabels[longestIndex]
    print 'Sequence: %s' % sequence
    print 'Matched Substring: %s' % longestCommonSStrings[longestIndex]
    startIndexSeq = findStringIndex(sequence,
                                    longestCommonSStrings[longestIndex])
    startIndexCircuit = findStringIndex(circuitLabels[longestIndex],
                                        longestCommonSStrings[longestIndex])
    lengthSString = len(longestCommonSStrings[longestIndex]) - 1
    print 'Found sequence: %d-%d and subsequence: %d-%d' % (
        startIndexSeq, startIndexSeq + lengthSString, startIndexCircuit,
        startIndexCircuit + lengthSString)
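findStringIndex is not shown; given that its result is used as a start offset, a plausible one-liner (hypothetical) is:

def findStringIndex(haystack, needle):
    # Start index of the first occurrence of needle in haystack (-1 if absent).
    return haystack.find(needle)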
Example #14
def kmeans():
    l1, l2 = list(), list()

    X = parser.parseFile(config.csv_file_name)
    for i, row in enumerate(X):
        sublist = [float(row['latitude']), float(row['longitude'])]
        l1.append(sublist)
        l2.append(row)

    ss = StandardScaler(with_mean=False, with_std=False)
    X = ss.fit_transform(l1)
    km = KMeans(n_clusters=100, init='k-means++', n_init=10, max_iter=300, tol=0.0001, precompute_distances=True, verbose=0, random_state=None, copy_x=True, n_jobs=1)
    km.fit(X)

    labels = km.labels_
    cluster_centers = km.cluster_centers_
    labels_unique = numpy.unique(labels)
    n_clusters_ = len(labels_unique) - 1

    #We generate the list of dicts representing the center of our clusters.
    #cluster is the ID of the cluster
    #x and y are the coordinates for the center of this cluster
    #number is the number of points in that cluster
    centers = list()
    for i, row in enumerate(cluster_centers):
        centers.append({'cluster': i, 'x': row[0], 'y': row[1], 'number': labels.tolist().count(i)})

    points = list()
    for i, row in enumerate(X):
        # -1 would mean the point was rejected from every cluster, so we drop it.
        # KMeans never actually assigns -1 (unlike MeanShift with cluster_all=False),
        # so this check is kept only for symmetry with the meanShift variant.
        if labels[i] != -1:
            l2[i]['cluster'] = labels[i]
            points.append(l2[i])

    return centers, points
Example #15
#!/usr/bin/python
import parser
import os
import os.path

HAND_HISTORY_PATH = "/home/msharman/HandHistory/antler88"

for f in os.listdir(HAND_HISTORY_PATH):
    path = os.path.join(HAND_HISTORY_PATH, f)
    try:
        print "Parsing %s:" % f
        parser.parseFile(path)
    except Exception as e:
        print "Exception parsing %s: %s" % (f, e)
        exit()
Example #16
from parser import parseFile
from requestHandler import requestSPARQL
from fileManager import save, FILE

values = parseFile('./data/training_set_91_91.tsv')

data = []

l = len(values)
i = 1
for value in values:
    ind = '{0:03}'.format(i)
    print "[%s/%d] Request for (%s, %s)" % (ind, l, value[0], value[1]),
    res = requestSPARQL(gene=value[0], drug=value[1])
    if res.status_code == 200:
        print "\t[OK]"
        data.append({
            'gene': value[0],
            'drug': value[1],
            'asso': value[2],
            'json': res.content
        })
    else:
        print "\t\t[FAIL]",
    i += 1
    print "\n",

save(data, FILE)
Example #17
import re
import unittest
from os import listdir
from os.path import isfile, join

from main import main
from parser import parseFile
from problem import Problem
from test_operations import TestOperation

# all the test files in /tests
testfiles = [f for f in listdir("./tests") if isfile(join("./tests", f))]

# solutions to problems
solutions = []

# display for a solution
def printSolution(s):
  print("alg: ", s[0][0:6], "error: ", s[1], "result: ", s[2], "steps: ", s[3], "time: ", s[4],"nodes: ", s[5],  "max depth: ", s[6], "branching f: ", len(s[7].ops))

for file in testfiles:
  if re.match('test', file) is not None:
    solutions.append(main(parseFile("tests/" + file), False))

for fi in range(0, len(testfiles), 2):
  printSolution(solutions[fi])
  printSolution(solutions[fi+1])
  print("----------------------------------------")
Example #18
    for key in keys:
        sds[key] = 0
        for house in l:
            sds[key] += (float(house[key]) - means[key])**2
    for key in keys:
        sds[key] = (sds[key] / (len(l) - 1))**0.5
    #print('statistic info : ')
    #print(means, sds)
    return (means, sds)
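
The fragment above begins partway through findStats, after the means have been computed; a hypothetical opening consistent with the code shown (names assumed):

def findStats(l, keys):
    means, sds = {}, {}
    # Per-key mean; the fragment above then fills sds with the
    # sample standard deviations and returns (means, sds).
    for key in keys:
        means[key] = sum(float(house[key]) for house in l) / len(l)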


def softmax(x, mean, sd) -> float:
    # Despite its name, this is the logistic (sigmoid) function of the
    # standardized value (x - mean) / sd, mapping it into (0, 1).
    return 1 / (1 + math.exp(-(x - mean) / sd))


(data, prices) = preprocess(parseFile('database.txt'))
#print(data)
#print(prices)
matdata = numpy.zeros((len(data), 10))
matprices = numpy.zeros((len(data)))
(means, sds) = findStats(data, ['floor', 'm2', 'age', 'numrooms'])
priceMean = numpy.mean(prices)
priceSD = numpy.std(prices)

for (i, house) in enumerate(data):
    matdata[i][0] = house['furnished']
    matdata[i][1] = softmax(house['floor'], means['floor'], sds['floor'])
    matdata[i][2] = softmax(house['m2'], means['m2'], sds['m2'])
    matdata[i][3] = softmax(house['age'], means['age'], sds['age'])
    for j in range(4, 9):
        matdata[i][j] = 0
Example #19
    def setUp(self):
        import parser
        self.t = parser.parseFile('data/v5_b.xml')
Example #20
    def _test_parse_file(self, inputfile, result):
        # Parse once instead of re-parsing the input file for every assertion.
        parsed = parseFile(inputfile)
        self.assertEqual(parsed.alg, result.alg)
        self.assertEqual(parsed.startnum, result.startnum)
        self.assertEqual(parsed.targetnum, result.targetnum)
        self.assertEqual(parsed.time, result.time)
        self.assertEqual(parsed.ops, result.ops)
Example #21
                inktype = info.flags & 0x3f  # TODO: correct?
                if inktype in inktypes.keys():
                    ink = inktypes[inktype]
                else:
                    ink = "<unknown (%02x)>" % inktype
                item.addChild(QTreeWidgetItem(["Ink", ink]))
                trails = "No"
                if info.flags & 0x40:
                    trails = "Yes"
                item.addChild(QTreeWidgetItem(["Trails", trails]))
                antialias = "Off"
                if info.flags & 0x2000:
                    antialias = "Low"
                elif info.flags & 0x4000:
                    antialias = "Mid"
                if info.flags & 0x6000 == 0x6000:
                    antialias = "High"
                item.addChild(QTreeWidgetItem(["Antialias", antialias]))
            self.info.addTopLevelItem(item)
            self.info.expandItem(item)

        self.info.resizeColumnToContents(0)


app = QApplication([])
movie = parser.parseFile(sys.argv[1])
win = MyMainWindow()
win.resize(1024, 768)
win.show()
app.exec_()
Example #22
	for s in previousScore2:
		if prec == -1:
			prec = s
			bestNext = previousScore3[i]
		else:
			if prec > s:
				prec = s
				bestNext = previousScore3[i]
		i += 1 
	return bestNext, previousScore


if __name__ == '__main__':
	# Call parser
	G = nx.Graph()
	G, nbCars, Totaltime, intersections, streets, nodeStart = parser.parseFile("paris_54000.txt")
	
	print "totaltime : ", str(Totaltime)
	print "nbCars : ", str(nbCars)
	print "streets nb : ", str(len(streets))
	print "intersections nb : ", str(len(intersections))
	
	# street info a Street[Distance,Time]
	global _addCoef
	_addCoef = 130
	
	# create some structure
	global _outputCarsMovements
	_outputCarsMovements = []
	global _totalTimeForTheCard
	_totalTimeForTheCard = []
Example #23
    def setUp(self):
        import parser
        self.t = parser.parseFile('data/v5_b.xml')
Example #24
from pathlib import Path
from parser import parseFile  # assumed imports; the snippet did not show them

def parse():
    for dev_file in Path('./atdf').glob('*.atdf'):
        yield from parseFile(str(dev_file.resolve()))
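A minimal usage sketch, assuming parseFile yields one record per device found in each .atdf file (hypothetical):

devices = list(parse())  # materializes the generator; needs an ./atdf directory
print(len(devices), "device records parsed")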
Example #25
#!/usr/bin/python
import parser
import os
import os.path

HAND_HISTORY_PATH = "/home/msharman/HandHistory/antler88"

for f in os.listdir(HAND_HISTORY_PATH):
    path = os.path.join(HAND_HISTORY_PATH, f)
    try:
        print "Parsing %s:" % f
        parser.parseFile(path)
    except Exception as e:
        print "Exception parsing %s: %s" % (f, e)
        exit()
Example #26
File: run.py Project: ZIllR0/ctf-1
import sys
from parser import parseFile, parse, simplifyTF

sys.setrecursionlimit(100000)

print('[*] Parsing')
symbols = parseFile(sys.argv[1])

print('[*] Running')
result = repr(symbols['_'].eval().simplify().alpha_norm())
result = simplifyTF(result)
print('[-] Result:', result)
Example #27
        except:
            print('Unknown function at: ' + line)
            exit()
        if currentFunc.delay > tmpCycle:
            tmpCycle = currentFunc.delay
        currentFunc.computeCost(initialStocks)
    for stockValue in initialStocks.values():
        if stockValue < 0:
            print('Invalid operation at: ' + line)
            exit()
    localCycle += tmpCycle
    if localCycle != actualFileCycle:
        print('Cycle count is wrong at: ' + line)
        exit()
    for func in splitedLine:
        currentFunc = functions[func]
        currentFunc.computeReward(initialStocks)
    return localCycle


parseFile(data, initialStocks, processList, toOptimize)
functions = funcDictionaryBuild()
localCycle = 0
for line in output:
    if len(line) == 0:
        continue
    if line.startswith('State of stock after'):
        break
    localCycle = parseLine(line, functions, localCycle)
print('This output is correct.')
Example #28
    else:
        result = iterative.solve(problem)

    val = result[0]
    error = abs(val - problem.targetnum)
    steps = result[1]
    time = result[2]
    nodes = result[3]
    max = result[4]
    endNode = result[5]

    if debug in (None, True):  # equivalent to debug == None or debug == True
        try:
            endNode.print_path()
        except:
            print("No steps taken...")

        print("Algorithm used: " + problem.alg)
        print("Error: %r" % error)
        print("Value found: %r" % val)
        print("Steps taken: %r" % steps)
        print("Time taken: %r" % time)
        print("Nodes expanded: %r" % nodes)
        print("Max depth traversed: %r" % max)

    return (problem.alg, error, val, steps, time, nodes, max, problem)


if __name__ == "__main__":
    main(parseFile(parseCommandLine(sys.argv[1:])), True)
Example #29
    D = []
    for i in range(len(l)):
        if (belong(D, l[i])) < 0:
            D.append(l[i])
    return len(D)


# Replaces character strings by numbers
def make_standard(data, s, max):
    D = []
    Used = []
    for i in range(max):
        if belong(Used, get_group(data, i, s)) < 0:
            Used.append(get_group(data, i, s))
            # Use the same 0-based id that belong() returns for later repeats;
            # appending len(Used) here would give a first occurrence a different
            # number than its repeats.
            D.append(len(Used) - 1)
        else:
            D.append(belong(Used, get_group(data, i, s)))
    return D
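
belong and get_group are not shown. Judging from the "< 0" checks, belong returns an index or -1; hypothetical sketches (names and record layout are assumptions):

def belong(lst, item):
    # Index of item in lst, or -1 when absent (mirrors str.find semantics).
    return lst.index(item) if item in lst else -1

def get_group(data, i, s):
    # Field s of the i-th parsed record (assumed dict-per-record layout).
    return data[i][s]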


data1 = parser.parseFile("1epahttp.txt", "epahttp")
data2 = parser.parseFile("2sdschttp2.txt", "sdschttp")
data3 = parser.parseFile("3Calgaryaccess_log.txt", "calgaryhttp")
sample1 = make_standard(data1, "host", 25000)
sample2 = make_standard(data2, "host", 25000)
sample3 = make_standard(data3, "host", 25000)


print(ecart([sketch_cod2(sample1, sample2, 64, 0.0001) for i in range(100)]))
# print(count_min([sample1,sample2,sample3],0.000001,1024,25000))
Example #30
File: main.py Project: stuydw/curves
import parser
from matrix import identity
from draw import newScreen

# Initialize the screen
screen = newScreen()
# Parse the script
parser.parseFile("script_c", identity(4), [], screen)
Example #31
__author__ = "yaelcohen"

### go over the folder and parse each file
## manage global ids
## create one big dictionary

import glob
import pickle
# parseFile is called below but never imported in this snippet; it is assumed
# to come from the project's parser module.
from parser import parseFile


id = 0
all = {}
all_nodes = []
path = "/Users/yaelcohen/Documents/cvs/cvs_out/*.html"
for fname in glob.glob(path):
    # if (id > 1000):
    #    break
    print "working on " + fname
    cv, id = parseFile(fname, id)
    all_nodes += cv
    all[fname] = cv

print all
print id
pickle.dump(all, open("all_dict_2.p", "wb"))
pickle.dump(all_nodes, open("all_nodes_2.p", "wb"))

## all_nodes = pickle.load( open( "all_nodes.p", "rb" ) )
print "DONE"
Example #32
import sys
from parser import parseFile, parse
from pylam import Variable

sys.setrecursionlimit(100000)

symbols = {}
symbols['M'] = Variable('M', showid=False)
symbols['I'] = Variable('I', showid=False)
symbols = parseFile('def.txt', symbols)

for k, v in symbols.items():
    if k in 'YMI_':
        continue
    symbols[k] = v.eval().simplify()
    assert len(symbols[k].freevar()) == 0, k

print(symbols['_'].alpha_norm())