def i2trained(filenom, etiq):
    """Parse an InkML file and build one tagged symbol from its <trace> blocks.

    filenom -- path of the InkML file to read.
    etiq    -- tag (label) assigned to the resulting symbol.

    Returns an SClass.taggedSymbol whose coordinate array is the concatenation
    of every trace found in the file, with trace-end indices marking where each
    individual trace finishes inside that concatenation.
    """
    coordinates = []
    traceEndsAsList = []
    with open(filenom, "r") as infile:
        linea = "o"
        # Read the file line by line until EOF (readline() returns "").
        while linea != "":
            linea = infile.readline()
            # A new trace opens on this line.
            if search4Trace(linea, "in"):
                if search4Trace(linea, "out"):
                    # The trace closes on the same line: keep the line up to
                    # the closing </trace> marker.
                    lastV = linea.index('</trace>')
                    newTraceW = linea[0:lastV + 1]
                else:
                    # The trace spans several lines: accumulate until the
                    # closing </trace> shows up.
                    lineaTrace = linea
                    while lineaTrace != "":
                        lineaTrace = lineaTrace + infile.readline()
                        if search4Trace(lineaTrace, "out"):
                            newTraceW = lineaTrace
                            lineaTrace = ''
                # Keep only the text between the opening tag's '>' and the
                # closing tag's '<'.  The original discarded the rstrip()
                # result (strings are immutable); it is now assigned.
                newTraceW = newTraceW.rstrip('\n')
                fiXML = newTraceW.index('>', 0)
                fiTrace = newTraceW.index('<', 3)
                onlyTrace = newTraceW[fiXML + 1:fiTrace]
                # Split "x, y, x, y ..." into tokens and drop trailing commas.
                precoordinateLines = [tok.rstrip(',')
                                      for tok in onlyTrace.split(' ')]
                # Pair consecutive tokens into [x, y] float pairs.  '//' keeps
                # range() valid on both Python 2 and Python 3 (the original
                # '/' raises TypeError on Python 3).
                coordinatesAr = [
                    [float(precoordinateLines[dob * 2]),
                     float(precoordinateLines[(dob * 2) + 1])]
                    for dob in range(len(precoordinateLines) // 2)
                ]
                coordinates.extend(coordinatesAr)
                # Record where this trace ends inside the flat coordinate list.
                traceEndsAsList.append(len(coordinates) - 1)
    # Assemble the symbol with its matching trace ends.
    traceEnds = np.asarray(traceEndsAsList, np.float64)
    coordin = np.asarray(coordinates, np.float64)
    return SClass.taggedSymbol(coordin, traceEnds, etiq)
def mountDS(name, whichdB):
    # Build a list of tagged symbols from a database index file.
    #
    # name    -- path of the index/tag file to read.
    # whichdB -- database format selector; only 'UNIPEN' is implemented
    #            (a CROHME branch was started but is commented out below).
    #
    # Returns a list of nsi.taggedSymbol objects.
    # NOTE(review): if whichdB is not 'UNIPEN', symbolsdB is never bound and
    # the final return raises NameError — confirm callers only pass 'UNIPEN'.
    if whichdB == 'UNIPEN':
        fileTags = open(name, "r")
        linea = 'o'
        symbolsdB = []
        # Read until EOF (readline() returns "").
        while linea != '':
            linea = fileTags.readline()
            if '.INCLUDE' in linea:
                # An .INCLUDE line names the file holding the raw traces;
                # load them all up front so segments can index into them.
                espai = linea.index(' ')
                incPath = 'UnipenData/include/' + linea[espai + 1:].rstrip('\n')
                print incPath
                trainTraces = uptr.u2t(incPath)
            elif '.SEGMENT CHARACTER' in linea:
                # A segment line selects which traces form one character:
                # either a single index or a '-'-separated list of indices
                # (column 19 onward, up to the next space).
                espaiFi = linea.index(' ', 19)
                segSel = linea[19:espaiFi]
                if '-' in segSel:
                    # Multi-trace character: parse each '-'-separated index,
                    # stack the traces vertically and record the cumulative
                    # end index of each trace.
                    numOfTr = segSel.count('-') + 1
                    trInd = []
                    longC = 0
                    tEnds = np.zeros([numOfTr], np.float64)
                    for i in range(numOfTr):
                        if i == 0:
                            prevI = -1
                        if i == numOfTr - 1:
                            # Last chunk runs to the end of the selector.
                            nowI = len(segSel)
                        else:
                            nowI = segSel.index('-', prevI + 1)
                        trInd.append(int(segSel[prevI + 1:nowI]))
                        prevI = nowI
                        longC += len(trainTraces[trInd[i]])
                        # Trace-end index inside the stacked coordinates.
                        tEnds[i] = longC - 1
                        if i == 0:
                            symCoord = np.array(trainTraces[trInd[0]],
                                                np.float64)
                        else:
                            symCoord = np.vstack(
                                (symCoord, np.asarray(trainTraces[trInd[i]])))
                else:
                    # Single-trace character.
                    trInd = [int(segSel)]
                    longC = len(trainTraces[trInd[0]])
                    tEnds = np.array([longC - 1], np.float64)
                    symCoord = np.array(trainTraces[trInd[0]], np.float64)
                # The label is the quoted string on the segment line.
                mk1 = linea.index('"') + 1
                mk2 = linea.index('"', mk1)
                etiqueta = linea[mk1:mk2]
                symbolsdB.append(nsi.taggedSymbol(symCoord, tEnds, etiqueta))
    #elif whichdB=='CROHME':
    return symbolsdB
def i2trained(filenom, etiq):
    """Parse an InkML file and build one tagged symbol from its <trace> blocks.

    filenom -- path of the InkML file to read.
    etiq    -- tag (label) assigned to the resulting symbol.

    Returns an SClass.taggedSymbol whose coordinate array is the concatenation
    of every trace found in the file, with trace-end indices marking where each
    individual trace finishes inside that concatenation.
    """
    coordinates = []
    traceEndsAsList = []
    with open(filenom, "r") as infile:
        linea = "o"
        # Read the file line by line until EOF (readline() returns "").
        while linea != "":
            linea = infile.readline()
            # A new trace opens on this line.
            if search4Trace(linea, "in"):
                if search4Trace(linea, "out"):
                    # The trace closes on the same line: keep the line up to
                    # the closing </trace> marker.
                    lastV = linea.index('</trace>')
                    newTraceW = linea[0:lastV + 1]
                else:
                    # The trace spans several lines: accumulate until the
                    # closing </trace> shows up.
                    lineaTrace = linea
                    while lineaTrace != "":
                        lineaTrace = lineaTrace + infile.readline()
                        if search4Trace(lineaTrace, "out"):
                            newTraceW = lineaTrace
                            lineaTrace = ''
                # Keep only the text between the opening tag's '>' and the
                # closing tag's '<'.  The original discarded the rstrip()
                # result (strings are immutable); it is now assigned.
                newTraceW = newTraceW.rstrip('\n')
                fiXML = newTraceW.index('>', 0)
                fiTrace = newTraceW.index('<', 3)
                onlyTrace = newTraceW[fiXML + 1:fiTrace]
                # Split "x, y, x, y ..." into tokens and drop trailing commas.
                precoordinateLines = [tok.rstrip(',')
                                      for tok in onlyTrace.split(' ')]
                # Pair consecutive tokens into [x, y] float pairs.  '//' keeps
                # range() valid on both Python 2 and Python 3 (the original
                # '/' raises TypeError on Python 3).
                coordinatesAr = [
                    [float(precoordinateLines[dob * 2]),
                     float(precoordinateLines[(dob * 2) + 1])]
                    for dob in range(len(precoordinateLines) // 2)
                ]
                coordinates.extend(coordinatesAr)
                # Record where this trace ends inside the flat coordinate list.
                traceEndsAsList.append(len(coordinates) - 1)
    # Assemble the symbol with its matching trace ends.
    traceEnds = np.asarray(traceEndsAsList, np.float64)
    coordin = np.asarray(coordinates, np.float64)
    return SClass.taggedSymbol(coordin, traceEnds, etiq)
def segment(coordinates, byAxis, difs):
    """Group consecutive traces into symbols and slice the coordinates.

    Two neighbouring traces are merged when their rasterized strokes touch
    (a single connected component after dilation).

    coordinates -- 3-D array indexed as [trace, point, x/y] (inferred from
                   the [i, j, 0] indexing below — TODO confirm).
    byAxis      -- pair (x, y) of coordinate collections used for min()
                   normalisation.
    difs        -- pair (difX, difY): coordinate spans used to scale points
                   into the 51x501 raster.

    Returns (simbols, qS): the SClass.Symbol list and the number of traces
    grouped into each symbol.
    """
    [x, y] = byAxis
    [difX, difY] = difs
    seguits = []
    # Build an image with the traces, with a fixed thickness depending on the
    # number of traces, to analyse whether crossing traces must be grouped
    # into a single symbol.
    # NOTE: 100/coordinates.shape[0] is integer division under Python 2.
    structel2 = np.array([[1 for k in range(int(math.sqrt(100/coordinates.shape[0]))+1)] for l in range(int(math.sqrt(100/coordinates.shape[0]))+1)], np.int32)
    for i in range(coordinates.shape[0]-1):
        # seguits[i] counts how many following traces merge with trace i.
        seguits.append(-1)
        numO = 1
        while numO == 1:
            seguits[i] = seguits[i]+1
            imgAn = np.zeros((51, 501))
            for j in range(coordinates.shape[1]-1):
                # Depending on whether the coordinates advance or go
                # backwards, order the filling of the image (range() needs
                # ascending bounds).
                if (50*(coordinates[i+seguits[i],j,0]-min(x))/difX) > (50*(coordinates[i+seguits[i],j+1,0]-min(x))/difX):
                    factx1 = 1
                else:
                    factx1 = 0
                if (50*(coordinates[i+seguits[i],j,1]-min(y))/difY) > (50*(coordinates[i+seguits[i],j+1,1]-min(y))/difY):
                    facty1 = 1
                else:
                    facty1 = 0
                if (50*(coordinates[i+seguits[i]+1,j,0]-min(x))/difX) > (50*(coordinates[i+seguits[i]+1,j+1,0]-min(x))/difX):
                    factx2 = 1
                else:
                    factx2 = 0
                if (50*(coordinates[i+seguits[i]+1,j,1]-min(y))/difY) > (50*(coordinates[i+seguits[i]+1,j+1,1]-min(y))/difY):
                    facty2 = 1
                else:
                    facty2 = 0
                # Rasterize segment j of the current trace and of the next one.
                imgAn[np.array(range(int(((50*(coordinates[i+seguits[i],j+facty1,1]-min(y))/difY))),int(((50*(coordinates[i+seguits[i],j+1-facty1,1]-min(y))/difY)+1))),int),int(((500*(coordinates[i+seguits[i],j+factx1,0]-min(x))/difX))):int(((500*(coordinates[i+seguits[i],j+1-factx1,0]-min(x))/difX)+1))] = 1
                imgAn[np.array(range(int(((50*(coordinates[i+seguits[i]+1,j+facty2,1]-min(y))/difY))),int(((50*(coordinates[i+seguits[i]+1,j+1-facty2,1]-min(y))/difY)+1))),int),int(((500*(coordinates[i+seguits[i]+1,j+factx2,0]-min(x))/difX))):int(((500*(coordinates[i+seguits[i]+1,j+1-factx2,0]-min(x))/difX)+1))] = 1
            dilated = skimage.morphology.binary_dilation(imgAn, structel2)
            label_image = ndi.measurements.label(dilated)
            # Check whether more than one separate object remains between the
            # 2 traces; if only one is left, keep the traces in the same
            # symbol (numO==1 continues the merge loop).
            numO = np.amax(label_image[0])
            # Reached the last pair of traces: force the merge counter one
            # step further and exit the while loop.
            if (i+seguits[i]) == coordinates.shape[0]-2:
                if numO == 1:
                    seguits[i] = seguits[i]+1
                    numO = 2
    del imgAn
    # Define the run of symbols according to their number of traces.
    seguits.append(0)
    qS = []
    nS = 0
    disc = 0
    for i in range(coordinates.shape[0]):
        if disc == 0:
            qS.append(1)
            # Chain the merge counters: a symbol absorbs every trace whose
            # seguits entry keeps extending the run.
            while seguits[i+qS[nS]-1] != 0:
                qS[nS] = qS[nS]+seguits[i+qS[nS]-1]
            disc = qS[nS]-1
            nS = nS+1
        else:
            # Skip the traces already absorbed by the previous symbol.
            disc = disc-1
    print 'Traces grouped as'
    print qS
    # Render and group the traces into symbols.
    cc = 0
    cd = 0
    # Every symbol buffer is sized for the largest group; shorter symbols are
    # padded with their last point.
    tam = max(qS)*coordinates.shape[1]
    simbols = []
    for i in range(len(qS)):
        ce = cd
        finished = 0
        symCoordinates = np.zeros([tam, 2], np.float64)
        tends = np.zeros([qS[i]], np.float64)
        for j in range(tam):
            if finished == 0:
                symCoordinates[j] = coordinates[cd, cc]
                cc = cc+1
                if cc == coordinates.shape[1]:
                    # End of a trace: record its end index and advance.
                    tends[cd-ce] = j
                    cd = cd+1
                    cc = 0
                    if qS[i] == (cd-ce):
                        finished = 1
            else:
                # Pad with the last point of the last trace in the group.
                symCoordinates[j] = coordinates[cd-1, (coordinates.shape[1])-1]
        tends[tends.shape[0]-1] = symCoordinates.shape[0]-1
        nsi = SClass.Symbol(symCoordinates, tends)
        simbols.append(nsi)
    print 'Data segmented.'
    fig = plt.figure(2)
    fig.canvas.set_window_title('Regions')
    # Plot every trace of every segmented symbol.
    for i in range(len(simbols)):
        for j in range(simbols[i].tE.shape[0]):
            if j == 0:
                ini = -1
            else:
                ini = int(simbols[i].tE[j-1])
            lineG, = plt.plot(simbols[i].Coord[range(ini+1, int(simbols[i].tE[j])+1), 0], -simbols[i].Coord[range(ini+1, int(simbols[i].tE[j])+1), 1], '-')
    return simbols, qS
def templateGenerator(): symboldB, tagClassification = dB.readCROHMEdB([ 'trainData/CROHME_training', 'trainData/trainData_v2', 'trainData/TrainINKML' ]) option = 3 counta = 0 tagAverages = {} #Opcio 1: S'adapten els simbols de la base de dades amb el minim nombre de traces trobat if option == 1: for character in tagClassification: counta += 1 print character, ':', len(tagClassification[character]) numStrokes = [ len(tagClassification[character][i].tE) for i in range(len(tagClassification[character])) ] nStrokesTemp = min(numStrokes) tagClassification[character] = spp.strokeReduction( tagClassification[character], nStrokesTemp, True) eachStroke = np.asarray([ int( sum([ tagClassification[character][i].tE[j] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character])) for j in range(nStrokesTemp) ]) tagClassification[character] = spp.altArcLengthResampling( tagClassification[character], eachStroke) average = np.zeros([len(tagClassification[character][0].Coord), 2], np.float64) for example in tagClassification[character]: average = np.array([[(average[i, 0] + example.Coord[i, 0]), (average[i, 1] + example.Coord[i, 1])] for i in range(example.Coord.shape[0])], np.float64) average = np.array([[ average[i, 0] / len(tagClassification[character]), average[i, 1] / len(tagClassification[character]) ] for i in range(example.Coord.shape[0])], np.float64) tagAverages[character] = average plt.figure(counta) for j in range(nStrokesTemp): if j == 0: ini = -1 else: ini = int(eachStroke[j - 1]) plt.plot(average[range(ini + 1, int(eachStroke[j]) + 1), 0], -average[range(ini + 1, int(eachStroke[j]) + 1), 1], 'r') #Opcio 2: S'adapten els simbols de la base de dades amb el maxim nombre de traces trobat elif option == 2: for character in tagClassification: counta += 1 print character, ':', len(tagClassification[character]) numStrokes = [ len(tagClassification[character][i].tE) for i in range(len(tagClassification[character])) ] nStrokesTemp = 
int(round(sum(numStrokes) / float(len(numStrokes)))) tagClassification[character] = spp.strokeReduction( tagClassification[character], nStrokesTemp, True) eachStroke = np.asarray([ int( sum([ tagClassification[character][i].tE[j] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character])) for j in range(nStrokesTemp) ]) tagClassification[character] = spp.altArcLengthResampling( tagClassification[character], eachStroke) average = np.zeros([len(tagClassification[character][0].Coord), 2], np.float64) for example in tagClassification[character]: average = np.array([[(average[i, 0] + example.Coord[i, 0]), (average[i, 1] + example.Coord[i, 1])] for i in range(example.Coord.shape[0])], np.float64) average = np.array([[ average[i, 0] / len(tagClassification[character]), average[i, 1] / len(tagClassification[character]) ] for i in range(example.Coord.shape[0])], np.float64) tagAverages[character] = average #Opcio 3: Es separen els simbols segons el seu numero de traces elif option == 3: charList = [character for character in tagClassification] for charInd in range(len(charList)): numStrokes = [ len(tagClassification[charList[charInd]][i].tE) for i in range(len(tagClassification[charList[charInd]])) ] c = 0 typesByN = [] for n in numStrokes: if n not in typesByN: typesByN.append(n) tagClassification[charList[charInd] + str(n)] = [] tagClassification[charList[charInd] + str(n)].append( tagClassification[charList[charInd]][c]) c += 1 del tagClassification[charList[charInd]] print tagClassification['-1'][0].LP #Soroll de la dB del tagClassification['\exists2'][1] del tagClassification['\pi2'] del tagClassification['\\' + 'forall3'] del tagClassification['Y2'] del tagClassification['x2'] del tagClassification['Y3'] tagClassification['k1'] = [ tagClassification['k1'][valid] for valid in [0, 1, 2, 3, 8, 12, 20, 24, 29, 30, 34, 35, 42, 43, 58, 59, 61] ] tagClassification['\sum2'] = [ tagClassification['\sum2'][valid] for valid in [1, 5, 13, 18, 24, 25, 
28, 32, 35, 37, 38, 45, 61] ] tagClassification[']2'] = [ tagClassification[']2'][valid] for valid in [3, 4] ] tagClassification['[2'] = [ tagClassification['[2'][valid] for valid in [0, 8] ] tagClassification['\\' + 'theta2'] = [ tagClassification['\\' + 'theta2'][valid] for valid in [ 1, 2, 5, 7, 9, 12, 13, 28, 29, 31, 32, 34, 37, 39, 40, 41, 52, 61, 65, 66, 67, 69 ] ] tagClassification['\div3'] = [ tagClassification['\div3'][valid] for valid in [10, 24, 38] ] tagClassification['\\' + 'tan3'] = [ tagClassification['\\' + 'tan3'][valid] for valid in [0, 4, 5, 16, 22, 34, 41, 45, 50, 53, 72, 86, 144] ] tagClassification['\lim3'] = [tagClassification['\lim3'][9]] ### os.remove('results.txt') report = open('results.txt', 'w') for character in tagClassification: #EachStroke fa referencia a com es reparteixen els grups en traces, segons la mitjana d'aquesta distribucio a la base de dades eachStroke = np.asarray([ int( sum([ tagClassification[character][i].tE[j] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character])) for j in range(tagClassification[character][0].tE.shape[0]) ]) tagClassification[character] = spp.altArcLengthResampling( tagClassification[character], eachStroke) counta += 1 print character, ':', len(tagClassification[character]) #Totes les features del template d'una etiqueta es calculen com la seva mitjana en els simbols d'aquesta etiqueta average = np.zeros([len(tagClassification[character][0].Coord), 2], np.float64) for example in tagClassification[character]: average = np.array([[(average[i, 0] + example.Coord[i, 0]), (average[i, 1] + example.Coord[i, 1])] for i in range(example.Coord.shape[0])], np.float64) average = np.array([[ average[i, 0] / len(tagClassification[character]), average[i, 1] / len(tagClassification[character]) ] for i in range(example.Coord.shape[0])], np.float64) tagAverages[character] = nsi.taggedSymbol(average, eachStroke, character) tagAverages[character].computeFeatures() 
tagAverages[character].LP = [ np.nansum([ tagClassification[character][i].LP[j] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character]) for j in range(len(tagClassification[character][0].LP)) ] tagAverages[character].accAngle = [ np.nansum([ tagClassification[character][i].accAngle[j] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character]) for j in range(len(tagClassification[character][0].accAngle)) ] tagAverages[character].coG = [[ np.nansum([ tagClassification[character][i].coG[j][0] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character]), np.nansum([ tagClassification[character][i].coG[j][1] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character]) ] for j in range(len(tagClassification[character][0].coG))] tagAverages[character].liS = [ np.nansum([ tagClassification[character][i].liS[j] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character]) for j in range(len(tagClassification[character][0].liS)) ] tagAverages[character].quadraticError = [ np.nansum([ tagClassification[character][i].quadraticError[j] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character]) for j in range( len(tagClassification[character][0].quadraticError)) ] tagAverages[character].relStrokeLength = [ np.nansum([ tagClassification[character][i].relStrokeLength[j] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character]) for j in range( len(tagClassification[character][0].relStrokeLength)) ] tagAverages[character].turningAngle = [ np.nansum([ tagClassification[character][i].turningAngle[j] for i in range(len(tagClassification[character])) ]) / len(tagClassification[character]) for j in range( len(tagClassification[character][0].turningAngle)) ] tagAverages[character].turningAngleDifference = [ np.nansum([ tagClassification[character][i].turningAngleDifference[j] for i in 
range(len(tagClassification[character])) ]) / len(tagClassification[character]) for j in range( len(tagClassification[character] [0].turningAngleDifference)) ] styles = ['horizontal', 'vertical', 'diagonal', 'closed'] tagAverages[character].Style = styles[np.argmax( [[ tagClassification[character][i].Style for i in range(len(tagClassification[character])) ].count('horizontal'), [ tagClassification[character][i].Style for i in range(len(tagClassification[character])) ].count('vertical'), [ tagClassification[character][i].Style for i in range(len(tagClassification[character])) ].count('diagonal'), [ tagClassification[character][i].Style for i in range(len(tagClassification[character])) ].count('closed')])] report.write('-----------------------------------------------\n') report.write(character + ' |\n') report.write('---------\n') for i in range(len(tagClassification[character])): report.write( str(tagClassification[character][i].tE) + ' :\n') for j in range(tagClassification[character][i].Coord.shape[0]): report.write( str(tagClassification[character][i].Coord[j]) + ', ') report.write('\n') report.write('average:\n ') for j in range(average.shape[0]): report.write(str(average[j]) + ', ') report.write('\n') #Guarda els resultats al sistema if os.path.isfile('varSimbdB.txt'): os.remove('varSimbdB.txt') f = open('varSimbdB.txt', 'wb') pickle.dump(symboldB, f) f.close() if os.path.isfile('varTagClass.txt'): os.remove('varTagClass.txt') f = open('varTagClass.txt', 'wb') pickle.dump(tagClassification, f) f.close() if os.path.isfile('varAverages.txt'): os.remove('varAverages.txt') f = open('varAverages.txt', 'wb') pickle.dump(tagAverages, f) f.close() report.close() plt.show()
# Script body: read the temp JSON dumps written by the web layer, rebuild the
# event being scheduled and print the cheapest placement.
# NOTE(review): dtStart and granularity are defined outside this fragment —
# confirm they are assigned earlier in the file before running standalone.
dtEnd = sys.argv[2]
txLocation = sys.argv[3]
txRRule = sys.argv[4]
temp = open("C:/wamp/www/mesa/python/temp1.json", "r")
calendars = json.loads(temp.read())
temp.close()
temp = open("C:/wamp/www/mesa/python/temp2.json", "r")
blSettings = json.loads(temp.read())
temp.close()
RRule = functions.parseRRule(txRRule)
priorities = functions.parsePriorities(blSettings)
# Timestamps arrive as "YYYY-MM-DD hh:mm:ss"; rewrite to ISO-8601 "...T...Z".
originalEvent = classes.Event(
    "blevent",
    {
        "blEvent": {
            "start_time": dtStart.replace(" ", "T") + "Z",
            "end_time": dtEnd.replace(" ", "T") + "Z",
            "location": txLocation,
            "travel_time": 0,
        }
    },
)
modifiedMatrix = functions.construct_modified_matrix(calendars, blSettings,
                                                     granularity)
pointList = pointListGenerator.construct_point_list(modifiedMatrix,
                                                    granularity,
                                                    originalEvent, blSettings)
costOutput = SClass.smallest_cost(pointList, priorities, originalEvent,
                                  granularity, txLocation, modifiedMatrix)
print(costOutput)
def reAssign(symbols, tags):
    """Tag symbols, then merge vertically-stacked pairs into compound symbols.

    Pairs of '-'/'\div' strokes become '=', a '-' over '\gt'/'\lt' becomes
    '\geq'/'\leq', and two stacked '.' become ':'.  After merging, symbol
    refs are renumbered to close the gaps left by the removed halves.

    symbols -- list of tagged symbols (need tagUntagged/tag/ref/center/bBox).
    tags    -- one tag per symbol, applied positionally via tagUntagged.

    Returns (symbols, tags) with the merged list and its tag list.

    Fix: the '\geq' merge loop recorded the kept index (min) in `removed`
    instead of the removed one (max), unlike the other three merge loops,
    which corrupted the final ref renumbering.
    """
    for sNum in range(len(symbols)):
        symbols[sNum].tagUntagged(tags[sNum], sNum)
    equals = []
    geqs = []
    leqs = []
    twoDots = []
    # Detect candidate pairs: a symbol plus exactly one other symbol centered
    # inside its horizontal bounding box, either above or below it.
    for inspected in symbols:
        if inspected.tag[:-1] == '-' or inspected.tag[:-1] == '\div':
            hasAbove = False
            hasBelow = False
            for s in symbols:
                if (s.ref != inspected.ref
                        and s.center[0] > inspected.bBox[0]
                        and s.center[0] < inspected.bBox[1]):
                    if s.center[1] < inspected.center[1]:
                        hasAbove = True
                        other = s
                    else:
                        hasBelow = True
                        other = s
            # Exactly one neighbour (above XOR below) qualifies for a merge.
            if (hasAbove and hasBelow == False) or (hasAbove == False
                                                    and hasBelow):
                if ((other.tag[:-1] == '-' or other.tag[:-1] == '\div')
                        and [other.ref, inspected.ref] not in equals):
                    equals.append([inspected.ref, other.ref])
                elif (other.tag[:-1] == '\gt'
                      and [other.ref, inspected.ref] not in geqs):
                    geqs.append([inspected.ref, other.ref])
                elif (other.tag[:-1] == '\lt'
                      and [other.ref, inspected.ref] not in leqs):
                    leqs.append([inspected.ref, other.ref])
        elif inspected.tag[:-1] == '.':
            hasAbove = False
            hasBelow = False
            for s in symbols:
                if (s.ref != inspected.ref
                        and s.center[0] > inspected.bBox[0]
                        and s.center[0] < inspected.bBox[1]
                        and s.tag[:-1] == '.'):
                    if s.center[1] < inspected.center[1]:
                        hasAbove = True
                        other = s
                    else:
                        hasBelow = True
                        other = s
            if (hasAbove and hasBelow == False) or (hasAbove == False
                                                    and hasBelow):
                if [other.ref, inspected.ref] not in twoDots:
                    twoDots.append([inspected.ref, other.ref])
    # Merge each detected pair into a single resampled 50-point symbol.
    # The four original loops were identical except for the new tag; they are
    # consolidated here, preserving their processing order.
    removed = []
    for pairs, newTag in ((equals, '=2'), (geqs, '\geq2'), (leqs, '\leq2'),
                          (twoDots, ':2')):
        for pair in pairs:
            newPos = min(pair)
            # Record the index that disappears (FIX: was newPos for geqs).
            removed.append(max(pair))
            reduSim = [
                spp.arcLengthResampling([symbols[pair[0]]], 25)[0].Coord,
                spp.arcLengthResampling([symbols[pair[1]]], 25)[0].Coord
            ]
            extSim = np.concatenate((reduSim[0], reduSim[1]), axis=0)
            merged = nsi.taggedSymbol(extSim, np.array([24, 49], np.float64),
                                      newTag)
            merged.ref = newPos
            merged.draw()
            del symbols[max(pair)]
            del symbols[min(pair)]
            symbols.insert(newPos, merged)
    # Renumber refs so they stay consecutive after the deletions.  The ref
    # list must be recomputed every iteration because refs mutate in-loop.
    if len(removed) != 0:
        c = 0
        for sn in range(max([s.ref for s in symbols]) + 1):
            if sn in [s.ref for s in symbols]:
                symbols[[s.ref for s in symbols].index(sn)].ref -= c
            if sn in removed:
                c += 1
    return symbols, [si.tag for si in symbols]
# Script body (variant of the fragment above in the file): read the temp JSON
# dumps written by the web layer, rebuild the event and print the cheapest
# placement.
# NOTE(review): dtStart, dtEnd and granularity are defined outside this
# fragment — confirm they are assigned earlier before running standalone.
txRRule = sys.argv[4]
temp = open("C:/wamp/www/mesa/python/temp1.json", "r")
calendars = json.loads(temp.read())
temp.close()
temp = open("C:/wamp/www/mesa/python/temp2.json", "r")
blSettings = json.loads(temp.read())
temp.close()
RRule = functions.parseRRule(txRRule)
priorities = functions.parsePriorities(blSettings)
# Timestamps arrive as "YYYY-MM-DD hh:mm:ss"; rewrite to ISO-8601 "...T...Z".
originalEvent = classes.Event(
    "blevent", {
        "blEvent": {
            "start_time": dtStart.replace(" ", "T") + "Z",
            "end_time": dtEnd.replace(" ", "T") + "Z",
            "location": txLocation,
            "travel_time": 0
        }
    })
modifiedMatrix = functions.construct_modified_matrix(calendars, blSettings,
                                                     granularity)
pointList = pointListGenerator.construct_point_list(modifiedMatrix,
                                                    granularity,
                                                    originalEvent, blSettings)
costOutput = SClass.smallest_cost(pointList, priorities, originalEvent,
                                  granularity, txLocation, modifiedMatrix)
print(costOutput)
#!/usr/bin/env python import SClass as nsi import pyStructural as stru import sys import ink2Traces import drawTraces import fileSeg import drawRegions import matplotlib.pyplot as plt #Script per provar pyStructural amb casos ben etiquetats test = 5 if test == 1: testCase = [ nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0]), nsi.Symbol([0, 0], [0])
def templateGenerator(): symboldB,tagClassification=dB.readCROHMEdB(['trainData/CROHME_training','trainData/trainData_v2','trainData/TrainINKML']) option=3 counta=0 tagAverages={} #Opcio 1: S'adapten els simbols de la base de dades amb el minim nombre de traces trobat if option==1: for character in tagClassification: counta+=1 print character,':',len(tagClassification[character]) numStrokes=[len(tagClassification[character][i].tE) for i in range(len(tagClassification[character]))] nStrokesTemp=min(numStrokes) tagClassification[character]=spp.strokeReduction(tagClassification[character],nStrokesTemp,True) eachStroke=np.asarray([int(sum([tagClassification[character][i].tE[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character])) for j in range(nStrokesTemp)]) tagClassification[character]=spp.altArcLengthResampling(tagClassification[character],eachStroke) average=np.zeros([len(tagClassification[character][0].Coord),2],np.float64) for example in tagClassification[character]: average=np.array([[(average[i,0]+example.Coord[i,0]),(average[i,1]+example.Coord[i,1])] for i in range(example.Coord.shape[0])],np.float64) average=np.array([[average[i,0]/len(tagClassification[character]),average[i,1]/len(tagClassification[character])] for i in range(example.Coord.shape[0])],np.float64) tagAverages[character]=average plt.figure(counta) for j in range(nStrokesTemp): if j==0: ini=-1 else: ini=int(eachStroke[j-1]) plt.plot(average[range(ini+1,int(eachStroke[j])+1),0],-average[range(ini+1,int(eachStroke[j])+1),1],'r') #Opcio 2: S'adapten els simbols de la base de dades amb el maxim nombre de traces trobat elif option==2: for character in tagClassification: counta+=1 print character,':',len(tagClassification[character]) numStrokes=[len(tagClassification[character][i].tE) for i in range(len(tagClassification[character]))] nStrokesTemp=int(round(sum(numStrokes)/float(len(numStrokes)))) 
tagClassification[character]=spp.strokeReduction(tagClassification[character],nStrokesTemp,True) eachStroke=np.asarray([int(sum([tagClassification[character][i].tE[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character])) for j in range(nStrokesTemp)]) tagClassification[character]=spp.altArcLengthResampling(tagClassification[character],eachStroke) average=np.zeros([len(tagClassification[character][0].Coord),2],np.float64) for example in tagClassification[character]: average=np.array([[(average[i,0]+example.Coord[i,0]),(average[i,1]+example.Coord[i,1])] for i in range(example.Coord.shape[0])],np.float64) average=np.array([[average[i,0]/len(tagClassification[character]),average[i,1]/len(tagClassification[character])] for i in range(example.Coord.shape[0])],np.float64) tagAverages[character]=average #Opcio 3: Es separen els simbols segons el seu numero de traces elif option==3: charList=[character for character in tagClassification] for charInd in range(len(charList)): numStrokes=[len(tagClassification[charList[charInd]][i].tE) for i in range(len(tagClassification[charList[charInd]]))] c=0 typesByN=[] for n in numStrokes: if n not in typesByN: typesByN.append(n) tagClassification[charList[charInd]+str(n)]=[] tagClassification[charList[charInd]+str(n)].append(tagClassification[charList[charInd]][c]) c+=1 del tagClassification[charList[charInd]] print tagClassification['-1'][0].LP #Soroll de la dB del tagClassification['\exists2'][1] del tagClassification['\pi2'] del tagClassification['\\'+'forall3'] del tagClassification['Y2'] del tagClassification['x2'] del tagClassification['Y3'] tagClassification['k1']=[tagClassification['k1'][valid] for valid in [0,1,2,3,8,12,20,24,29,30,34,35,42,43,58,59,61]] tagClassification['\sum2']=[tagClassification['\sum2'][valid] for valid in [1,5,13,18,24,25,28,32,35,37,38,45,61]] tagClassification[']2']=[tagClassification[']2'][valid] for valid in [3,4]] 
tagClassification['[2']=[tagClassification['[2'][valid] for valid in [0,8]] tagClassification['\\'+'theta2']=[tagClassification['\\'+'theta2'][valid] for valid in [1,2,5,7,9,12,13,28,29,31,32,34,37,39,40,41,52,61,65,66,67,69]] tagClassification['\div3']=[tagClassification['\div3'][valid] for valid in [10,24,38]] tagClassification['\\'+'tan3']=[tagClassification['\\'+'tan3'][valid] for valid in [0,4,5,16,22,34,41,45,50,53,72,86,144]] tagClassification['\lim3']=[tagClassification['\lim3'][9]] ### os.remove('results.txt') report=open('results.txt','w') for character in tagClassification: #EachStroke fa referencia a com es reparteixen els grups en traces, segons la mitjana d'aquesta distribucio a la base de dades eachStroke=np.asarray([int(sum([tagClassification[character][i].tE[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character])) for j in range(tagClassification[character][0].tE.shape[0])]) tagClassification[character]=spp.altArcLengthResampling(tagClassification[character],eachStroke) counta+=1 print character,':',len(tagClassification[character]) #Totes les features del template d'una etiqueta es calculen com la seva mitjana en els simbols d'aquesta etiqueta average=np.zeros([len(tagClassification[character][0].Coord),2],np.float64) for example in tagClassification[character]: average=np.array([[(average[i,0]+example.Coord[i,0]),(average[i,1]+example.Coord[i,1])] for i in range(example.Coord.shape[0])],np.float64) average=np.array([[average[i,0]/len(tagClassification[character]),average[i,1]/len(tagClassification[character])] for i in range(example.Coord.shape[0])],np.float64) tagAverages[character]=nsi.taggedSymbol(average,eachStroke,character) tagAverages[character].computeFeatures() tagAverages[character].LP=[np.nansum([tagClassification[character][i].LP[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character]) for j in range(len(tagClassification[character][0].LP))] 
tagAverages[character].accAngle=[np.nansum([tagClassification[character][i].accAngle[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character]) for j in range(len(tagClassification[character][0].accAngle))] tagAverages[character].coG=[[np.nansum([tagClassification[character][i].coG[j][0] for i in range(len(tagClassification[character]))])/len(tagClassification[character]),np.nansum([tagClassification[character][i].coG[j][1] for i in range(len(tagClassification[character]))])/len(tagClassification[character])] for j in range(len(tagClassification[character][0].coG))] tagAverages[character].liS=[np.nansum([tagClassification[character][i].liS[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character]) for j in range(len(tagClassification[character][0].liS))] tagAverages[character].quadraticError=[np.nansum([tagClassification[character][i].quadraticError[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character]) for j in range(len(tagClassification[character][0].quadraticError))] tagAverages[character].relStrokeLength=[np.nansum([tagClassification[character][i].relStrokeLength[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character]) for j in range(len(tagClassification[character][0].relStrokeLength))] tagAverages[character].turningAngle=[np.nansum([tagClassification[character][i].turningAngle[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character]) for j in range(len(tagClassification[character][0].turningAngle))] tagAverages[character].turningAngleDifference=[np.nansum([tagClassification[character][i].turningAngleDifference[j] for i in range(len(tagClassification[character]))])/len(tagClassification[character]) for j in range(len(tagClassification[character][0].turningAngleDifference))] styles=['horizontal','vertical','diagonal','closed'] 
tagAverages[character].Style=styles[np.argmax([[tagClassification[character][i].Style for i in range(len(tagClassification[character]))].count('horizontal'),[tagClassification[character][i].Style for i in range(len(tagClassification[character]))].count('vertical'),[tagClassification[character][i].Style for i in range(len(tagClassification[character]))].count('diagonal'),[tagClassification[character][i].Style for i in range(len(tagClassification[character]))].count('closed')])] report.write('-----------------------------------------------\n') report.write(character+' |\n') report.write('---------\n') for i in range(len(tagClassification[character])): report.write(str(tagClassification[character][i].tE)+' :\n') for j in range(tagClassification[character][i].Coord.shape[0]): report.write(str(tagClassification[character][i].Coord[j])+', ') report.write('\n') report.write('average:\n ') for j in range(average.shape[0]): report.write(str(average[j])+', ') report.write('\n') #Guarda els resultats al sistema if os.path.isfile('varSimbdB.txt'): os.remove('varSimbdB.txt') f = open('varSimbdB.txt','wb') pickle.dump(symboldB,f) f.close() if os.path.isfile('varTagClass.txt'): os.remove('varTagClass.txt') f = open('varTagClass.txt','wb') pickle.dump(tagClassification,f) f.close() if os.path.isfile('varAverages.txt'): os.remove('varAverages.txt') f = open('varAverages.txt','wb') pickle.dump(tagAverages,f) f.close() report.close() plt.show()