Example #1
	def report(self):
		""" Print all parses and their violations in a structured format. """
		
		from Meter import Meter
		meter=Meter('default')
		if (hasattr(self,'allParses')):
			self.om(str(self))
			for parseList in self.allParses():
				self.om("\n"+meter.printParses(parseList),conscious=False)
		else:
			for child in self.children:
				if type(child)==type([]): continue
				child.report()
Example #2
def get_meter(session):
    if 'meter' in session and session['meter']:
        md = session['meter']
        from Meter import Meter
        return Meter(md)
    else:
        return None
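A minimal usage sketch for get_meter above, assuming a plain dict stands in for the real session object; the 'default' meter id is the same one passed to Meter('default') in Example #1:

session = {'meter': 'default'}   # hypothetical session contents
meter = get_meter(session)       # a Meter instance, or None if nothing is stored
if meter is None:
    print('no meter stored in this session')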
Example #3
def loadMeters(meter_dir, config={}):

    # get all meters from a path
    import os, imp
    module_d = {}
    for fn in os.listdir(meter_dir):
        if not fn.endswith('.py') or fn.startswith('_'): continue
        idx = fn.replace('.py', '').replace('-', '_')
        module_d[idx] = imp.load_source(idx, os.path.join(meter_dir, fn))

    from Meter import Meter

    d = {}
    for name, module in list(module_d.items()):
        #d[name]=loadConfigPy(toprint=False,config=module)
        #d[name]['id']=name
        mconfig = {}
        for k, v in config.items():
            mconfig[k] = v
        for k, v in loadConfigPy(toprint=False, config=module).items():
            mconfig[k] = v

        mconfig['id'] = name
        mobj = Meter(config=mconfig)
        d[name] = mobj

    return d
Example #4
def loadMeters():
	import meters
	from Meter import Meter
	d={}
	for name,module in list(meters.d.items()):
		#d[name]=loadConfigPy(toprint=False,config=module)
		#d[name]['id']=name
		mconfig = loadConfigPy(toprint=False,config=module)
		mconfig['id']=name
		mobj = Meter(config=mconfig)
		d[name]=mobj
	return d
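A hedged sketch of how the dictionary returned by the loadMeters variants might be consumed, assuming the surrounding module (the meters package and loadConfigPy) is importable; it relies only on meter attributes that appear elsewhere on this page (id in Example #10, constraints and constraint .name in Examples #10 and #12):

# hypothetical caller: list every loaded meter and its constraint names
meters_by_name = loadMeters()
for name, meter in sorted(meters_by_name.items()):
    constraint_names = [c.name for c in meter.constraints]
    print(meter.id, '->', ', '.join(constraint_names))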
Example #5
def createParametersPanel(manager):
    global systemPanel, menuHandles, isTemporal
    systemPanel = SystemPanel(changeChartCI, isTemporal)
    global summPanel
    summPanel = SummPanel(isTemporal)
    global empiricalPanel, isDominant
    empiricalPanel = EmpiricalPanel(menuHandles, manager, isDominant,
            systemPanel, isTemporal)
    global simsDonePanel
    simsDonePanel = Meter(300, 120, 10)
    panel = JPanel()
    panel.setBorder(LineBorder(Color.GREEN))
    panel.add(systemPanel)
    panel.add(empiricalPanel)
    panel.add(summPanel)
    panel.add(simsDonePanel)
    return panel
Example #6
    def get_meter(self, meter=None):
        if not meter:
            if self.meter:
                meter = self.meter
            elif hasattr(self, '_Text__bestparses') and self.__bestparses:
                return self.get_meter(sorted(self.__bestparses.keys())[0])
            else:
                import Meter
                meter = Meter.genDefault()
                #print '>> no meter specified. defaulting to this meter:'
                #print meter
        elif type(meter) in [str, unicode]:
            meter = self.config['meters'][meter]
        else:
            pass

        return meter
Example #7
def loadMeters(meter_dir_root,meter_dir_home):
	d={}
	import os, imp  # os for the path checks below, imp to load each meter module
	for meter_dir in [meter_dir_root,meter_dir_home]:
		if os.path.exists(meter_dir) and os.path.isdir(meter_dir):
			for fn in os.listdir(meter_dir):
				if not fn.endswith('.py') or fn.startswith('_'): continue
				idx=fn.replace('.py','').replace('-','_')
				d[idx]=imp.load_source(idx, os.path.join(meter_dir,fn))

	from Meter import Meter
	for name,module in d.items():
		mconfig = loadConfigPy(toprint=False,config=module)
		mconfig['id']=name
		mobj = Meter(config=mconfig)
		d[name]=mobj
	return d
Example #8
	def get_meter(self,meter=None):
		if not meter:
			if self.meter:
				meter=self.meter
			elif hasattr(self,'_Text__bestparses') and self.__bestparses:
				return self.get_meter(sorted(self.__bestparses.keys())[0])
			else:
				import Meter
				meter=Meter.genDefault()
				#print '>> no meter specified. defaulting to this meter:'
				#print meter
		elif type(meter) in [str,unicode]:
			meter= self.config['meters'][meter]
		else:
			pass

		return meter
Example #9
	def report(self,meter=None,include_bounded=False,reverse=True):
		""" Print all parses and their violations in a structured format. """

		ReportStr = ''
		if not meter:
			from Meter import Meter
			meter=Meter.genDefault()
		if (hasattr(self,'allParses')):
			self.om(unicode(self))
			allparses=self.allParses(meter=meter,include_bounded=include_bounded)
			numallparses=len(allparses)
			#allparses = reversed(allparses) if reverse else allparses
			for pi,parseList in enumerate(allparses):
				line=self.iparse2line(pi).txt
				#parseList.sort(key = lambda P: P.score())
				hdr="\n\n"+'='*30+'\n[line #'+str(pi+1)+' of '+str(numallparses)+']: '+line+'\n\n\t'
				ftr='='*30+'\n'
				ReportStr+=self.om(hdr+meter.printParses(parseList,reverse=reverse).replace('\n','\n\t')[:-1]+ftr,conscious=False)
		else:
			for child in self.children:
				if type(child)==type([]): continue
				ReportStr+=child.report()

		return ReportStr
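For context, a hedged end-to-end sketch tying this report method to the parsing calls used in Examples #10 and #11. It assumes report() is available on the same prosodic Text objects those examples construct (the self.children recursion above suggests it is), and the input line is purely illustrative:

import prosodic as p
import Meter

meter = Meter.genDefault()                  # default meter, as in Examples #10-#11
t = p.Text('When I do count the clock that tells the time')  # illustrative line
t.parse(meter=meter)                        # parse call taken from Examples #10-#11
report_str = t.report(meter=meter)          # the method defined above; returns ReportStr
print(report_str)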
Example #10
def assess(fn,meter=None,key_meterscheme=None, key_line='line',key_parse='parse'):
	#from prosodic import Text
	import prosodic as p
	Text=p.Text
	if not meter:
		import Meter
		meter=Meter.genDefault()

	p.config['print_to_screen']=0

	def parse2list(parse):
		l=[]
		for i,x in enumerate(parse):
			if not l or l[-1]!=x:
				l+=[x]
			else:
				l[-1]+=x
		return l

	def get_num_sylls_correct(parse_human,parse_comp):
		maxlen=max([len(parse_comp),len(parse_human)])
		#parse_human=parse2list(parse_human)
		#parse_comp=parse2list(parse_comp)
		#parse_comp_forzip = parse_comp + ['x' for x in range(maxlen-len(parse_comp))]
		#parse_human_forzip = parse_human + ['x' for x in range(maxlen-len(parse_human))]
		parse_comp_forzip = parse_comp + ''.join(['x' for x in range(maxlen-len(parse_comp))])
		parse_human_forzip = parse_human + ''.join(['x' for x in range(maxlen-len(parse_human))])

		## sylls correct?
		_sylls_iscorrect=[]
		#print '\t'.join(parse_human_forzip)
		#print '\t'.join(parse_comp_forzip)
		for syll1,syll2 in zip(parse_human_forzip,parse_comp_forzip):
			syll_iscorrect = int(syll1==syll2)
			_sylls_iscorrect+=[syll_iscorrect]
		return _sylls_iscorrect

	import codecs
	ld=read_ld(fn)
	fn_split = fn.split('.')
	ofn_split=fn_split[:-1] + ['evaluated','meter='+meter.id] + [fn_split[-1]]
	ofn_split_ot=fn_split[:-1] + ['evaluated', 'ot','meter='+meter.id] + [fn_split[-1]]
	ofn='.'.join(ofn_split)
	ofn_ot='.'.join(ofn_split_ot)

	def _print(dx):
		print
		for k,v in sorted(dx.items()):
			print k,'\t',v
		print 'HUMAN   :','\t'.join(dx['parse_human'])
		print 'PROSODIC:','\t'.join(dx['parse_comp'])
		print '         ','\t'.join(['*' if x!=y else ' ' for x,y in zip(dx['parse_human'],dx['parse_comp'])])
		print

	def _recapitalize(parse,code):
		code=' '.join([x for x in code])
		parse=parse.replace('|',' ').replace('.',' ')
		newparse=[]
		for s,c in zip(parse.split(),code.split()):
			if c=='w':
				newparse+=[s.lower()]
			else:
				newparse+=[s.upper()]
		return '  '.join(newparse)
	
	def _writegen():
		lines_iscorrect=[]
		lines_iscorrect_control=[]
		lines_iscorrect_control2=[]
		lines_iscorrect_human2=[]
		sylls_iscorrect_control=[]
		sylls_iscorrect_control2=[]
		sylls_iscorrect_human2=[]
		sylls_iscorrect=[]
		lines_iscorrect_nonbounded=[]

		otf=open(ofn_ot,'w')
		otf_nl=0

		for di,d in enumerate(ld):
			line=d[key_line]
			parse_human=''.join([x for x in d[key_parse].lower() if x in ['s','w']])
			if not parse_human: continue
			t=Text(line)
			t.parse(meter=meter)
			#if not t.isParsed: continue

			parse_comp=t.parse_str(viols=False, text=False).replace('|','')

			#if len(parse_comp) != len(parse_human): continue

			parse_str=t.parse_str(viols=False, text=True)
			parses_comp = [x.replace('|','') for x in t.parse_strs(viols=False,text=False)]

			parse_human2=''.join([x for x in d.get('parse_human2','').lower() if x in ['s','w']])

			#parse_human,parse_human2=parse_human2,parse_human

			### OT
			if not otf_nl:
				header=['','','']
				for c in meter.constraints: header+=['[*'+c.name+']']
				otf.write('\t'.join(header)+'\n')
			
			humans = [parse_human]
			if parse_human2: humans+=[parse_human2]
			for _i,_parses in enumerate(t.allParses()):
				if not _parses: continue
				_parses.sort(key=lambda _P: (-humans.count(_P.str_meter()), _P.totalCount))
				if not humans.count(_parses[0].str_meter()):
					# is the good parse in the bounded ones?
					for _bndp in t.boundParses()[_i]:
						if _bndp.str_meter() in humans:
							_parses.insert(0,_bndp)

				for _pi,_parse in enumerate(_parses):
					otf_nl+=1
					code=_parse.str_meter()
					row=[line.encode('utf-8',errors='ignore') if not _pi else '', str(_parse) + (' [*Bounded]' if _parse.isBounded else ''), str(humans.count(code)) if code in humans else '']
					for c in meter.constraints: row+=[str(_parse.constraintCounts[c]) if _parse.constraintCounts[c] else '']
					otf.write('\t'.join(row)+'\n')



			
			parse_comp_dummy2 = ''.join(['w' if not i%2 else 's' for i in range(len(parse_comp))])
			if key_meterscheme:
				if d[key_meterscheme]=='iambic':
					parse_comp_dummy = ('ws'*100)[:len(parse_comp)]
				elif d[key_meterscheme]=='trochaic':
					parse_comp_dummy = ('sw'*100)[:len(parse_comp)]
				elif d[key_meterscheme]=='anapestic':
					parse_comp_dummy = ('wws'*100)[:len(parse_comp)]
				elif d[key_meterscheme]=='dactylic':
					parse_comp_dummy = ('sww'*100)[:len(parse_comp)]
				else:
					parse_comp_dummy=parse_comp_dummy2
			else:
				parse_comp_dummy=parse_comp_dummy2


			
			## sylls correct?
			this_sylls_correct = get_num_sylls_correct(parse_human, parse_comp)
			this_sylls_correct_dummy = get_num_sylls_correct(parse_human, parse_comp_dummy)
			this_sylls_correct_dummy2 = get_num_sylls_correct(parse_human, parse_comp_dummy2)
			if parse_human2: this_sylls_correct_human2 = get_num_sylls_correct(parse_human, parse_human2)
			num_sylls_correct=sum(this_sylls_correct)
			num_sylls_correct_dummy = sum(this_sylls_correct_dummy)
			num_sylls_correct_dummy2 = sum(this_sylls_correct_dummy2)
			if parse_human2: num_sylls_correct_human2 = sum(this_sylls_correct_human2)
			sylls_iscorrect+=this_sylls_correct
			sylls_iscorrect_control+=this_sylls_correct_dummy
			sylls_iscorrect_control2+=this_sylls_correct_dummy2
			if parse_human2: sylls_iscorrect_human2+=this_sylls_correct_human2


			# line correct?
			line_iscorrect=int(parse_comp == parse_human)
			lines_iscorrect+=[line_iscorrect]
			line_iscorrect_dummy = int(parse_comp_dummy == parse_human)
			line_iscorrect_dummy2 = int(parse_comp_dummy2 == parse_human)
			if parse_human2: line_iscorrect_human2 = int(parse_human2 == parse_human)
			lines_iscorrect_control+=[line_iscorrect_dummy]
			lines_iscorrect_control2+=[line_iscorrect_dummy2]
			if parse_human2: lines_iscorrect_human2+=[line_iscorrect_human2]

			# line at least in list of nonbounded parses?
			line_iscorrect_nonbounded=int(parse_human in parses_comp)
			lines_iscorrect_nonbounded+=[line_iscorrect_nonbounded]

			parse_stress = []
			for w in t.words():
				for x in w.stress:
					parse_stress += ['w' if x=='U' else 's']
			parse_stress=''.join(parse_stress)
			

			odx=d
			odx['parse_human']=parse_human
			if parse_human2: odx['parse_human2']=parse_human2
			odx['parse_comp']=parse_comp
			odx['parses_comp_nonbounded']=' | '.join(parses_comp)
			odx['num_sylls']=len(parse_human)
			odx['num_sylls_correct']=num_sylls_correct
			odx['num_sylls_correct_control']=num_sylls_correct_dummy
			odx['num_sylls_correct_control_iambic']=num_sylls_correct_dummy2
			if parse_human2:
				odx['num_sylls_correct_human2']=num_sylls_correct_human2
				odx['perc_sylls_correct_human2']=num_sylls_correct_human2 / float(len(parse_human))
				odx['line_iscorrect_human2']=line_iscorrect_human2
			odx['perc_sylls_correct']=num_sylls_correct / float(len(parse_human))
			odx['perc_sylls_correct_control']=num_sylls_correct_dummy  / float(len(parse_human))
			odx['perc_sylls_correct_control_iambic']=num_sylls_correct_dummy2 / float(len(parse_human))
			odx['line_iscorrect']=line_iscorrect
			odx['line_iscorrect_dummy']=line_iscorrect_dummy
			odx['line_iscorrect_dummy_iambic']=line_iscorrect_dummy2
			odx['line_is_in_nonbounded_parses']=line_iscorrect_nonbounded
			odx['parse_str_human']=_recapitalize(parse_str, parse_human)
			odx['parse_str_compu']=_recapitalize(parse_str, parse_comp)
			odx['parse_str_stress']=_recapitalize(parse_str, parse_stress)
			odx['prosody_ipa']=' '.join([w.str_ipasyllstress() for w in t.words()])
			odx['prosody_stress']=' '.join([w.stress for w in t.words()])
			odx['meter_info']=str(t.meter).replace('\n',' ').replace('\t',' ')
			sumconstr=0
			for k,v in t.constraintViolations(use_weights=False,normalize=False).items():
				odx['constraint_'+k]=v
				sumconstr+=v
			odx['constraint_SUM_VIOL']=sumconstr

			#if not line_iscorrect and line_iscorrect_dummy:
			#if len(parse_comp) != len(parse_human):
			#if len(parse_human)>len(parse_comp):
			_print(odx)
			yield odx

		print
		print '##'*10
		print 'RESULTS SUMMARY'
		print '##'*10
		perc_sylls_correct = sum(sylls_iscorrect) / float(len(sylls_iscorrect)) * 100
		perc_lines_correct = sum(lines_iscorrect) / float(len(lines_iscorrect)) * 100
		perc_lines_correct_control = sum(lines_iscorrect_control) / float(len(lines_iscorrect_control)) * 100
		perc_sylls_correct_control = sum(sylls_iscorrect_control) / float(len(sylls_iscorrect_control)) * 100
		perc_lines_correct_nonbound = sum(lines_iscorrect_nonbounded) / float(len(lines_iscorrect_nonbounded)) * 100
		print 'PERCENT SYLLABLES CORRECT:',round(perc_sylls_correct,2),'% [vs.',round(perc_sylls_correct_control,2),'% for control]'
		print 'PERCENT LINES CORRECT:',round(perc_lines_correct,2),'% [vs.',round(perc_lines_correct_control,2),'% for control]'
		print 'PERCENT LINES IN AVAILABLE NONBOUNDED PARSES:',round(perc_lines_correct_nonbound,2),'%'
	
	writegen(ofn, _writegen)
Example #11
def assess(fn,meter=None,key_meterscheme=None, key_line='line',key_parse='parse'):
	#from prosodic import Text
	import prosodic as p
	Text=p.Text
	if not meter:
		import Meter
		meter=Meter.genDefault()

	p.config['print_to_screen']=0

	def parse2list(parse):
		l=[]
		for i,x in enumerate(parse):
			if not l or l[-1]!=x:
				l+=[x]
			else:
				l[-1]+=x
		return l

	def get_num_sylls_correct(parse_human,parse_comp):
		maxlen=max([len(parse_comp),len(parse_human)])
		#parse_human=parse2list(parse_human)
		#parse_comp=parse2list(parse_comp)
		#parse_comp_forzip = parse_comp + ['x' for x in range(maxlen-len(parse_comp))]
		#parse_human_forzip = parse_human + ['x' for x in range(maxlen-len(parse_human))]
		parse_comp_forzip = parse_comp + ''.join(['x' for x in range(maxlen-len(parse_comp))])
		parse_human_forzip = parse_human + ''.join(['x' for x in range(maxlen-len(parse_human))])

		## sylls correct?
		_sylls_iscorrect=[]
		#print '\t'.join(parse_human_forzip)
		#print '\t'.join(parse_comp_forzip)
		for syll1,syll2 in zip(parse_human_forzip,parse_comp_forzip):
			syll_iscorrect = int(syll1==syll2)
			_sylls_iscorrect+=[syll_iscorrect]
		return _sylls_iscorrect

	import codecs
	ld=read_ld(fn)
	fn_split = fn.split('.')
	ofn_split=fn_split[:-1] + ['evaluated','meter='+meter.id] + [fn_split[-1]]
	ofn_split_ot=fn_split[:-1] + ['evaluated', 'ot','meter='+meter.id] + [fn_split[-1]]
	ofn='.'.join(ofn_split)
	ofn_ot='.'.join(ofn_split_ot)

	def _print(dx):
		print()
		for k,v in sorted(dx.items()):
			print(k,'\t',v)
		print('HUMAN   :','\t'.join(dx['parse_human']))
		print('PROSODIC:','\t'.join(dx['parse_comp']))
		print('         ','\t'.join(['*' if x!=y else ' ' for x,y in zip(dx['parse_human'],dx['parse_comp'])]))
		print()

	def _recapitalize(parse,code):
		code=' '.join([x for x in code])
		parse=parse.replace('|',' ').replace('.',' ')
		newparse=[]
		for s,c in zip(parse.split(),code.split()):
			if c=='w':
				newparse+=[s.lower()]
			else:
				newparse+=[s.upper()]
		return '  '.join(newparse)

	def _writegen():
		lines_iscorrect=[]
		lines_iscorrect_control=[]
		lines_iscorrect_control2=[]
		lines_iscorrect_human2=[]
		sylls_iscorrect_control=[]
		sylls_iscorrect_control2=[]
		sylls_iscorrect_human2=[]
		sylls_iscorrect=[]
		lines_iscorrect_nonbounded=[]

		otf=open(ofn_ot,'w')
		otf_nl=0

		for di,d in enumerate(ld):
			line=d[key_line]
			parse_human=''.join([x for x in d[key_parse].lower() if x in ['s','w']])
			if not parse_human: continue
			t=Text(line)
			t.parse(meter=meter)
			#if not t.isParsed: continue

			parse_comp=t.parse_str(viols=False, text=False).replace('|','')

			#if len(parse_comp) != len(parse_human): continue

			parse_str=t.parse_str(viols=False, text=True)
			parses_comp = [x.replace('|','') for x in t.parse_strs(viols=False,text=False)]

			parse_human2=''.join([x for x in d.get('parse_human2','').lower() if x in ['s','w']])

			#parse_human,parse_human2=parse_human2,parse_human

			### OT
			if not otf_nl:
				header=['','','']
				for c in meter.constraints: header+=['[*'+c.name+']']
				otf.write('\t'.join(header)+'\n')

			humans = [parse_human]
			if parse_human2: humans+=[parse_human2]
			for _i,_parses in enumerate(t.allParses()):
				if not _parses: continue
				_parses.sort(key=lambda _P: (-humans.count(_P.str_meter()), _P.totalCount))
				if not humans.count(_parses[0].str_meter()):
					# is the good parse in the bounded ones?
					for _bndp in t.boundParses()[_i]:
						if _bndp.str_meter() in humans:
							_parses.insert(0,_bndp)

				for _pi,_parse in enumerate(_parses):
					otf_nl+=1
					code=_parse.str_meter()
					row=[line.encode('utf-8',errors='ignore') if not _pi else '', str(_parse) + (' [*Bounded]' if _parse.isBounded else ''), str(humans.count(code)) if code in humans else '']
					for c in meter.constraints: row+=[str(_parse.constraintCounts[c]) if _parse.constraintCounts[c] else '']
					otf.write('\t'.join(row)+'\n')




			parse_comp_dummy2 = ''.join(['w' if not i%2 else 's' for i in range(len(parse_comp))])
			if key_meterscheme:
				if d[key_meterscheme]=='iambic':
					parse_comp_dummy = ('ws'*100)[:len(parse_comp)]
				elif d[key_meterscheme]=='trochaic':
					parse_comp_dummy = ('sw'*100)[:len(parse_comp)]
				elif d[key_meterscheme]=='anapestic':
					parse_comp_dummy = ('wws'*100)[:len(parse_comp)]
				elif d[key_meterscheme]=='dactylic':
					parse_comp_dummy = ('sww'*100)[:len(parse_comp)]
				else:
					parse_comp_dummy=parse_comp_dummy2
			else:
				parse_comp_dummy=parse_comp_dummy2



			## sylls correct?
			this_sylls_correct = get_num_sylls_correct(parse_human, parse_comp)
			this_sylls_correct_dummy = get_num_sylls_correct(parse_human, parse_comp_dummy)
			this_sylls_correct_dummy2 = get_num_sylls_correct(parse_human, parse_comp_dummy2)
			if parse_human2: this_sylls_correct_human2 = get_num_sylls_correct(parse_human, parse_human2)
			num_sylls_correct=sum(this_sylls_correct)
			num_sylls_correct_dummy = sum(this_sylls_correct_dummy)
			num_sylls_correct_dummy2 = sum(this_sylls_correct_dummy2)
			if parse_human2: num_sylls_correct_human2 = sum(this_sylls_correct_human2)
			sylls_iscorrect+=this_sylls_correct
			sylls_iscorrect_control+=this_sylls_correct_dummy
			sylls_iscorrect_control2+=this_sylls_correct_dummy2
			if parse_human2: sylls_iscorrect_human2+=this_sylls_correct_human2


			# line correct?
			line_iscorrect=int(parse_comp == parse_human)
			lines_iscorrect+=[line_iscorrect]
			line_iscorrect_dummy = int(parse_comp_dummy == parse_human)
			line_iscorrect_dummy2 = int(parse_comp_dummy2 == parse_human)
			if parse_human2: line_iscorrect_human2 = int(parse_human2 == parse_human)
			lines_iscorrect_control+=[line_iscorrect_dummy]
			lines_iscorrect_control2+=[line_iscorrect_dummy2]
			if parse_human2: lines_iscorrect_human2+=[line_iscorrect_human2]

			# line at least in list of nonbounded parses?
			line_iscorrect_nonbounded=int(parse_human in parses_comp)
			lines_iscorrect_nonbounded+=[line_iscorrect_nonbounded]

			parse_stress = []
			for w in t.words():
				for x in w.stress:
					parse_stress += ['w' if x=='U' else 's']
			parse_stress=''.join(parse_stress)


			odx=d
			odx['parse_human']=parse_human
			if parse_human2: odx['parse_human2']=parse_human2
			odx['parse_comp']=parse_comp
			odx['parses_comp_nonbounded']=' | '.join(parses_comp)
			odx['num_sylls']=len(parse_human)
			odx['num_sylls_correct']=num_sylls_correct
			odx['num_sylls_correct_control']=num_sylls_correct_dummy
			odx['num_sylls_correct_control_iambic']=num_sylls_correct_dummy2
			if parse_human2:
				odx['num_sylls_correct_human2']=num_sylls_correct_human2
				odx['perc_sylls_correct_human2']=num_sylls_correct_human2 / float(len(parse_human))
				odx['line_iscorrect_human2']=line_iscorrect_human2
			odx['perc_sylls_correct']=num_sylls_correct / float(len(parse_human))
			odx['perc_sylls_correct_control']=num_sylls_correct_dummy  / float(len(parse_human))
			odx['perc_sylls_correct_control_iambic']=num_sylls_correct_dummy2 / float(len(parse_human))
			odx['line_iscorrect']=line_iscorrect
			odx['line_iscorrect_dummy']=line_iscorrect_dummy
			odx['line_iscorrect_dummy_iambic']=line_iscorrect_dummy2
			odx['line_is_in_nonbounded_parses']=line_iscorrect_nonbounded
			odx['parse_str_human']=_recapitalize(parse_str, parse_human)
			odx['parse_str_compu']=_recapitalize(parse_str, parse_comp)
			odx['parse_str_stress']=_recapitalize(parse_str, parse_stress)
			odx['prosody_ipa']=' '.join([w.str_ipasyllstress() for w in t.words()])
			odx['prosody_stress']=' '.join([w.stress for w in t.words()])
			odx['meter_info']=str(t.meter).replace('\n',' ').replace('\t',' ')
			sumconstr=0
			for k,v in list(t.constraintViolations(use_weights=False,normalize=False).items()):
				odx['constraint_'+k]=v
				sumconstr+=v
			odx['constraint_SUM_VIOL']=sumconstr

			#if not line_iscorrect and line_iscorrect_dummy:
			#if len(parse_comp) != len(parse_human):
			#if len(parse_human)>len(parse_comp):
			_print(odx)
			yield odx

		print()
		print('##'*10)
		print('RESULTS SUMMARY')
		print('##'*10)
		perc_sylls_correct = sum(sylls_iscorrect) / float(len(sylls_iscorrect)) * 100
		perc_lines_correct = sum(lines_iscorrect) / float(len(lines_iscorrect)) * 100
		perc_lines_correct_control = sum(lines_iscorrect_control) / float(len(lines_iscorrect_control)) * 100
		perc_sylls_correct_control = sum(sylls_iscorrect_control) / float(len(sylls_iscorrect_control)) * 100
		perc_lines_correct_nonbound = sum(lines_iscorrect_nonbounded) / float(len(lines_iscorrect_nonbounded)) * 100
		print('PERCENT SYLLABLES CORRECT:',round(perc_sylls_correct,2),'% [vs.',round(perc_sylls_correct_control,2),'% for control]')
		print('PERCENT LINES CORRECT:',round(perc_lines_correct,2),'% [vs.',round(perc_lines_correct_control,2),'% for control]')
		print('PERCENT LINES IN AVAILABLE NONBOUNDED PARSES:',round(perc_lines_correct_nonbound,2),'%')

	writegen(ofn, _writegen)
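A small worked example of the get_num_sylls_correct helper defined inside assess: both parses are padded with 'x' up to the length of the longer one and then compared position by position, so mismatched and missing syllables both count as wrong. The helper is local to assess, so the sketch below restates its logic in a self-contained form:

# compact restatement of the helper's logic, for illustration only
def get_num_sylls_correct(parse_human, parse_comp):
    maxlen = max(len(parse_comp), len(parse_human))
    parse_comp = parse_comp.ljust(maxlen, 'x')    # pad the shorter string with 'x'
    parse_human = parse_human.ljust(maxlen, 'x')
    return [int(a == b) for a, b in zip(parse_human, parse_comp)]

print(get_num_sylls_correct('wsws', 'wssw'))    # [1, 1, 0, 0]  -- last two syllables disagree
print(get_num_sylls_correct('wswsws', 'wsws'))  # [1, 1, 1, 1, 0, 0]  -- 'wsws' padded to 'wswsxx'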
Example #12
	def _scansion_prepare(self):
		from Meter import Meter
		meter=Meter(config['constraints'].split(),(config['maxS'],config['maxW']),self.findattr('dict'))
		ckeys="\t".join(sorted([str(x) for x in meter.constraints]))
		self.om("\t".join([makeminlength(str("text"),being.linelen),makeminlength(str("parse"),being.linelen),"#pars","#viol","meter",ckeys]))
Example #13
import threading, time, datetime
from Meter import Meter
from PV_simulator import PV_simulator

# Get time frame from user
timeNow = datetime.datetime.now().time()
print("The time is now: " + str(timeNow) +
      "\nWhat time of day do you want to stop streaming from the Meter?" +
      "\n\tInput Format (HH:MM:SS):\n\t\tExample = 23:59:59 ")
timeToStop = raw_input("Enter stop time: ")
format = '%H:%M:%S'
timeToStop = datetime.datetime.strptime(timeToStop, format)
# Initialize, append and start threads
threads = []
PV_thread = PV_simulator()
meter_thread = Meter()
threads.append(meter_thread)
threads.append(PV_thread)
PV_thread.start()
meter_thread.start()
# Run loop until stop time is reached
while (True):
    timeNow = str(datetime.datetime.now().time()).split(".")[0]
    if timeToStop >= datetime.datetime.strptime(timeNow, format):
        time.sleep(1)
    else:
        meter_thread.proceed = False
        PV_thread.proceed = False
        for thread in threads:
            thread.join()
        break
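Example #13 stops its worker threads cooperatively: each thread is expected to poll its proceed flag and return from run() once the flag is cleared. A minimal, self-contained sketch of that pattern (the class and attribute names mirror the example but are otherwise hypothetical):

import threading, time

class StoppableWorker(threading.Thread):
    """Hypothetical worker that polls a flag, like Meter and PV_simulator above."""
    def __init__(self):
        super(StoppableWorker, self).__init__()
        self.proceed = True          # cleared by the main loop to request shutdown

    def run(self):
        while self.proceed:          # cooperative stop: exit when the flag is cleared
            time.sleep(1)            # stand-in for the real metering/simulation work

worker = StoppableWorker()
worker.start()
time.sleep(3)
worker.proceed = False               # ask the thread to finish...
worker.join()                        # ...and wait for it, as the loop above does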
Example #14
def train(train_Loader,model,criterion,optimizer,ith_epoch):

    data_time = Meter() # measure average batch data loading time
    batch_time = Meter() # measure average batch computing time, including forward and backward
    losses = Meter() # record average losses across all mini-batches within an epoch
    prec1 = Meter()
    prec3 = Meter()

    model.train()
    end = time.time()
    for ith_batch, data in enumerate(train_Loader):

        input , label = data['image'], data['label']
        input, label = input.cuda(), label.cuda()
        data_time.update(time.time()-end)
        end = time.time()

        # Forward pass
        input_var,label_var = Variable(input), Variable(label)
        output = model(input_var)
        loss = criterion(output,label) # average loss within a mini-batch

        # measure accuracy and record loss
        res, cls1, cls3 = utility_Func.accuracy(output.data,label,topk=(0,2))
        losses.update(loss.data[0])
        prec1.update(res[0])
        prec3.update(res[1])

        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        optimizer.n_iters = optimizer.n_iters + 1 if hasattr(optimizer, 'n_iters') else 0

        batch_time.update(time.time()-end)
        end = time.time()

        bt_avg,dt_avg,loss_avg,prec1_avg,prec3_avg = batch_time.avg(),data_time.avg(),losses.avg(),prec1.avg(),prec3.avg()
        if ith_batch % args.print_freq == 0:
            print('Train : ith_batch, batches, ith_epoch : %s %s %s\n' %(ith_batch,len(train_Loader),ith_epoch),
                  'Averaged Batch-computing Time : %s \n' % bt_avg,
                  'Averaged Batch-loading Time : %s \n' % dt_avg,
                  'Averaged Batch-Loss : %s \n' % loss_avg,
                  'Averaged Batch-prec1 : %s \n' % prec1_avg,
                  'Averaged Batch-prec3 : %s \n' % prec3_avg)

    return losses.avg(),prec1.avg(),prec3.avg()
Example #15
def validate(val_Loader,model,criterion,ith_epoch):

    batch_time = Meter()  # measure average batch processing time, including forward and output
    losses = Meter()  # record average losses across all mini-batches within an epoch
    top1 = Meter()  # record average top1 precision across all mini-batches within an epoch
    top3 = Meter()  # record average top3 precision
    cls_top1,cls_top3 = {i:Meter() for i in range(80)},{i:Meter() for i in range(80)}

    model.eval()
    end = time.time()
    for ith_batch, data in enumerate(val_Loader):

        # Forward pass
        tmp = list()
        final_output = torch.zeros(len(data['label']), 80).cuda()
        for i in range(10):
            input = data['image'][i]
            input = input.cuda()
            input_var = Variable(input)
            output = model(input_var)  # args.batchSize //32  x 80
            tmp.append(output.data)

        for i in range(len(data['label'])):
            for j in range(10):
                final_output[i,:]+=tmp[j][i,:]
            final_output[i,:].div_(10.0)
        final_output_var = Variable(final_output)
        loss = criterion(final_output_var,data['label'].cuda())  # average loss within a mini-batch

        # measure accuracy and record loss
        res, cls1, cls3 = utility_Func.accuracy(final_output,data['label'].cuda(),topk=(0, 2))
        losses.update(loss.data[0])
        top1.update(res[0])
        top3.update(res[1])
        for i in range(len(data['label'])):
            cls_top1[data['label'][i]].update(cls1[i])
            cls_top3[data['label'][i]].update(cls3[i])

        batch_time.update(time.time() - end)
        end = time.time()

        bt_avg,loss_avg,top1_avg,top3_avg = batch_time.avg(),losses.avg(),top1.avg(),top3.avg()
        if ith_batch % args.print_freq == 0:
            print('Validate : ith_batch, batches, ith_epoch : %s %s %s \n' % (ith_batch, len(val_Loader), ith_epoch),
                  'Averaged Batch-computing Time : %s \n' % bt_avg,
                  'Averaged Batch-Loss : %s \n' % loss_avg,
                  'Averaged Batch-Prec@1 : %s \n' % top1_avg,
                  'Averaged Batch-Prec@3 : %s \n' % top3_avg)

    return losses.avg(),top1.avg(),top3.avg(),cls_top1,cls_top3
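Examples #14 and #15 never show this Meter class itself; judging from the calls they make (a no-argument constructor, update(value), and avg()), it behaves like the usual running-average meter found in PyTorch training loops. A hedged sketch of such a class, offered as an assumption about the interface rather than the actual implementation:

class Meter(object):
    """Hypothetical running-average meter matching the update()/avg() usage above."""
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        # accumulate a new observation (e.g. a batch loss or a precision value)
        self.sum += value * n
        self.count += n

    def avg(self):
        # running average over everything seen so far; 0 if nothing recorded yet
        return self.sum / self.count if self.count else 0.0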