Example no. 1
    def run(self, args, state):
        from jazzparser.formalisms.music_halfspan.evaluation import \
                        tonal_space_local_alignment, tonal_space_distance
        songnum = int(args[0])

        name, song = get_song(songnum, state)
        songset = state.get_data("songset")
        distances = []
        # Try comparing this song to each song in the set
        for other_name, other_song in songset.analyses:
            # Align locally and globally
            ops,steps1,steps2,local_distance = \
                    tonal_space_local_alignment(other_song.lf, song.lf)
            global_distance = \
                    tonal_space_distance(other_song.lf, song.lf)
            distances.append((other_name, local_distance, global_distance))

        # Sort the results
        if self.options['local']:
            distances.sort(key=lambda x: x[1])
        else:
            distances.sort(key=lambda x: x[2])
        # Print out each one
        print "Aligned %s with:" % name
        for other_name, local_distance, global_distance in distances:
            print "%s:  local: %s,  global: %s" % \
                (other_name,local_distance,global_distance)
def max_parse(semantics, gold):
    """Return the index of the parse in *semantics* closest to *gold*.

    semantics: iterable of ``(probability, semantics)`` pairs; each
        semantics object exposes an ``.lf`` logical form.
    gold: gold-standard semantics object (``.lf`` compared against each
        parse with ``tonal_space_distance``).

    Returns the index of the MINIMUM-distance parse (0 if *semantics*
    is empty). The function name is kept for backward compatibility,
    but the original locals (``max_score``/``max_index``) were
    misleading: starting from +inf and updating on ``max_score > score``
    tracks a minimum, not a maximum. The stale "get dependency graphs"
    comment has been removed for the same reason.
    """
    best_score = float("inf")
    best_index = 0

    for i, (prob, sem) in enumerate(semantics):
        # Tonal-space distance between this parse and the gold standard
        score = tonal_space_distance(sem.lf, gold.lf)
        if score < best_score:
            best_score = score
            best_index = i

    return best_index
Example no. 3
 def distance(self, sem1, sem2):
     """Return the distance between two semantics.

     In the special 'dist' output mode the raw tonal-space distance is
     used (with an absent input treated as all-points-deleted); in any
     other mode the superclass computation applies.
     """
     if self.options['output'] != 'dist':
         # Not the special case — the superclass takes care of everything
         return super(TonalSpaceEditDistance, self).distance(sem1, sem2)

     # 'dist' mode: if one input is empty, all of the other's points
     # count as deletions, so the distance is just its length
     if sem1 is None:
         return tonal_space_length(sem2)
     if sem2 is None:
         return tonal_space_length(sem1)

     # Standard TS distance: the alignment score of the optimal
     # alignment of the two sequences
     return tonal_space_distance(sem1.lf, sem2.lf)
def reranking(input_files):
	"""Perceptron-style reranking over pickled parse-result files.

	For each of T passes over *input_files*: load the parse results,
	pick the best parse ``zi`` under the current feature weights ``v``
	via ``max_parse``, and when ``zi`` differs from the gold analysis
	(non-zero tonal-space distance) shift ``v`` towards the gold
	features and away from zi's features.

	NOTE(review): this body mixes tab indentation (up to the weight
	update) with space indentation (from ``if options.times:`` on), so
	it will not parse as written; the trailing section reads like a
	separate evaluation/reporting snippet pasted in. Confirm against
	the original source before running.
	"""
	
	# Initialization: v = 0
	learning_rate = 0.2  # NOTE(review): assigned but never used below
	v = get_features_vector()
	input_files = sorted(input_files)
	# Algorithm:
	# For t = 1..T, i = 1..n
	#	zi = F(xi)
	#	if (zi != yi) v = v + f(xi, yi) - f(xi, zi)
	T = 10  # number of passes over the training files
	for t in range(T):
		print "========== Loop: %d ==========" % t
		for parses_result in input_files:
			# We read in the whole file (it's pickled, so we have to), but don't 
			#  keep the pres object after the loop iteration, because it can 
			#  be very big
			try:
				pres = ParseResults.from_file(parses_result)
			except ParseResults.LoadError, err:
				if options.errors:
					# Print all load errors
					print >>sys.stderr, "Error loading file: %s" % (err)
				errors.append(parses_result)
				continue

			# get gold semantics and gold dependency graph
			gold_result = pres.get_gold_semantics()
			gold_depend_graph = get_depend_graph(gold_result)			

			# calcuate maximum index of parses
			if len(pres.semantics) > 0:
				max_index = max_parse(pres.semantics, v)
				zi = pres.semantics[max_index]

				# get maximum dependency graph			
				zi_depend_graph = get_depend_graph(zi[1])
				
				# Perceptron update only when the selected parse differs
				# from the gold analysis (distance is non-zero)
				if tonal_space_distance(zi[1].lf, gold_result.lf) != 0:
					gold_features = get_features(gold_depend_graph)
					zi_features = get_features(zi_depend_graph)
					# NOTE(review): iterates zi's features only, so
					# gold-only features never enter v — confirm intended
					for k, val in zi_features.iteritems():
						v[k] = v[k] + gold_features[k] - zi_features[k]

				# Running total of the weights, printed per file
				print sum(v.values())
            
            # NOTE(review): indentation switches from tabs to spaces here;
            # `options`, `gold_time_map` are not defined in this view
            if options.times:
                for (node,time) in sorted(gold_time_map.items(), key=lambda x:x[1]):
                    print "%d @ %s" % (node, time)
            print
    
    # Get the top result's semantics
    # NOTE(review): relies on `pres`/`gold_result` surviving the loop above
    if len(pres.semantics) == 0:
        print >>sys.stderr, "No results"
    else:
        top_result = pres.semantics[0][1]
        graph,time_map = semantics_to_dependency_graph(top_result)

        # Before reranking: distance of the parser's own top parse
        total_parses = len(pres.semantics)
        top_distance = tonal_space_distance(top_result.lf, gold_result.lf)

        # After reranking: reload weights from the feature-params file
        v = {}
        v = json.load(open(FEATURE_PARAMS))
        # calcuate reranking index of parses
        if len(pres.semantics) > 0:
            reranking_index = reranking_parse(pres.semantics, v)
            reranking_result = pres.semantics[reranking_index][1]
            reranking_distance = tonal_space_distance(reranking_result.lf, gold_result.lf)
            reranking_graph,time_map = semantics_to_dependency_graph(reranking_result)

        # Perfect reranking: the best achievable parse against the gold
        max_index = max_parse(pres.semantics, gold_result)
        zi = pres.semantics[max_index][1]
        max_distance = tonal_space_distance(zi.lf, gold_result.lf)