Example no. 1
0
def classify_encapsulated(audio_summary, track_info, pickle_file):
	"""Classify one track using a previously fitted model loaded from disk.

	Performs the same feature extraction and classification as classify()
	and the helpers above, but in one step for the GUI: the fitted R model
	is retrieved from a pickle file instead of being passed in.

	Args:
		audio_summary: dict with an 'audio_summary' sub-dict
			(speechiness, time_signature, tempo, key, duration, loudness).
		track_info: dict with 'track', 'beats' and 'sections' analysis data.
		pickle_file: path to the pickled fit object.

	Returns:
		[(edm_prob, folk_prob, rap_prob), predicted_class_label]
	"""
	# Pickles are binary data: open with 'rb' (text-mode 'r' corrupts the
	# stream on Windows and fails on Python 3) and let the context manager
	# close the file even if unpickling raises.
	with open(pickle_file, 'rb') as f:
		fit = pickle.load(f)
	# Each feature is wrapped in a one-element list so build_data_frame()
	# receives column vectors of length 1.
	rv = {}
	rv["speechiness"] = [audio_summary['audio_summary']['speechiness']]
	rv["time_sig"] = [audio_summary['audio_summary']['time_signature']]
	rv["bpm"] = [audio_summary['audio_summary']['tempo']]
	rv["key"] = [audio_summary['audio_summary']['key']]
	rv["duration"] = [audio_summary['audio_summary']['duration']]
	rv["loudness"] = [audio_summary['audio_summary']['loudness']]
	rv["end_of_fade_in"] = [track_info['track']['end_of_fade_in']]
	rv["start_of_fade_out"] = [track_info['track']['start_of_fade_out']]
	rv["bpm_range"] = [proc.bpm_range(track_info['beats'])]
	rv["max_bpm_spike"] = [proc.max_bpm_spike(track_info['beats'])]
	try:
		rv["num_keys"] = [proc.num_keys(track_info['sections'])]
	except Exception:
		# num_keys() can fail on sparse/missing section data; assume one key.
		rv["num_keys"] = [1]
	rv["sections"] = [proc.num_sections(track_info['sections'])]
	new_df = build_data_frame([rv], ["Unknown"])
	p = prob_category(new_df, fit)
	robjects.globalenv["pred"] = p
	# p is an R vector of per-genre probabilities; rx(i) is 1-based indexing.
	edm_prob = robjects.default_ri2py(p.rx(1))[0]
	folk_prob = robjects.default_ri2py(p.rx(2))[0]
	rap_prob = robjects.default_ri2py(p.rx(3))[0]
	cls = classify(new_df, fit)
	return [(edm_prob, folk_prob, rap_prob), cls[0]]
Example no. 2
0
def query(name, artist):
	"""Print a summary of a song and return the data needed by the decision tree.

	Looks the song up via song_profile(), fetches its detailed analysis,
	and pushes a human-readable summary through display_text().

	Args:
		name: song title; an empty string aborts the query.
		artist: artist name; an empty string aborts the query.

	Returns:
		(summary_dict, analysis_dict) on success, or -1 on failure.
	"""
	# All 12 pitch classes, indexed by key number (0 = C).
	# BUG FIX: the original list omitted 'B', so key 11 raised IndexError
	# and was wrongly reported as "key not found".
	keys = ['C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B']
	if name == "" or artist == "":
		display_text("No results for that query. \n")
		return -1
	info = song_profile(str(name), str(artist))  # renamed: don't shadow builtin `dict`
	if info == "No results for that query. \n":
		# song_profile() signals failure by returning its own error message.
		display_text(info)
		return -1
	analysis = gti.get_json(info['audio_summary']['analysis_url'])
	summary = info['audio_summary']
	try:
		key_text = keys[summary['key']]
	except (IndexError, KeyError, TypeError):
		# Missing or non-integer key in the summary.
		key_text = 'Unknown'
	try:
		num_keys_text = str(procinp.num_keys(analysis['sections']))
	except Exception:
		# num_keys() can fail on sparse/missing section data.
		num_keys_text = 'Unknown'
	lines = [
		"Song information:\n",
		"\t" + info['title'] + ", by " + info['artist_name'] + "\n",
		"\tTime signature: " + str(summary['time_signature']) + "\n",
		"\tTempo: " + str(summary['tempo']) + " beats per minute\n",
		"\tSpeechiness: " + str(summary['speechiness']) + "\n",
		"\tLoudness: " + str(summary['loudness']) + " decibels\n",
		"\tKey: " + key_text + "\n",
		"\tDuration: " + str(summary['duration']) + " seconds\n",
		"\tTempo Range: " + str(procinp.bpm_range(analysis['beats'])) + "\n",
		"\tMaximum Tempo Spike: " + str(procinp.max_bpm_spike(analysis['beats'])) + "\n",
		"\tNumber of Sections: " + str(procinp.num_sections(analysis['sections'])) + "\n",
		"\tNumber of Keys: " + num_keys_text + "\n",
		"\n",
	]
	display_text("".join(lines))
	return (info, analysis)