# NOTE(review): this chunk begins mid-file — `audio`, `start_idx`, `end_idx`,
# `pool` and `calculateThemeDescriptor` are defined earlier (not visible here),
# and the chunk is cut off mid-loop at the end. Indentation reconstructed;
# the first statements presumably sit inside a loop over mix segments — confirm.
fragment = audio[start_idx : end_idx]
theme_descr = calculateThemeDescriptor(fragment)
print theme_descr  # Python 2 print statement
# Pool stores 2-D descriptors; [0] unwraps the single row before appending.
pool.add('theme_descriptors', theme_descr.tolist()[0])

# --------------------- Load all audio files ---------------------------
from songcollection import SongCollection

sc = SongCollection()
sc.load_directory('../music')
sc.load_directory('../moremusic')
sc.load_directory('../evenmoremusic')
sc.load_directory('../music/test')

# Collect the theme descriptor of every annotated song in the library.
songs = []
for song in sc.get_annotated():
    song.open()
    pool.add('song.themes', song.song_theme_descriptor.tolist()[0])
    songs.append(song.title)
    song.close()

# --------------------- Make nice plots ---------------------------
Y = pool['song.themes']        # All songs in "/music" and "/moremusic" libraries
X = pool['theme_descriptors']  # Evolution of mix theme descriptors

from scipy.spatial.distance import euclidean as euclidean_distance

# For each consecutive pair of mix descriptors, measure the step size and
# count how many library songs lie closer than that step.
for P1, P2 in zip(X[:-1, :], X[1:, :]):
    distance_path = euclidean_distance(P1, P2)
    num_songs_closer = 0
    for P in Y:
        # NOTE(review): loop body continues beyond this chunk — truncated here.
# NOTE(review): chunk starts mid-`try:` — the opening `try` (and the enclosing
# `while` loop implied by `break`/`continue`) are above this view. Indentation
# reconstructed to match that assumed structure.
        # Read one command line from the user and split it on spaces.
        cmd_split = str.split(raw_input('> : '), ' ')  # Python 2 raw_input
    except KeyboardInterrupt:
        # Ctrl-C exits the command loop cleanly.
        logger.info('Goodbye!')
        break
    cmd = cmd_split[0]
    if cmd == 'loaddir':
        # loaddir <directory>: add every song in <directory> to the collection.
        if len(cmd_split) == 1:
            logger.warning('Please provide a directory name to load!')
            continue
        elif not os.path.isdir(cmd_split[1]):
            logger.warning(cmd_split[1] + ' is not a valid directory!')
            continue
        sc.load_directory(cmd_split[1])
        logger.info( str(len(sc.songs)) + ' songs loaded [annotated: ' + str(len(sc.get_annotated())) + ']')
    elif cmd == 'play':
        # play [save]: start playback; only annotated songs can be mixed.
        if len(sc.get_annotated()) == 0:
            logger.warning( 'Use the loaddir command to load some songs before playing!' )
            continue
        if len(cmd_split) > 1 and cmd_split[1] == 'save':
            logger.info('Saving this new mix to disk!')
            save_mix = True
        else:
            save_mix = False
        logger.info('Starting playback!')
        try:
            # NOTE(review): `try` body continues beyond this chunk — truncated here.
# NOTE(review): chunk starts mid-function — the enclosing def, the loop that
# opens `csvfile`, and the `annot_cur`/`annotations`/`songs` initialisations
# are above this view. Indentation reconstructed.
        reader = csv.reader(csvfile)
        # First CSV column is a timestamp (seconds, presumably) — confirm format.
        for line in reader:
            time = float(line[0])
            annot_cur.append(time)
        annotations.append(annot_cur)
    return songs, annotations

#TODO make sure that these arrays are constructed and saved during training CV loop
# Cross-validation artefacts dumped by the training script:
# y_* are label arrays, t_* are the parallel per-sample song-title arrays.
y_val_all = np.load('singingvoice_y_val.bin.npy')
t_val_all = np.load('singingvoice_t_val.bin.npy')
y_true_all = np.load('singingvoice_y_train.bin.npy')
t_train_all = np.load('singingvoice_t_train.bin.npy')

# Per-song (validation labels, training labels) pairs, keyed by song title.
masks = {}
for s in sc.get_annotated():
    if s.title in t_val_all:
        # Boolean mask selecting this song's samples in the validation arrays.
        cur_song_mask = np.array([t == s.title for t in t_val_all])
        y_val = y_val_all[cur_song_mask]
        cur_song_mask_train = np.array([t == s.title for t in t_train_all])
        y_true = y_true_all[cur_song_mask_train]
        masks[s.title] = (y_val, y_true)

for title_1, y_tuple_1 in masks.iteritems():  # Python 2 dict API
    # Titles in `masks` come from sc.get_annotated(), so [0] is safe here.
    song_1 = [s for s in sc.get_annotated() if s.title == title_1][0]
    song_1.open()
    # Combine all songs and report the accuracy on detecting overlaps in voice
    # NOTE(review): loop body continues beyond this chunk — truncated here.
def index_dj(request):
    """Django view driving the auto-DJ from a single web form.

    Reads the command from ``request.POST['cmd']`` (``play``, ``pause``,
    ``skip``/``s``, ``stop``, ``save``, ``showannotated``, ``annotate``,
    ``debug``, ``mark``, ``quit``), executes it against a freshly built
    DjController, and renders ``Index.html`` with ``locals()`` so the
    template can show ``message``.

    Fixes over the previous revision:
    - ``cmd`` was unbound on GET requests, raising NameError.
    - a ``while True`` loop with ``continue`` could spin forever inside the
      request handler (e.g. ``play`` with zero annotated songs).
    - ``showannotated`` overwrote its own message, hiding the annotated count.
    - a new handler was attached to the shared 'colorlogger' on every request,
      multiplying log output.
    """
    template = get_template("Index.html")

    # Configure colored console logging. Only attach the handler once; the
    # logger is shared process-wide across requests.
    LOG_LEVEL = logging.DEBUG
    LOGFORMAT = "%(log_color)s%(message)s%(reset)s"
    logging.root.setLevel(LOG_LEVEL)
    formatter = ColoredFormatter(LOGFORMAT)
    stream = logging.StreamHandler()
    stream.setLevel(LOG_LEVEL)
    stream.setFormatter(formatter)
    logger = logging.getLogger('colorlogger')
    logger.setLevel(LOG_LEVEL)
    if not logger.handlers:
        logger.addHandler(stream)

    # NOTE(review): the collection/controller are rebuilt per request, so no
    # playback state survives between requests — confirm this is intended.
    sc = SongCollection()
    tl = TrackLister(sc)
    dj = DjController(tl)

    essentia.log.infoActive = False
    essentia.log.warningActive = False

    cmd = None
    if request.method == "POST":
        cmd = str(request.POST.get("cmd", None))

    # Every request (re)loads the music library before dispatching.
    # NOTE(review): hard-coded upload path — consider making it a setting.
    sc.load_directory("/home/ddman/音樂/upload")
    message = str(len(sc.songs)) + ' songs loaded [annotated: ' + str(
        len(sc.get_annotated())) + ']'

    if cmd == 'play':
        if len(sc.get_annotated()) == 0:
            message = 'Use the loaddir command to load some songs before playing!'
        else:
            message = 'Starting playback!'
            try:
                dj.play(save_mix=False)
            except Exception as e:
                logger.error(e)
    elif cmd == 'pause':
        message = 'Pausing playback!'
        try:
            dj.pause()
        except Exception as e:
            logger.error(e)
    elif cmd == 'skip' or cmd == 's':
        message = 'Skipping to next segment...'
        try:
            dj.skipToNextSegment()
        except Exception as e:
            logger.error(e)
    elif cmd == 'stop':
        message = 'Stopping playback!'
        dj.stop()
    elif cmd == 'save':
        message = 'Saving the next new mix!'
    elif cmd == 'showannotated':
        # Previously the second assignment clobbered the first; report both.
        message = ('Number of annotated songs ' + str(len(sc.get_annotated()))
                   + '; Number of unannotated songs '
                   + str(len(sc.get_unannotated())))
    elif cmd == 'annotate':
        sc.annotate()
        message = 'Done annotating!'
    elif cmd == 'debug':
        LOG_LEVEL = logging.DEBUG
        logging.root.setLevel(LOG_LEVEL)
        stream.setLevel(LOG_LEVEL)
        logger.setLevel(LOG_LEVEL)
        message = 'Enabled debug info. Use this command before playing, or it will have no effect.'
    elif cmd == 'mark':
        dj.markCurrentMaster()
    elif cmd == 'quit' or cmd is None:
        # 'quit' and plain GET requests just render the load summary.
        pass
    else:
        message = 'The command ' + str(cmd) + ' does not exist!'

    return render(request, "Index.html", locals())
# NOTE(review): chunk is truncated — the `try:` below has no visible
# `except`/`finally`, and `sc` (the song collection) is built earlier in the
# file. Indentation reconstructed.
pyAudio = pyaudio.PyAudio()
# Mono 44.1 kHz float32 output stream for audition; opened but its use is
# outside this chunk.
stream = pyAudio.open(format=pyaudio.paFloat32, channels=1, rate=44100, output=True)

try:
    # Recover the song title from a filename like
    # 'singingvoice_predicted_<title>.npy' passed as argv[1].
    title = sys.argv[1][len('singingvoice_predicted_'):-4]
    y_train_all = np.load('singingvoice_y_train.bin.npy')
    t_train_all = np.load('singingvoice_t_train.bin.npy')
    print title  # Python 2 print statement
    # Select this song's training labels, then drop every 4th sample —
    # presumably mirroring the subsampling used at training time; confirm.
    cur_song_mask = np.array([t == title for t in t_train_all])
    y_train = y_train_all[cur_song_mask]
    y_train = y_train[np.array([i % 4 != 0 for i in range(len(y_train))])]
    y_pred = np.load('singingvoice_predicted_{}.npy'.format(title))
    song = [s for s in sc.get_annotated() if s.title == title][0]
    song.open()
    plt.figure()
    # NOTE(review): np.logical_and takes TWO inputs — the third positional
    # argument here is the `out=` parameter, so this writes the AND of the
    # first two slices into y_pred's third slice instead of AND-ing all
    # three. Probably a three-way AND was intended — confirm and fix.
    plt.fill_between(range(len(y_train[::3])),
                     np.logical_and(y_pred[0::3], y_pred[1::3], y_pred[2::3]),
                     alpha=0.5, color='blue')
    plt.fill_between(range(len(y_train[::3])), y_train[::3], alpha=0.5, color='red')
    plt.show()
    # Print (ground truth, prediction, downbeat time) triplets, one per beat.
    for y_true, y, t in zip(y_train[::4], y_pred[::4], song.downbeats):
        print '{} {} {}'.format(y_true, y, t)
    song.close()
    # NOTE(review): `try` handler continues beyond this chunk — truncated here.