def spectrogram(song, precision=2, fmax=10000, fmin=0, n_mels=128):
    """Compute a mel spectrogram whose hop length is aligned to beat subdivisions.

    Parameters
    ----------
    song : song identifier understood by ``load_metadata`` / ``path_to_song``.
    precision : subdivisions per beat; the hop is one beat divided by this.
    fmax, fmin : mel filterbank frequency bounds in Hz.
    n_mels : number of mel bands.

    Returns
    -------
    np.ndarray of shape ``(n_mels, n_frames)`` — one frame per beat subdivision.
    """
    meta = load_metadata(song)
    bpm = meta['_beatsPerMinute']
    # sr=None keeps the file's native sample rate. Keyword arguments are
    # required here: librosa >= 0.10 removed the positional audio/sr forms.
    clip, sample_rate = librosa.load(path_to_song(song), sr=None)
    clip_length = len(clip) / sample_rate
    # 1 beat = quarter note
    samples_per_beat = 60 * sample_rate / bpm
    samples_per_segment = samples_per_beat / precision  # to get 1/32
    print("bpm=", bpm)
    print("precision=", precision)
    print("clip_length=", clip_length)
    print("samples_per_beat=", samples_per_beat)
    print("samples_per_segment=", samples_per_segment)
    mel_spec = librosa.feature.melspectrogram(
        y=clip,
        n_fft=10240,
        hop_length=int(samples_per_segment),
        sr=sample_rate,
        fmin=fmin,
        fmax=fmax,
        n_mels=n_mels,
    )
    print(mel_spec.shape)
    return mel_spec
def beatmap2onset(song, difficulty='expert', precision=2):
    """Convert a beatmap into a binary onset vector, one entry per beat subdivision.

    Each segment spans one beat divided by *precision*; a segment is 1.0 when at
    least one note falls inside it, else 0.0.

    Parameters
    ----------
    song : song identifier understood by ``load_metadata`` / ``load_beatmap``.
    difficulty : beatmap difficulty to load.
    precision : subdivisions per beat.

    Returns
    -------
    (H, bpm, 0, 0, n_segments) where ``H`` is a 1-D float ndarray; the two
    zeros are placeholders kept for signature compatibility with
    ``beatmap2f24`` (empty-segment and collision counts).
    """
    metadata = load_metadata(song)
    beatmap = load_beatmap(song, difficulty)
    # Sort descending by time so pop() takes the earliest note from the end.
    notes = list(reversed(sorted(beatmap['_notes'], key=lambda x: x['_time'])))
    bpm = metadata['_beatsPerMinute']
    one_beat_length = 60 / bpm  # len of one beat in seconds
    one_segment_length = one_beat_length / precision  # to get 1/32
    print('track=', metadata['_songAuthorName'], metadata['_songName'])
    print('bpm=', bpm)
    print('last_note(beats)=', notes[0]['_time'])
    print('time(sec)=', notes[0]['_time'] / bpm * 60)
    print('one_beat_length(sec)=', one_beat_length)
    print('one_segment_length(sec)=', one_segment_length)
    print('segments_in_song=', (notes[0]['_time'] / bpm * 60) / one_segment_length)
    print('notes=', len(notes))

    def to_secs(beats):
        return beats / bpm * 60

    segments = []
    end_in_secs = one_segment_length
    # we loop until we remove all notes from the collection
    while notes:
        has = False
        # consume every note that occurs before this segment's end boundary
        while notes and to_secs(notes[-1]['_time']) < end_in_secs:
            notes.pop()
            has = True
        segments.append(1.0 if has else 0.0)
        # we go to next segment
        end_in_secs += one_segment_length
    print('segments=', len(segments))
    H = np.array(segments)
    print('H.shape', H.shape)
    return H, bpm, 0, 0, len(segments)
import os

from functions import load_metadata

# Reorganize downloaded Beat Saber songs from ./songs/<id>/ into flat
# per-difficulty directories under ./beatmaps/, renaming by song id.
os.makedirs('./beatmaps', exist_ok=True)
for sub in ('easy', 'normal', 'hard', 'expert', 'expertplus', 'metadata', 'ogg'):
    os.makedirs(f'./beatmaps/{sub}', exist_ok=True)

for f in os.listdir('./songs'):
    # metadata: move info.dat first so load_metadata can read it from its
    # new location (it presumably reads ./beatmaps/metadata — TODO confirm)
    os.rename(f'./songs/{f}/info.dat', f'./beatmaps/metadata/{f}.json')
    metadata = load_metadata(f)
    # song audio
    song_name = metadata['_songFilename']
    os.rename(f'./songs/{f}/{song_name}', f'./beatmaps/ogg/{f}.ogg')
    # beatmaps: only some difficulties exist per song
    for d in ['Easy', 'Normal', 'Hard', 'Expert', 'ExpertPlus']:
        if os.path.exists(f'./songs/{f}/{d}.dat'):
            os.rename(f'./songs/{f}/{d}.dat', f'./beatmaps/{d.lower()}/{f}.json')

# report per-difficulty counts (expertplus was previously omitted)
for d in ('easy', 'normal', 'hard', 'expert', 'expertplus'):
    print(f'{d} maps=', len(os.listdir(f'./beatmaps/{d}')))
def beatmap2f24(song, difficulty='expert', precision=2):
    """Convert a beatmap into a (n_segments, 24) binary feature matrix.

    Each segment spans one beat divided by *precision*. The 24 slots per
    segment encode a 4-column x 3-layer note grid, doubled for note type
    (slots 0-11 for type 0, 12-23 for type 1).

    Parameters
    ----------
    song : song identifier understood by ``load_metadata`` / ``load_beatmap``.
    difficulty : beatmap difficulty to load.
    precision : subdivisions per beat.

    Returns
    -------
    (H, bpm, empty_segments, collision_count, n_segments) where ``H`` is a
    float ndarray of shape ``(n_segments, 24)``.
    """
    metadata = load_metadata(song)
    beatmap = load_beatmap(song, difficulty)
    # Sort descending by time so pop() takes the earliest note from the end.
    notes = list(reversed(sorted(beatmap['_notes'], key=lambda x: x['_time'])))
    bpm = metadata['_beatsPerMinute']
    one_beat_length = 60 / bpm  # len of one beat in seconds
    one_segment_length = one_beat_length / precision  # to get 1/32
    print('track=', metadata['_songAuthorName'], metadata['_songName'])
    print('bpm=', bpm)
    print('last_note(beats)=', notes[0]['_time'])
    print('time(sec)=', notes[0]['_time'] / bpm * 60)
    print('one_beat_length(sec)=', one_beat_length)
    print('one_segment_length(sec)=', one_segment_length)
    print('segments_in_song=', (notes[0]['_time'] / bpm * 60) / one_segment_length)
    print('notes=', len(notes))

    def to_secs(beats):
        return beats / bpm * 60

    segments = []
    empty_segments = 0
    collision_count = 0
    end_in_secs = one_segment_length
    # we loop until we remove all notes from the collection
    while notes:
        segment = [0.0] * 24
        empt = True
        # consume every note that occurs before this segment's end boundary
        while notes and to_secs(notes[-1]['_time']) < end_in_secs:
            note = notes.pop()
            # BUGFIX: stride by 4 (columns per layer), not 3. With a stride of
            # 3, e.g. (_lineIndex=3, _lineLayer=0) and (_lineIndex=0,
            # _lineLayer=1) mapped to the same slot, inflating collision
            # counts. The Beat Saber grid is 4 columns x 3 layers, matching
            # the 24-slot layout (12 per note type).
            idx = note['_lineIndex'] + note['_lineLayer'] * 4
            if note['_type'] == 1:
                idx += 12
            if segment[idx] > 0.0:
                collision_count += 1
            segment[idx] = 1.0
            empt = False
        segments.append(segment)
        if empt:
            empty_segments += 1
        # we go to next segment
        end_in_secs += one_segment_length
    print('segments=', len(segments))
    print('empty segments=', empty_segments, 100 * empty_segments / len(segments))
    print('collisions=', collision_count, 100 * collision_count / len(segments))
    H = np.array(segments)
    print('H.shape', H.shape)
    return H, bpm, empty_segments, collision_count, len(segments)
from functions import list_songs, load_metadata

# Tally how many expert-difficulty songs declare a non-zero _songTimeOffset.
offset_count = 0
no_offset_count = 0
for song in list_songs('expert'):
    if load_metadata(song)['_songTimeOffset'] != 0:
        offset_count += 1
    else:
        no_offset_count += 1
print('zero=', no_offset_count)
print('non_zero=', offset_count)