def show_results(json_file_name):
    """Print artist, album, track and path for every track stored in *json_file_name*.

    Each entry in the JSON file is itself a JSON-encoded string, so it is
    decoded a second time before its fields are displayed.
    """
    for raw_entry in utils.read_json_file(json_file_name):
        entry = json.loads(raw_entry)
        print(f"Artista: {entry['artist']}")
        print(f"Album: {entry['album']}")
        print(f"Track: {entry['track']}")
        print(f"Path: {entry['path']}")
        print("\n---\n")
def walk_in_folders(full_path, output_folder):
    """Train a 13-input / 12-output genre classifier from extracted MFCC files.

    Walks *full_path* for ``.json`` feature files, uses each file's 13 MFCC
    means as the sample input and the one-hot genre code as the target, then
    trains a 13-4-12 network for 1000 epochs and prints the activation for a
    hard-coded pop-rock sample.

    ``output_folder`` is accepted for interface compatibility but not used here.
    """
    data_set = SupervisedDataSet(13, 12)
    for root, _dirs, files in os.walk(full_path):
        for file_name in files:
            # endswith() instead of substring test so e.g. "x.json.bak"
            # or "x.jsonl" are not picked up by mistake.
            if not file_name.endswith(".json"):
                continue
            full_file_name = os.path.join(root, file_name)
            curr = utils.read_json_file(full_file_name)
            # NOTE(review): assumes the genre name is always the 7th "/"-separated
            # path component and that paths use "/" — TODO confirm for this layout.
            genre = root.split("/")[6]
            genre_code = get_genre_code(genre)
            print("FILE: ", file_name, curr["metadata"]["tags"]["file_name"],
                  "GENRE: ", genre, genre_code)
            t_input = tuple(curr["lowlevel"]["mfcc"]["mean"])
            data_set.addSample(t_input, get_genre_code_tuple(genre_code))
    neural_network = buildNetwork(13, 4, 12, bias=True)
    trainer = BackpropTrainer(neural_network, data_set)
    print("Training...")
    for _ in range(1000):  # fixed number of epochs; index itself is unused
        trainer.train()
    print("Done.")
    # Smoke test: MFCC means taken from a known pop-rock track.
    test = tuple([
        -639.92388916, 92.7311630249, -6.63165950775, 22.4517841339,
        7.37339067459, -1.02510261536, -5.08265829086, 1.70268416405,
        -2.5901350975, -6.00173950195, -7.20496797562, -3.18132972717,
        -3.99400472641
    ])
    ge_exp = "new-metal"
    ge_exp_code = get_genre_code(ge_exp)
    result = neural_network.activate(test)
    print("RESULT: ")
    for r in result:
        print("R: ", r)
    print("EXPECT POS: ", ge_exp, ge_exp_code)
def run_tests(json_file, last_fm, limit=False, untranslated_only=False, request_limit=300000):
    """Try to translate every track in *json_file* against the last.fm API.

    For each track several progressively looser query strings are attempted
    (full artist+title, then digit-stripped title variants); the first attempt
    that translates wins. Finally re-processes previously untranslated tracks.

    :param json_file: path of the JSON file with one JSON-encoded track per entry
    :param last_fm: last.fm API client passed through to request_attempt
    :param limit: when True, only the first *request_limit* tracks are processed
    :param untranslated_only: when True, skip the per-track loop entirely and
        only run proced_untranslated_tracks
    :param request_limit: cap applied when *limit* is True
    """
    tdb = TracksDb()
    all_tracks = utils.read_json_file(json_file)
    if limit:
        all_tracks = all_tracks[:request_limit]
    if not untranslated_only:
        for pre_track in all_tracks:
            track = json.loads(pre_track)
            # Hoist the values shared by several attempts so each regex /
            # sanitize pass runs exactly once per track.
            artist = utils.sanitize_string(
                track["artist"].replace("\uf022", ""), [], force=True
            )
            # Title with every digit-containing word removed.
            no_digit_words = re.sub(r"\w*\d\w*", "", track["track"]).strip()
            # Same, with any remaining digit runs removed as well.
            parsed_track_name = re.sub(r'\d+', '', no_digit_words)
            # Title with digit runs removed but digit-adjacent letters kept.
            digitless = re.sub(r'\d+', '', track["track"])
            attempts = [
                urllib.parse.urlencode({"artist": artist,
                                        "track": track["track"].strip()}),
                urllib.parse.urlencode({"track": no_digit_words}),
                urllib.parse.urlencode({"track": parsed_track_name}),
                urllib.parse.urlencode({"track": digitless}),
                urllib.parse.urlencode({"artist": artist, "track": digitless}),
            ]
            for request_url in attempts:
                print("Track: {}\nAttempt:{}\n\n".format(track['track'], request_url))
                translated = request_attempt(track, request_url, last_fm, tdb)
                if translated:
                    break  # first successful translation wins
    proced_untranslated_tracks(tdb, last_fm)
def run_tests(json_file, last_fm, output_file):
    """Fetch last.fm track info for up to 100 mbid-tagged tracks and save level-2 data.

    Reads tracks from *json_file*, requests track info for each entry that has
    an ``mbid`` (capped at 100 requests to stay inside the API budget), parses
    each response with parse_level_2 and writes the result list to *output_file*.
    """
    level2 = []
    responses = []
    request_limit = 100
    request_counter = 0
    for mbid_track in utils.read_json_file(json_file):
        # Stop iterating as soon as the budget is spent instead of
        # scanning the (possibly huge) remainder of the file for nothing.
        if request_counter >= request_limit:
            break
        if mbid_track["mbid"]:
            responses.append(
                last_fm.get_track_info("", True, mbid_track["mbid"])["track"])
            request_counter += 1
    for rep in responses:
        level2.append(parse_level_2(rep))
    utils.write_json_file(output_file, level2)
def walk_in_folders(full_path, output_folder):
    """Train a 13-input / 1-output genre regressor from extracted MFCC files.

    Walks *full_path* for ``.json`` feature files, uses each file's 13 MFCC
    means as sample input and the scalar genre code as target, trains a
    13-4-1 network for 2000 epochs (printing the error each epoch) and prints
    the activation for one hard-coded sample.

    ``output_folder`` is accepted for interface compatibility but not used here.
    """
    data_set = SupervisedDataSet(13, 1)
    for root, _dirs, files in os.walk(full_path):
        for file_name in files:
            # endswith() instead of substring test so e.g. "x.json.bak"
            # or "x.jsonl" are not picked up by mistake.
            if not file_name.endswith(".json"):
                continue
            full_file_name = os.path.join(root, file_name)
            curr = utils.read_json_file(full_file_name)
            # NOTE(review): assumes the genre name is always the 7th "/"-separated
            # path component — TODO confirm for this layout.
            genre = root.split("/")[6]
            genre_code = get_genre_code(genre)
            print("FILE: ", file_name, curr["metadata"]["tags"]["file_name"],
                  "GENRE: ", genre, genre_code)
            t_input = tuple(curr["lowlevel"]["mfcc"]["mean"])
            print(t_input)
            data_set.addSample(t_input, genre_code)
    neural_network = buildNetwork(13, 4, 1, bias=True)
    trainer = BackpropTrainer(neural_network, data_set)
    for _ in range(2000):  # fixed number of epochs; index itself is unused
        print(trainer.train())
    # Smoke test with a hard-coded sample of 13 MFCC means.
    test = tuple([
        -631.971496582, 121.894683838, -30.9424991608, 9.7442483902,
        0.577766537666, 5.28025388718, -4.99692964554, 8.93616294861,
        -1.2865601778, 3.46211123466, -0.934040367603, -5.2112455368,
        4.20084095001
    ])
    result = neural_network.activate(test)  # fixed "restult" typo
    print("RESULT: ", result)
def main():
    """Populate the tracks database from a JSON file.

    Command-line flags:
      -f/--input_folder      folder containing the input file (default "results")
      -i/--input_file        JSON file name (default "output.json")
      -m/--insert_mode       "truncate" clears the table first
      -lp/--limit_population anything other than "no" enables the track cap
      -tl/--tracks_limit     maximum number of tracks inserted when capped
    """
    desc = "This module populate the Database with information in csv/json file."
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-f', '--input_folder', default="results")
    parser.add_argument('-i', '--input_file', default="output.json")
    parser.add_argument('-m', '--insert_mode', default="truncate")
    parser.add_argument('-lp', '--limit_population', default="no")
    # Let argparse do the int conversion (and reject bad input) itself.
    parser.add_argument('-tl', '--tracks_limit', type=int, default=300000)
    parsed = parser.parse_args()
    insert_mode = parsed.insert_mode
    tracks_limit = parsed.tracks_limit
    limit_population = parsed.limit_population != "no"
    tdb = TracksDb()
    if insert_mode == "truncate":
        tdb.truncate()
    json_file = os.path.join(parsed.input_folder, parsed.input_file)
    all_tracks = utils.read_json_file(json_file)
    if limit_population:
        all_tracks = all_tracks[:tracks_limit]
    for pre_track in all_tracks:
        # Each entry is a JSON-encoded string; decode before inserting.
        track = json.loads(pre_track)
        tdb.insert_track(track)
        print(">", json.dumps(track), "\n")
def _extract_features(track_data):
    """Return the 5-value input vector (loudness, bpm, beats loudness mean,
    danceability, chords change rate) used by the genre network."""
    return (
        float(track_data["lowlevel"]["average_loudness"]),
        float(track_data["rhythm"]["bpm"]),
        float(track_data["rhythm"]["beats_loudness"]["mean"]),
        float(track_data["rhythm"]["danceability"]),
        float(track_data["tonal"]["chords_changes_rate"]),
    )


def run_nn(data_set_path, raw_input_path, input_genre, song_name):
    """Train a genre classifier on *data_set_path* and classify one input track.

    Builds a SupervisedDataSet from every ``.json`` feature file found under
    *data_set_path* (target is the one-hot genre code derived from the path),
    trains a feed-forward network, activates it on the track described by
    *raw_input_path* and plots the per-genre percentages.

    :param data_set_path: root folder of the training feature files
    :param raw_input_path: feature file of the track to classify
    :param input_genre: expected genre, used only for display
    :param song_name: song title, used only for the plot caption
    """
    data_set = SupervisedDataSet(
        NN_INPUT_NUMBER,
        NN_OUTPUT_NUMBER
    )
    for root, _dirs, files in os.walk(data_set_path):
        for file_name in files:
            # endswith() instead of substring test so e.g. "x.json.bak"
            # or "x.jsonl" are not picked up by mistake.
            if not file_name.endswith(".json"):
                continue
            full_file_name = os.path.join(root, file_name)
            curr_track = utils.read_json_file(full_file_name)
            # NOTE(review): assumes the genre name is always the 7th
            # "/"-separated path component — TODO confirm for this layout.
            curr_track_genre = root.split("/")[6]
            genre_code = get_genre_code(curr_track_genre)
            print(
                "Including in set: ",
                file_name,
                curr_track_genre,
                genre_code
            )
            # Same feature vector for training and inference (see helper).
            data_set.addSample(
                _extract_features(curr_track),
                get_genre_code_tuple(genre_code),
            )
    neural_network = buildNetwork(
        NN_INPUT_NUMBER,
        NN_LAYERS_NUMBER,
        NN_OUTPUT_NUMBER,
        bias=False
    )
    trainer = BackpropTrainer(
        neural_network,
        data_set
    )
    for index in range(NN_TRAINING_ROUNDS):
        print("Training:", index, trainer.train())
    input_data = utils.read_json_file(raw_input_path)
    print("INPUT: ", raw_input_path, input_genre)
    result = neural_network.activate(_extract_features(input_data))  # fixed "restult" typo
    print("RESULT: ")
    print([r * 100 for r in result])
    utils.plot_genre_detected(
        [format(r * 100, '.2f') for r in result],
        "{} ({})".format(song_name, input_genre)
    )