Code example #1
File: s3.py  Project: mevers303/bothoven
def upload_latest_file(dir, verbose=True):

    bucket = get_bucket()

    files = get_filenames(dir)
    # pick the most recently created file in the directory
    latest_file = max(files, key=os.path.getctime)
    upload_file(latest_file, bucket, verbose=verbose)
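
All of these examples call a shared get_filenames helper (defined in the project's functions/file_functions.py, which is not shown on this page). A minimal sketch of what it presumably does, reconstructed from the call sites above, walking a directory tree and optionally filtering by extension:

import os

def get_filenames(base_dir, extensions=None):
    # recursively collect full file paths under base_dir, optionally
    # keeping only files whose extension is in `extensions`
    paths = []
    for root, _dirs, names in os.walk(base_dir):
        for name in names:
            if extensions is None or os.path.splitext(name)[1].lower() in extensions:
                paths.append(os.path.join(root, name))
    return paths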
Code example #2
def build_csv(cache_dir):

    files = get_filenames(cache_dir, [".pkl"])
    done = 0
    csv_path = os.path.join(cache_dir, "key_signatures.csv")
    if os.path.exists(csv_path) and os.path.getsize(csv_path) > 0:
        df = pd.read_csv(csv_path, index_col="path")
        needs_column_header = False
    else:
        df = pd.DataFrame(columns=["tonic", "mode"])
        df.index.name = "path"
        needs_column_header = True

    with open(csv_path, "a") as csv_f:
        # a fresh CSV needs the header row, otherwise the pd.read_csv
        # above fails on the next run (same fix as in code example #6)
        if needs_column_header:
            csv_f.write("path,tonic,mode\n")

        for file in files:

            progress_bar(done, len(files), file)
            done += 1

            if file in df.index.values:
                continue  # already analyzed on a previous run

            score = pickle_load(file, verbose=False)
            key = score.analyze('key')  # music21 key-signature analysis
            df.loc[file] = [key.tonic.name, key.mode]
            # append and flush immediately so a crash loses at most one row
            s = ",".join([f'"{file}"', key.tonic.name, key.mode]) + "\n"
            csv_f.write(s)
            csv_f.flush()

    progress_bar(done, len(files), "Done!")

    return df
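
Note the design: each row is appended to the CSV and flushed as soon as it is computed, so an interrupted run loses at most the file currently being analyzed, and the df.index check lets the next run skip everything already recorded. A minimal usage sketch (the cache directory path is hypothetical):

df = build_csv("midi/cache")
print(df["mode"].value_counts())  # e.g. how many pieces are major vs. minor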
Code example #3
File: model.py  Project: mevers303/bothoven
def load_cached_model(model_name):

    last_epoch = 0
    last_file = ""

    # download any new files from S3
    if s3.down_sync_s3(f"models/{model_name}"):
        try:
            s3.download_file(f"models/{model_name}/log.csv")
        except Exception:
            pass  # log.csv may not exist on S3 yet
    s3.down_sync_s3(f"tensorboard/{model_name}")

    # find all the previous models
    for file in get_filenames(f"models/{model_name}", [".h5"]):
        filename = file[len(f"models/{model_name}/"):]  # strip the directory prefix
        epoch = int(filename.split("_")[1])  # epoch is the 2nd "_"-separated token
        if epoch > last_epoch:
            last_epoch = epoch
            last_file = file

    if last_epoch:
        print(f" -> found cached model (epoch {last_epoch})")
        print(" --> loading structure...")
        with open(f"models/{model_name}/{model_name}.json", "r") as f:
            model = keras.models.model_from_json(f.read())
        print(" --> loading weights...")
        model.load_weights(last_file)
        return model, last_epoch
    else:
        return None, 0
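
The epoch parsing above (filename.split("_")[1]) assumes checkpoint files carry the epoch number as the second underscore-separated token. A sketch of a Keras callback that would produce such names, assuming a hypothetical pattern like weights_{epoch}_{loss}.h5 (the project's actual save code is not shown on this page):

checkpoint = keras.callbacks.ModelCheckpoint(
    f"models/{model_name}/weights_{{epoch}}_{{loss:.4f}}.h5")
model.fit(x_train, y_train, epochs=10, callbacks=[checkpoint])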
Code example #4
File: s3.py  Project: mevers303/bothoven
def up_sync_s3(dir, overwrite=False, verbose=True):

    bucket = get_bucket()
    files = get_filenames(dir)

    if overwrite:
        upload_files(bucket, files, verbose=verbose)
        return len(files)
    else:
        # upload only files that the bucket does not already contain
        keys = [key.name for key in bucket.get_all_keys()]
        new_files = set(files) - set(keys)
        upload_files(bucket, new_files, verbose=verbose)
        return len(new_files)
Code example #5
File: s3.py  Project: mevers303/bothoven
def down_sync_s3(dir, overwrite=False, verbose=True):

    bucket = get_bucket()
    files = get_filenames(dir)
    keys = [
        key.name for key in bucket.get_all_keys() if key.name.startswith(dir)
    ]

    if overwrite:
        download_files(bucket, keys, verbose=verbose)
        return len(keys)  # everything under the prefix was re-downloaded
    else:
        new_files = set(keys) - set(files)  # remote keys with no local copy
        download_files(bucket, new_files, verbose=verbose)
        return len(new_files)
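
bucket.get_all_keys() in these two sync functions is the legacy boto 2 Bucket API. For reference, a minimal boto3 sketch of the same prefix-filtered key listing (the bucket name is hypothetical; get_bucket presumably wraps the boto 2 connection):

import boto3

bucket = boto3.resource("s3").Bucket("my-bucket")  # hypothetical bucket name
keys = [obj.key for obj in bucket.objects.filter(Prefix=dir)]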
Code example #6
def cache(in_dir, out_dir):

    files = get_filenames(in_dir, [".mid", ".midi", ".smf"])
    done = 0
    csv_path = os.path.join(out_dir, "key_signatures.csv")
    os.makedirs(out_dir, exist_ok=True)
    if os.path.exists(csv_path) and os.path.getsize(csv_path) > 0:
        df = pd.read_csv(csv_path, index_col="path")
        needs_column_header = False
    else:
        df = pd.DataFrame(columns=["tonic", "mode"])
        df.index.name = "path"
        needs_column_header = True
    csv_f = open(csv_path, "a")
    if needs_column_header:
        csv_f.write("path,tonic,mode\n")

    for file in files:

        progress_bar(done, len(files), "Loading:".ljust(11) + file + "...")
        done += 1

        # path of the MIDI file relative to in_dir
        relative_path = os.path.relpath(file, in_dir)
        new_file = os.path.join(out_dir, relative_path) + ".pkl"
        if new_file in df.index.values:
            continue
        new_dir = os.path.dirname(new_file)
        os.makedirs(new_dir, exist_ok=True)

        try:
            score = m21.converter.parse(file)
        except Exception:
            continue  # skip files music21 cannot parse
        progress_bar(done - 1, len(files),
                     "Analyzing:".ljust(12) + file + "...")
        key = score.analyze('key')
        csv_f.write(",".join([f'"{new_file}"', key.tonic.name, key.mode]) +
                    "\n")
        csv_f.flush()

        m21.converter.freeze(score, fp=new_file)

    csv_f.close()
    progress_bar(done, len(files), "Done!")
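
Taken together with code example #2, a full preprocessing run might look like this sketch (directory paths are hypothetical):

cache("midi/raw", "midi/cache")  # parse MIDI files, pickle scores, seed the CSV
df = build_csv("midi/cache")     # analyze any cached .pkl not yet in the CSV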
Code example #7
File: MusicLibrary.py  Project: mevers303/bothoven
    def find_files(self):

        self.filenames = np.array(
            get_filenames(self.base_dir, [".mid", ".midi", ".smf"]))
        print("Found", self.filenames.size, "files in", self.base_dir + "...")
Code example #8
import mido
import os
import sys

from bothoven_globals import progress_bar
from functions.file_functions import get_filenames

in_dir = "/home/mark/Documents/raw_midi"
out_dir = "/media/mark/Data/Documents/python/bothoven/midi/monophonic"
good_programs = set(range(56, 80))  # GM brass, reed, and pipe programs (57-80, 1-indexed)

filenames = get_filenames(in_dir, [".mid", ".midi", ".smf"])
total = len(filenames)
done = 0

for filename in filenames:

    progress_bar(done, total, filename)
    done += 1

    mid = None
    try:
        mid = mido.MidiFile(filename)
    except KeyboardInterrupt:
        sys.exit(1)  # let Ctrl+C abort the whole run
    except Exception as e:
        print("There was an error reading", filename)
        print(e)
        continue

    good_tracks = []