def main():
    """Train the classifier models, optionally once per random seed.

    Relies on module-level names defined elsewhere in this file:
    ``arguments`` (CLI parser), ``SEEDS``, ``model_setup_library``,
    ``sys`` and ``subprocess``.
    """

    parameters = arguments()

    def _run(cmd):
        # Print the command, flush stdout so the log interleaves correctly,
        # then block on the child so runs execute strictly sequentially.
        model_setup_library.command_printing(cmd)
        sys.stdout.flush()
        subprocess.Popen(cmd, shell=True).wait()

    model_commands = [

        ## 2
        "--model_type classifier --architecture rnn --data_type buckeye  --epochs 50 "
        +
        "--features_type mfcc --train_tag gt --enc 400_400_400 --latent 130 --batch_size 512",
        "--model_type classifier --architecture cnn --data_type omniglot --epochs 50 "
        +
        "--enc 3.3.32.1.1.2.2_3.3.64.1.1.2.2_3.3.128.1.1 --latent 130 --batch_size 64",
    ]

    # The flag arrives as a string from the CLI, hence the comparison.
    test_seeds = parameters.test_seeds == "True"

    for this_command in model_commands:
        if test_seeds:
            # One training run per seed, e.g. for variance estimates.
            for rnd_seed in SEEDS:
                _run("./train_model.py " + this_command
                     + " --rnd_seed {}".format(rnd_seed))
        else:
            _run("./train_model.py " + this_command)
def main():
    """Extract classifier latents and build pairs for every feature file.

    Collects the speech (mfcc ``.npz``) and image (``.npz``) feature
    archives, runs ``./classifier_latents.py`` once per file, then
    ``./classifier_pairs.py`` for every (file, num_pairs, metric) combo.

    Relies on module-level names defined elsewhere in this file:
    ``SPEECH_DATASET_TYPE``, ``SPEECH_DATASETS``, ``IMAGE_DATASET_TYPE``,
    ``IMAGE_DATASETS``, ``NUM_PAIRS``, ``METRIC``, ``feats_path``, ``path``,
    ``itertools``, ``model_setup_library``, ``sys`` and ``subprocess``.
    """

    feat_fns = []

    # Speech features live under <dataset>/Subsets/Words/mfcc/.
    for dataset_type in SPEECH_DATASET_TYPE:
        for dataset in SPEECH_DATASETS:
            feat_fns.append(
                path.join(feats_path, dataset, "Subsets", "Words", "mfcc",
                          "gt_" + dataset_type + "_mfcc.npz"))

    # Image features sit directly under the dataset directory.
    for dataset_type in IMAGE_DATASET_TYPE:
        for dataset in IMAGE_DATASETS:
            feat_fns.append(
                path.join(feats_path, dataset, dataset_type + ".npz"))

    def _run(cmd):
        # Print, flush, then block on the child so runs stay sequential.
        model_setup_library.command_printing(cmd)
        sys.stdout.flush()
        subprocess.Popen(cmd, shell=True).wait()

    # Latent extraction depends only on the feature file, so run it once
    # per file instead of once per (file, num_pairs, metric) combination
    # as the original product loop did.
    for fn in feat_fns:
        _run("./classifier_latents.py " + " --feats_fn {}".format(fn))

    for (fn, num, metr) in itertools.product(feat_fns, NUM_PAIRS, METRIC):
        _run("./classifier_pairs.py "
             + " --feats_fn {} --num_pairs {} --metric {}".format(
                 fn, num, metr))
# Beispiel #3
# 0
def main():
    """Sweep batch sizes and random seeds over a fixed set of model configs.

    Each configuration string is passed to ``./train_model.py`` together
    with a seed, a batch size and ``--final_model False``; the training
    runs execute one after another.
    """

    parameters = arguments()

    model_commands = [

        ## CAE hard pairs
        "--model_type cae --architecture rnn --data_type TIDigits --epochs 50 "
        + "--features_type mfcc --train_tag gt --enc 400_400_400 --latent 130 --pair_type classifier",
        "--model_type cae --architecture cnn --data_type MNIST --epochs 50 "
        + "--enc 3.3.32.1.1.2.2_3.3.64.1.1.2.2_3.3.128.1.1 --latent 130 --pair_type classifier",

        ## 2
        "--model_type classifier --architecture rnn --data_type buckeye  --epochs 50 --one_shot_speech_dataset buckeye "
        + "--features_type mfcc --train_tag gt --enc 400_400_400 --latent 130",
        "--model_type classifier --architecture cnn --data_type omniglot --epochs 50 --one_shot_image_dataset omniglot "
        + "--enc 3.3.32.1.1.2.2_3.3.64.1.1.2.2_3.3.128.1.1 --latent 130",
    ]

    for base_command in model_commands:
        for size in BATCH_SIZES:
            for seed in SEEDS:
                command = ("./train_model.py " + base_command
                           + " --rnd_seed {} --batch_size {} --final_model {}".format(
                               seed, size, False))
                model_setup_library.command_printing(command)
                sys.stdout.flush()
                child = subprocess.Popen(command, shell=True)
                child.wait()
def main():
    """Pair speech/image feature files and build speech-image pairs.

    For every dataset split, pairs the speech mfcc archive with the
    matching image archive, extracts classifier latents once per feature
    file, then runs ``./speech_image_pairs.py`` for every (pair, metric)
    combination.

    Relies on module-level names defined elsewhere in this file:
    ``DATASET_TYPES``, ``PAIR_DATASETS``, ``METRIC``, ``feats_path``,
    ``path``, ``itertools``, ``model_setup_library``, ``sys`` and
    ``subprocess``.
    """

    feat_fns = []

    for dataset_type in DATASET_TYPES:
        for (speech_dataset, image_dataset) in PAIR_DATASETS:
            # The speech subsets name the validation split "val".
            speech_dataset_type = "val" if dataset_type == "validation" else dataset_type
            speech_fn = path.join(feats_path, speech_dataset, "Subsets",
                                  "Words", "mfcc",
                                  "gt_" + speech_dataset_type + "_mfcc.npz")
            image_fn = path.join(feats_path, image_dataset,
                                 dataset_type + ".npz")
            feat_fns.append((speech_fn, image_fn))

    def _run(cmd):
        # Print, flush, then block on the child so runs stay sequential.
        model_setup_library.command_printing(cmd)
        sys.stdout.flush()
        subprocess.Popen(cmd, shell=True).wait()

    # Latent extraction depends only on the feature file, so run it once
    # per unique file instead of once per (pair, metric) combination as
    # the original product loop did.
    seen = set()
    for sp_fn, im_fn in feat_fns:
        for fn in (sp_fn, im_fn):
            if fn not in seen:
                seen.add(fn)
                _run("./classifier_latents.py " + " --feats_fn {}".format(fn))

    for ((sp_fn, im_fn), metr) in itertools.product(feat_fns, METRIC):
        _run("./speech_image_pairs.py "
             + " --speech_feats_fn {} --image_feats_fn {} --metric {}".format(
                 sp_fn, im_fn, metr))
# Beispiel #5
# 0
def main():
    """Match trained speech and image models and run few-shot evaluation.

    Walks ``../Model_data/`` for ``*_log.txt`` files, restores each model's
    pickled setup library, pairs one speech model with one image model via
    ``pair_check``, then runs ``./few_shot_learning.py`` on each matched
    pair for both the one-shot and the few-shot episode files.

    Relies on module-level names defined elsewhere in this file:
    ``pair_check``, ``model_setup_library``, ``speech_dataset``,
    ``image_dataset``, ``one_shot_episode``, ``few_shot_episode``, ``os``,
    ``path``, ``sys`` and ``subprocess``.
    """

    directories = os.walk("../Model_data/")
    valid_dirs = []
    for root, dirs, files in directories:
        for filename in files:
            # A model directory holds <name>_log.txt plus a subdirectory
            # whose first entry contains <name>_lib.pkl.
            if filename.split("_")[-1] == "log.txt" and len(dirs) != 0:
                log = path.join(root, filename)
                name = root.split("/")[-1]
                lib = path.join(root, dirs[0], name + "_lib.pkl")
                valid_dirs.append((lib, log))

    pair_logs = []
    appended_logs = []

    for lib1, log1 in valid_dirs:
        first_lib = model_setup_library.restore_lib(lib1)

        for lib2, log2 in valid_dirs:
            if lib1 != lib2 and log1 not in appended_logs and log2 not in appended_logs:
                second_lib = model_setup_library.restore_lib(lib2)

                if first_lib["training_on"] == "images":
                    image_lib = first_lib.copy()
                    image_log = log1
                else:
                    speech_lib = first_lib.copy()
                    speech_log = log1

                if second_lib["training_on"] == "images" and first_lib[
                        "training_on"] == "speech":
                    image_lib = second_lib.copy()
                    image_log = log2
                elif second_lib["training_on"] == "speech" and first_lib[
                        "training_on"] == "images":
                    speech_lib = second_lib.copy()
                    speech_log = log2
                else:
                    # Same modality: skip this candidate but keep scanning
                    # the rest. The original used `break`, which abandoned
                    # the whole inner loop on the first same-modality hit
                    # and could miss valid partners further down the list.
                    continue

                if pair_check(speech_lib, image_lib):
                    pair_logs.append((speech_log, image_log))
                    appended_logs.append(speech_log)
                    appended_logs.append(image_log)

    def _run(cmd):
        # Print, flush, then block on the child so runs stay sequential.
        model_setup_library.command_printing(cmd)
        sys.stdout.flush()
        subprocess.Popen(cmd, shell=True).wait()

    for speech_fn, image_fn in pair_logs:
        # Evaluate each matched pair on both episode files.
        for episode in (one_shot_episode, few_shot_episode):
            _run(
                "./few_shot_learning.py --speech_log_fn {} --image_log_fn {}"
                " --speech_data_fn {} --image_data_fn {} --episode_fn {}".format(
                    speech_fn, image_fn, speech_dataset, image_dataset,
                    episode))