def __init__(
    self,
    game: Game,
    agent: GraphAgent,
    search_depth=SEARCH_DEPTH,
    max_branching_factor=MAX_BRANCHING_FACTOR,
    name=None,
):
    super().__init__(game, agent)
    self.search_depth = search_depth
    self.max_branching_factor = max_branching_factor
    self.name = name or generate_id()
Example #2
async def handle_connection(websocket, path):
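    # Bind a human-readable socket ID to this connection's logger so its log lines can be traced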
    _logger = app_logger.bind(socket_id=generate_id())

    _logger.info("handle_connection")

    await on_connect(websocket, _logger)

    try:
        async for message in websocket:
            await on_receive(websocket, message, _logger)
    finally:
        await on_drop(websocket, _logger)
Example #3
CROSS_VAL = 10
ES_PATIENCE = 2
LOSS = "mean_squared_error"
USE_LSTM = False
FEATURE_TYPE = "feature-streams"  # embeddings-ge2e, embeddings-trill, feature-streams (embeddings dir name)
FEATURE_DIR = "split-10"  # split-10, ... (subdir name in ./wavs)

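# Draw a random seed for the fold generator so each run gets a different data split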
seed = random.randint(0, 100000)
print(f'Seed: {seed}')

generator = get_folds(FEATURE_TYPE,
                      FEATURE_DIR,
                      USE_LSTM,
                      CROSS_VAL,
                      seed=seed)  # seed=21
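# Name this run with a three-word human-readable ID so it is easy to identify later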
run_name = generate_id(word_count=3)

print(f"Starting run {run_name} ...")

best_loss_per_fold = []
predictions = []
truths = []

for i in range(1, CROSS_VAL + 1):
    # run = wandb.init(
    #     config={"learning_rate": LEARNING_RATE,
    #             "batch size": BATCH_SIZE,
    #             "epochs": EPOCHS,
    #             "optimizer": OPTIMIZER,
    #             "activation_func": ACTIVATION_FUNC,
    #             "dropout_rate": DROPOUT_RATE,
Example #4
def predict(save_predictions=False,
            n_neighbors=310,
            max_depth=20,
            remove_middle=False):
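    """Cross-validate a KNN or random-forest classifier over CROSS_VAL folds and report per-fold accuracy."""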
    generator = get_folds(FEATURE_TYPE,
                          FEATURE_DIR,
                          timeseries=False,
                          folds=CROSS_VAL,
                          seed=21)
    run_name = generate_id(word_count=3)

    print(f"Starting run {run_name}-{METHOD} ...")

    acc_per_fold = []
    predictions = []
    truths = []

    for i in range(1, CROSS_VAL + 1):
        start_message = f"Starting cross validation {i}/{CROSS_VAL} for param {'k=' + str(n_neighbors) if METHOD == 'KNN' else 'max_depth=' + str(max_depth)}"
        print('-' * len(start_message))
        print(start_message)
        print('-' * len(start_message))

        x_train, y_train, x_val, y_val = next(generator)

        print(f'Created folds for iteration {i}')

        if remove_middle:
            # Drop ambiguous samples whose labels fall in the middle of the [0, 1] range
            print('Removing middle values from train set')
            x_train_new, y_train_new = [], []
            for j in range(len(x_train)):
                if y_train[j] <= 0.333 or y_train[j] >= 0.666:
                    x_train_new.append(x_train[j])
                    y_train_new.append(y_train[j])
            print(f'Train set: {len(x_train)} -> {len(x_train_new)} samples')

            print('Removing middle values from val set')
            x_val_new, y_val_new = [], []
            for j in range(len(x_val)):
                if y_val[j] <= 0.333 or y_val[j] >= 0.666:
                    x_val_new.append(x_val[j])
                    y_val_new.append(y_val[j])
            print(f'Val set: {len(x_val)} -> {len(x_val_new)} samples')

            x_train, y_train = x_train_new, y_train_new
            x_val, y_val = x_val_new, y_val_new

        x_train = np.array(x_train)
        x_val = np.array(x_val)
        y_train = np.rint(y_train)
        y_val = np.rint(y_val)

        x_train, y_train = shuffle(x_train, y_train)

        if METHOD == 'KNN':
            knn = KNeighborsClassifier(n_neighbors=n_neighbors)
            knn.fit(x_train, y_train)
            prediction = knn.predict(x_val)
        elif METHOD == 'RF':
            rf = RandomForestClassifier(max_depth=max_depth)
            rf.fit(x_train, y_train)
            prediction = rf.predict(x_val)
        else:
            raise ValueError(f"Unknown METHOD: {METHOD}")

        acc = accuracy_score(y_val, prediction)
        # acc = accuracy_score(y_val, np.random.randint(2, size=len(y_val)))  # replace predictions by random classes to simulate random guessing

        print(f'Accuracy for fold {i}: {acc}\n')

        predictions += [prediction.flatten().tolist()]
        acc_per_fold += [acc]
        truths += [y_val.flatten().tolist()]

    avg_acc = mean(acc_per_fold)
    result = f"| Average accuracy for {'k=' + str(n_neighbors) if METHOD == 'KNN' else 'max_depth=' + str(max_depth)}: {avg_acc} |"
    print(f'Accuracy per fold: {acc_per_fold}')
    print()
    print('-' * len(result))
    print(result)
    print('-' * len(result))

    if save_predictions:
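        # Persist per-fold predictions and ground truths, then refit a final classifier on all of the data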
        predictionsname = f"predictions/{FEATURE_TYPE}-CLASS-{avg_acc:.4f}-{run_name}.pickle"
        with open(predictionsname, "wb") as f:
            pickle.dump((predictions, truths), f)
        print(f'Saved predictions as {predictionsname}')

        if METHOD == 'KNN':
            model = KNeighborsClassifier(n_neighbors=n_neighbors)
        else:
            model = RandomForestClassifier(max_depth=max_depth)
        x_train, y_train, x_val, y_val = next(
            get_folds(FEATURE_TYPE,
                      FEATURE_DIR,
                      timeseries=False,
                      folds=CROSS_VAL,
                      seed=21))

        if remove_middle:
            # Apply the same middle-value filtering used during cross-validation
            print('Removing middle values from train set')
            x_train_new, y_train_new = [], []
            for j in range(len(x_train)):
                if y_train[j] <= 0.333 or y_train[j] >= 0.666:
                    x_train_new.append(x_train[j])
                    y_train_new.append(y_train[j])
            print(f'Train set: {len(x_train)} -> {len(x_train_new)} samples')

            print('Removing middle values from val set')
            x_val_new, y_val_new = [], []
            for j in range(len(x_val)):
                if y_val[j] <= 0.333 or y_val[j] >= 0.666:
                    x_val_new.append(x_val[j])
                    y_val_new.append(y_val[j])
            print(f'Val set: {len(x_val)} -> {len(x_val_new)} samples')

            x_train, y_train = x_train_new, y_train_new
            x_val, y_val = x_val_new, y_val_new

        x_train = np.array(x_train)
        x_val = np.array(x_val)
        y_train = np.rint(y_train)
        y_val = np.rint(y_val)

        print(f'Feature dimension (train): {len(x_train[0])}')
        print(f'Feature dimension (val): {len(x_val[0])}')
        print(f'First train label: {y_train[0]}')
        print(f'First val label: {y_val[0]}')

        x = np.concatenate((x_train, x_val))
        y = np.concatenate((y_train, y_val))
        model.fit(x, y)
        modelname = f"models/{FEATURE_TYPE}-CLASS-{'NOMIDDLE' if remove_middle else 'FULL'}-{avg_acc:.4f}-{run_name}.pickle"
        with open(modelname, "wb") as f:
            pickle.dump(model, f)
        print(f'Saved model as {modelname}')

    return avg_acc
Example #5
def test_with_seed():
    random_uuid = uuid.uuid4()
    id1 = generate_id(seed=random_uuid)
    id2 = generate_id(seed=random_uuid)
    assert id1 == id2
Example #6
def test_word_count():
    result = generate_id(word_count=5)
    assert len(result.split("-")) == 5
Example #7
def test_word_count_throws():
    with pytest.raises(ValueError, match="word_count cannot be lower than 3"):
        generate_id(word_count=1)
Example #8
def test_separator():
    result = generate_id(separator="!")
    assert len(result.split("!")) == 4
Example #9
def test_without_seed():
    id1 = generate_id()
    id2 = generate_id()
    assert id1 != id2
Example #10
def main(words, sep, seed, count):
    """
    Generate human readable IDs
    """
    for _ in range(count):
        click.echo(generate_id(separator=sep, seed=seed, word_count=words))
Example #11
def predict(save_predictions=False,
            n_neighbors=310,
            max_depth=20,
            method='KNN'):
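    """Cross-validate a KNN or random-forest regressor over CROSS_VAL folds and report per-fold MSE."""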
    generator = get_folds(FEATURE_TYPE,
                          FEATURE_DIR,
                          timeseries=False,
                          folds=CROSS_VAL,
                          seed=21)
    run_name = generate_id(word_count=3)

    print(f"Starting run {run_name}-{method} ...")

    loss_per_fold = []
    predictions = []
    truths = []

    for i in range(1, CROSS_VAL + 1):
        start_message = f"Starting cross validation {i}/{CROSS_VAL} for param {'k=' + str(n_neighbors) if method == 'KNN' else 'max_depth=' + str(max_depth)}"
        print('-' * len(start_message))
        print(start_message)
        print('-' * len(start_message))

        x_train, y_train, x_val, y_val = next(generator)

        print(f'Created folds for iteration {i}')

        x_train = np.array(x_train)
        x_val = np.array(x_val)
        y_train = np.array(y_train)
        y_val = np.array(y_val)

        x_train, y_train = shuffle(x_train, y_train)

        if method == 'KNN':
            knn = KNeighborsRegressor(n_neighbors=n_neighbors)
            knn.fit(x_train, y_train)
            prediction = knn.predict(x_val)
        elif method == 'RF':
            rf = RandomForestRegressor(max_depth=max_depth)
            rf.fit(x_train, y_train)
            prediction = rf.predict(x_val)
        else:
            raise ValueError(f"Unknown method: {method}")

        loss = mean_squared_error(y_val, prediction)
        # use the line below to simulate always guessing 0.5
        # loss = mean_squared_error(y_val, np.array([0.5] * len(y_val)))
        # use the line below to simulate random guessing
        # loss = mean_squared_error(y_val, np.random.uniform(0, 1, [len(y_val)]))

        print(f'MSE for fold {i}: {loss}\n')

        predictions += [prediction.flatten().tolist()]
        loss_per_fold += [loss]
        truths += [y_val.flatten().tolist()]

    avg_loss = mean(loss_per_fold)
    result = f"| Average loss for {'k=' + str(n_neighbors) if method == 'KNN' else 'max_depth=' + str(max_depth)}: {avg_loss} |"
    print(f'MSE loss per fold: {loss_per_fold}')
    print()
    print('-' * len(result))
    print(result)
    print('-' * len(result))

    if save_predictions:
        predictionsname = f"predictions/{FEATURE_TYPE}-{method}-{avg_loss:.4f}-{run_name}.pickle"
        with open(predictionsname, "wb") as f:
            pickle.dump((predictions, truths), f)
        print(f'Saved predictions as {predictionsname}')

        if method == 'KNN':
            model = KNeighborsRegressor(n_neighbors=n_neighbors)
        else:
            model = RandomForestRegressor(max_depth=max_depth)
        x_train, y_train, x_val, y_val = next(
            get_folds(FEATURE_TYPE,
                      FEATURE_DIR,
                      timeseries=False,
                      folds=CROSS_VAL,
                      seed=21))
        # Refit on all folds combined before saving the final model
        x_all = np.concatenate((np.array(x_train), np.array(x_val)))
        y_all = np.concatenate((np.array(y_train), np.array(y_val)))
        model.fit(x_all, y_all)
        modelname = f"models/{FEATURE_TYPE}-{method}-{avg_loss:.4f}-{run_name}.pickle"
        with open(modelname, "wb") as f:
            pickle.dump(model, f)
        print(f'Saved model as {modelname}')

    return avg_loss
Example #12
import argparse
import importlib
import os
import sys

from functions import create_exp_name_and_datetime_path, merge_cfg_with_cli, run

from human_id import generate_id

if __name__ == "__main__":
    # parser = argparse.ArgumentParser()
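    # Derive the experiment module name from the script path given on the command line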
    exp = sys.argv[1].split("/")[-1].split(".")[0]

    module = importlib.import_module("." + exp, package="experiments")
    Experiment = getattr(module, "Experiment")
    cfg = getattr(module, "Config")
    parser = merge_cfg_with_cli(cfg)
    parser.add_argument("exp")
    args = parser.parse_args()
    path = create_exp_name_and_datetime_path(Experiment)
    path = os.path.join("results", path)
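    # Tag the run with a unique human-readable ID before launching the experiment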
    args.run_id = generate_id()
    run(Experiment, args, path)
Example #13
def _get_random_name():
    return generate_id(word_count=3)