Esempio n. 1
0
def main(args):
    """Run the training workflow: load/validate configs, resolve the
    experiment definition (local file or bandit app), and train."""
    began = time.time()
    utils.fancy_print("Starting workflow", color="green", size=70)
    ml_params = utils.read_config(args.ml_config_path)
    utils.validate_ml_params(ml_params)

    if not args.experiment_config_path:
        # No local experiment config given — fetch it from the bandit app,
        # which requires both an experiment id and an API credential path.
        assert args.experiment_id is not None, (
            "If no --experiment_config_path provided, --experiment_id must"
            " be provided to fetch experiment config from bandit app."
        )
        assert "bandit_app_credential_path" in ml_params, (
            "If getting experiment config from banditml.com, must provide"
            " valid api key path in `bandit_app_credential_path` in ml config."
        )
        logger.info("Getting experiment config from banditml.com...")
        exp_params = utils.get_experiment_config_from_bandit_app(
            ml_params["bandit_app_credential_path"], args.experiment_id
        )
        exp_params["experiment_id"] = args.experiment_id
    else:
        exp_params = utils.read_config(args.experiment_config_path)

    logger.info("Using parameters: {}\n".format(ml_params))
    train(
        ml_params=ml_params,
        experiment_params=exp_params,
        predictor_save_dir=args.predictor_save_dir,
        s3_bucket_to_write_to=args.s3_bucket_to_write_to,
    )
    logger.info("Workflow completed successfully.")
    logger.info(f"Took {time.time() - began} seconds to complete.")
Esempio n. 2
0
def main(args):
    """Load configs, resolve the experiment parameters (local file or
    bandit app), then run training."""
    t0 = time.time()
    fancy_print("Starting workflow", color="green", size=70)
    ml_params = read_config(args.ml_config_path)

    if not args.experiment_config_path:
        # Without a local experiment config, fetch one from the bandit app.
        assert args.experiment_id is not None, (
            "If no --experiment_config_path provided, --experiment_id must"
            " be provided to fetch experiment config from bandit app.")
        logger.info("Getting experiment config from banditml.com...")
        exp_params = get_experiment_config_from_bandit_app(
            args.experiment_id)
        # in transit this gets dumped as a string so load it
        exp_params["reward_function"] = json.loads(
            exp_params["reward_function"])
        exp_params["experiment_id"] = args.experiment_id
        exp_params["model_name"] = args.model_name
    else:
        exp_params = read_config(args.experiment_config_path)

    logger.info("Using parameters: {}".format(ml_params))
    train(
        ml_params=ml_params,
        experiment_params=exp_params,
        predictor_save_dir=args.predictor_save_dir,
        s3_bucket_to_write_to=args.s3_bucket_to_write_to,
    )
    logger.info("Workflow completed successfully.")
    logger.info(f"Took {time.time() - t0} seconds to complete.")
def lambda_handler(ws_event, context):
    """Lambda handler that updates my Slack status.

    :param ws_event: incoming request body (webhook event)
    :param context: AWS Lambda context object (unused here)
    :return: dict with an HTTP ``statusCode`` and a JSON ``body``
    """

    # read configs
    utils.read_config()

    ###########
    #  PING   #
    ###########
    if process_ping(ws_event):
        now = get_bst_datetime().isoformat()
        return {
            'statusCode': 200,
            'body': json.dumps('Ping received at ' + now)
        }

    ############################
    #  STATE (WORK, AWAY, OFF) #
    ############################

    state = infer_state()

    # Check if slack status says 'in a meeting' and don't change
    if is_in_known_status_checker(state):
        # Fixed typo: was 'In na event'.
        return {
            'statusCode': 200,
            'body': json.dumps('In an event, waiting for event to expire')
        }

    ################################################
    #  detect HOME OFFICE, VACATION and MEETING #
    ################################################
    try:
        found_relevant_event = check_calendar_events(state)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
        # propagate; logger.exception also records the traceback so failures
        # other than an expired token remain diagnosable.
        logger.exception("Expired token")
        found_relevant_event = False

    ###############
    #   FALLBACK  #
    ###############
    if not found_relevant_event:
        logger.info('NO relevant event. Processing fallback status')
        process_fallback(state)

    return {
        'statusCode': 200,
        'body': json.dumps('Everything OK!')
    }
Esempio n. 4
0
    def test_ddpg(self):
        """Smoke-test DDPG training on BipedalWalker-v3."""
        config = read_config('tests/config_test.yml')
        config.update({
            'env': 'BipedalWalker-v3',
            'model': 'ddpg',
            'state_dim': 24,
            'action_dim': 4,
        })
        load_engine(config).train()
Esempio n. 5
0
    def test_ddpg(self):
        """Smoke-test DDPG training on Pendulum-v0."""
        config = read_config('tests/config_test.yml')
        config.update({
            'env': 'Pendulum-v0',
            'model': 'ddpg',
            'state_dim': 3,
            'action_dim': 1,
        })
        load_engine(config).train()
Esempio n. 6
0
    def test_ddpg(self):
        """Smoke-test DDPG training on LunarLanderContinuous-v2."""
        config = read_config('tests/config_test.yml')
        config.update({
            'env': 'LunarLanderContinuous-v2',
            'model': 'ddpg',
            'state_dim': 8,
            'action_dim': 2,
        })
        load_engine(config).train()
Esempio n. 7
0
                        version='| Version | {:^8} |'.format(VERSION))
    # CLI: --train / --evaluate select the mode; --config points at the
    # experiment configuration JSON.
    parser.add_argument(
        '--train',
        action='store_true',
        help='Train a new model or continue training an existing model.')
    parser.add_argument('--evaluate',
                        action='store_true',
                        help='Evaluate a trained model.')
    parser.add_argument('--config',
                        type=str,
                        default="configs/smn_last.config.json",
                        help='Location of config file')
    args = parser.parse_args()

    # Load the configuration parameters dict from the --config file.
    trainerParams = utils.read_config(args.config)

    if args.train:
        # The --evaluate flag is forwarded to train() when training.
        logger = utils.create_logger(trainerParams["global"]["log_file"])
        logger.info(json.dumps(trainerParams, indent=4))
        train(trainerParams, evaluate=args.evaluate)
    elif args.evaluate:
        # Evaluation-only run: rename the log file to "<base>.evaluation.log".
        trainerParams["global"]["log_file"] = trainerParams["global"][
            "log_file"].rsplit('.', maxsplit=1)[0] + ".evaluation.log"
        logger = utils.create_logger(trainerParams["global"]["log_file"])
        logger.info(json.dumps(trainerParams, indent=4))
        evaluate(trainerParams)
    else:
        # Neither mode requested: show usage and exit normally.
        parser.print_help()
        # logger.error("\n"+parser.format_help())
Esempio n. 8
0
import argparse
from models.engine import load_engine
from utils.utils import read_config

# CLI: a single --config option pointing at the training configuration file.
parser = argparse.ArgumentParser(description='Run training')
parser.add_argument("--config", type=str, help="Path to the config file.")

if __name__ == "__main__":
    cli = vars(parser.parse_args())
    engine = load_engine(read_config(cli['config']))
    engine.train()
Esempio n. 9
0
File: test.py Project: alik604/ra
from models.engine import load_engine
from utils.utils import read_config

if __name__ == "__main__":
    # Removed a dead assignment: CONFIG_PATH was first set to
    # "d4pg-pytorch/configs/follow_rl.yml" and then immediately overwritten,
    # so only this value was ever used.
    CONFIG_PATH = "configs/LunarLanderContinuous_d4pg.yml"
    config = read_config(CONFIG_PATH)

    engine = load_engine(config)
    engine.test()
Esempio n. 10
0
    def __init__(self,
                 embedding_or_raw_data_file="",
                 voc_limit=None,
                 model_name="word2vec",
                 intersecting_embedding="",
                 config_file=""):
        """Initialize the embedding module.

        Args:
            embedding_or_raw_data_file: str
                        Path to the word vectors (or raw data) file.
            voc_limit:  int
                        A restriction on the embedding vocabulary.
            model_name: str
                        The name of the embedding model that is used:
                        'word2vec', 'glove' or 'fasttext'.
            intersecting_embedding: str
                        Path to a second embedding loaded for comparison
                        when the config sets `check_other_embedding` to
                        'word2vec'.
            config_file: str
                        Path to the configuration file read via
                        utils.read_config.

        Raises:
            ValueError: If `model_name` is not one of the supported models.
            RuntimeError: If the word2vec backend is unavailable.
        """
        # get the configuration
        self.config = utils.read_config(config_file)

        self.embedding_model_name = model_name
        self.max_seq_length = 20
        self.check_other_embedding = self.config["check_other_embedding"]

        if self.embedding_model_name == "word2vec":
            if word2vec_available and embedding_or_raw_data_file:
                self.word_vectors, self.embedding_dim = self.load_word2vec(
                    embedding_or_raw_data_file, voc_limit)
                self.vocab = self.word_vectors.vocab
                self.embedding_combiner = None
            elif word2vec_available and not embedding_or_raw_data_file:
                # NOTE(review): exiting the interpreter from a constructor is
                # drastic; kept for backward compatibility, but consider
                # raising instead.
                print("No file for embedding data given.")
                sys.exit(1)
            else:
                raise RuntimeError(
                    "Please download the necessary word2vec embedding file.")

        elif self.embedding_model_name == "glove":
            self.word_vectors, self.embedding_dim = self.load_glove(
                embedding_or_raw_data_file, voc_limit)
            self.vocab = self.word_vectors.vocab
            self.embedding_combiner = None

        elif self.embedding_model_name == "fasttext":
            self.word_vectors, self.embedding_dim = self.load_fasttext(
                embedding_or_raw_data_file, voc_limit)
            self.vocab = self.word_vectors.vocab
            self.embedding_combiner = None

        else:
            # A real exception instead of `assert False`: asserts are stripped
            # under `python -O`, which would let execution continue with no
            # vectors loaded at all.
            raise ValueError("Embedding model name unknown: '{}'\n".format(
                self.embedding_model_name))

        # Optionally load a second embedding for cross-checking.
        if self.check_other_embedding == "word2vec":
            self.load_w2v_as_comparison(intersecting_embedding, voc_limit)

        if self.check_other_embedding == "bert":
            used_bert_model = "bert-base-uncased"
            self.load_bert_as_comparison(used_bert_model)
Esempio n. 11
0
import scipy.signal

import torch
import torchaudio

from flask import Flask, jsonify, request
from typing import List, Dict, Text, Union
from utils.utils import read_config

from model.network import SpeechRecognitionModel
from utils.text_transform import TextTransform

# Module-level singletons created at import time: the Flask app, the parsed
# YAML config, and the text transform used for decoding.
app = Flask(__name__)


config = read_config('config/config.yaml')
tt = TextTransform()

def load_model(config: Dict = config):
    """Build a SpeechRecognitionModel with hyperparameters from `config`.

    NOTE(review): as visible here, `checkpoint` is loaded but never applied
    to `model`, and nothing is returned — the body looks truncated; confirm
    against the full source (a `model.load_state_dict(...)` / `return model`
    is presumably missing).
    """
    state_dict_path = config.get('CHECKPOINT_PATH')
    checkpoint = torch.load(state_dict_path)
    model = SpeechRecognitionModel(
        n_cnn_layers =  config.get('N_CNN_LAYERS'), 
        n_rnn_layers = config.get('N_RNN_LAYERS'),
        rnn_dim = config.get('RNN_DIM'),
        n_class = config.get('N_CLASS'), 
        n_feats = config.get('N_FEATS'), 
        stride = config.get('STRIDE'),
        dropout = config.get('DROPOUT')
    )
import argparse
from models.engine import load_engine
from utils.utils import read_config

if __name__ == "__main__":

    # Read the static config file and launch training through the engine.
    configuration = read_config("config.yml")
    load_engine(configuration).train()
Esempio n. 13
0
    write_new_file([train_argmax], [train_max], train_examples,
                   train_examples[0][::train_step], training_params)
    _, dev_argmax, dev_max = calculate_cosine_similarity(
        dev_preds[0], train_preds[0])
    logger.info("dev_argmax: {} \tdev_max: {}".format(len(dev_argmax),
                                                      len(dev_max)))
    write_new_file([dev_argmax], [dev_max], dev_examples,
                   train_examples[0][::train_step], validation_params)
    _, test_argmax, test_max = zip(*[
        calculate_cosine_similarity(test_preds[i], train_preds[0])
        for i in range(len(test_preds))
    ])
    logger.info("test_argmax: {} \ttest_max: {}".format(
        len(test_argmax[0]), len(test_max[0])))
    write_new_file(test_argmax, test_max, test_examples,
                   train_examples[0][::train_step], evaluation_params)


if __name__ == "__main__":
    # Parse the --config path, load the config, then build the dataset.
    parser = argparse.ArgumentParser(description='Create Dataset.')
    parser.add_argument('--config',
                        type=str,
                        default="",
                        help='Location of config file')
    config = utils.read_config(parser.parse_args().config)

    logger = utils.create_logger(config["global"]["log_file"])

    create_dataset_for_context_retrieval(config)
            # specify subplot and turn of axis
            ax = pyplot.subplot(4, 8, ix)
            ax.set_xticks([])
            ax.set_yticks([])
            # plot filter channel in grayscale
            print(filter.shape)
            pyplot.imshow(filter[:, :, 0, j])
            ix += 1
        pyplot.savefig(
            f'visualization/Layer_{layer_index}/subplot_32_filters.png')


def visualize_filters(model, config):
    """Visualize the model's filters on one training sample.

    Args:
        model: Trained model, forwarded to plot_feature_map.
        config: Configuration mapping; `stable_mode` is forwarded to
            DataSetParser, which also receives the whole config.

    Removed: unused local `testset_size = config.get('N_test')`.
    """
    stable_mode = config.get('stable_mode')
    print('Loading data set')
    data_parser = DataSetParser(stable_mode=stable_mode, config=config)
    # First sample of the training set; add a trailing channel axis.
    X = data_parser.train_data[0][0]
    print(X.shape)
    X = np.expand_dims(X, axis=-1)
    print(X.shape)
    plot_feature_map(model, X)


if __name__ == '__main__':
    # Report the working directory (paths in the config may be relative).
    print('CURRENT DIR: {}'.format(os.getcwd()))
    config = read_config(get_args())
    model = build_or_load_model(config)
    visualize_filters(model=model['train'], config=config)