Example #1
    def load(experiments_path):
        experiments = []
        with open(experiments_path, 'r') as experiments_file:
            experiment_configurations = json.load(experiments_file)

            configuration = None
            with open(experiment_configurations['configuration_path'], 'r') as configuration_file:
                configuration = yaml.load(configuration_file, Loader=yaml.FullLoader)

            if isinstance(experiment_configurations['seed'], list):
                for seed in experiment_configurations['seed']:
                    for name, experiment_configuration in experiment_configurations['experiments'].items():
                        experiment = Experiment(
                            name=name + '-seed' + str(seed),
                            experiments_path=experiment_configurations['experiments_path'],
                            results_path=experiment_configurations['results_path'],
                            global_configuration=configuration,
                            experiment_configuration=experiment_configuration,
                            seed=seed
                        )
                        experiments.append(experiment)
            else:
                for name, experiment_configuration in experiment_configurations['experiments'].items():
                    experiment = Experiment(
                        name=name,
                        experiments_path=experiment_configurations['experiments_path'],
                        results_path=experiment_configurations['results_path'],
                        global_configuration=configuration,
                        experiment_configuration=experiment_configuration,
                        seed=experiment_configurations['seed']
                    )
                    experiments.append(experiment)

        return Experiments(experiments)
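For reference, a minimal experiments file that this loader accepts could look as follows; the key names come from the code above, while every value is a placeholder:

import json

# Hypothetical experiments.json matching the keys read by load() above.
example = {
    "configuration_path": "config.yaml",   # parsed with yaml.load(..., Loader=yaml.FullLoader)
    "experiments_path": "experiments/",
    "results_path": "results/",
    "seed": [1, 2, 3],                     # a list produces one Experiment per seed
    "experiments": {"baseline": {"lr": 0.001}},
}
with open("experiments.json", "w") as f:
    json.dump(example, f, indent=2)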
Example #2
def run_experiment(experiment_config):
    dataset = Dataset('data/autism.tsv')
    num_epochs = 1000
    eval_every = 10

    for fold_id, (train_idxs, test_idxs) in dataset.cross_validation():

        data_train_fold = dataset.get_data(train_idxs)
        num_instances, labels_train_fold = dataset.get_labels(train_idxs)

        data_test_fold = dataset.get_data(test_idxs)
        _, labels_test_fold = dataset.get_labels(test_idxs)

        with tf.Graph().as_default() as graph:

            experiment = Experiment(experiment_config, num_instances,
                                    NeuralNetworkClassifier, data_train_fold)

            with tf.Session() as session:

                global_step = 0
                session.run(tf.global_variables_initializer())

                log_saver = LogSaver('logs', 'fisher_fold{}'.format(fold_id),
                                     session.graph)

                train_selected_data = session.run(
                    experiment.selection_wrapper.selected_data)
                test_selected_data = session.run(
                    experiment.selection_wrapper.select(data_test_fold))

                tqdm_iter = tqdm(range(num_epochs), desc='Epochs')

                for epoch in tqdm_iter:
                    feed_dict = {
                        experiment.clf.x: train_selected_data,
                        experiment.clf.y: labels_train_fold
                    }
                    loss, _ = session.run(
                        [experiment.clf.loss, experiment.clf.opt],
                        feed_dict=feed_dict)

                    if epoch % eval_every == 0:
                        summary = session.run(experiment.clf.summary_op,
                                              feed_dict=feed_dict)
                        log_saver.log_train(summary, epoch)

                        feed_dict = {
                            experiment.clf.x: test_selected_data,
                            experiment.clf.y: labels_test_fold
                        }
                        summary = session.run(experiment.clf.summary_op,
                                              feed_dict=feed_dict)
                        log_saver.log_test(summary, epoch)

                    tqdm_iter.set_postfix(loss='{:.2f}'.format(float(loss)),
                                          epoch=epoch)
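Note that this example targets the TensorFlow 1.x graph/session API (tf.Graph, tf.Session, tf.global_variables_initializer). Under TensorFlow 2 the same code can still run through the compatibility layer; a minimal sketch, assuming TF2 is installed:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # restores the tf.Graph()/tf.Session() semantics used above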
Example #3
    def get(self, experiment_configuration):
        self.logger.write("Building experiment:",
                          area="context",
                          subject="configuration")
        self.logger.write_configuration(experiment_configuration,
                                        area="context",
                                        subject="configuration")

        model_trainer = self.model_trainer_factory.get(
            experiment_configuration)
        model_tester = self.model_tester_factory.get(experiment_configuration)

        model = self.model_factory.get(experiment_configuration)

        experiment = Experiment(model_trainer, model_tester, model,
                                self.logger)

        return experiment
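The get() method above is an abstract-factory composition: each collaborator comes from its own factory and the Experiment merely aggregates them. A self-contained toy of the same shape, with every name hypothetical:

class ExperimentFactory:
    def __init__(self, trainer_factory, tester_factory, model_factory):
        self.trainer_factory = trainer_factory
        self.tester_factory = tester_factory
        self.model_factory = model_factory

    def get(self, configuration):
        # Build each collaborator from its factory, then aggregate.
        return (self.trainer_factory(configuration),
                self.tester_factory(configuration),
                self.model_factory(configuration))

trainer, tester, model = ExperimentFactory(dict, dict, dict).get({"lr": 0.001})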
Example #4
def train(data, model, base_params, train_params, save_dir):
    trainer = training.load_trainer(base_params, train_params, model, data)
    expt = Experiment(
        train_params.experiment_name, train_params.experiment_time,
        base_params.data)
    loss_reporter = training.LossReporter(expt, len(data.train), trainer)
    def report_loss_fn(msg):
        loss_reporter.report_items(msg.n_items, msg.loss)

    for epoch_no in range(train_params.epochs):
        loss_reporter.start_epoch(epoch_no + 1, 0)
        random.shuffle(data.train)
        trainer.train(report_loss_fn=report_loss_fn)
        loss_reporter.report()
        # set how often to save models
        if epoch_no % 5 == 0:
            save_file = os.path.join(save_dir, 'epoch_%03d.mdl' % (epoch_no+1,))
            trainer.save_checkpoint(epoch_no, -1, save_file)
            test(trainer, save_dir)
    save_file = os.path.join(save_dir, 'epoch_final.mdl')
    trainer.save_checkpoint(epoch_no, -1, save_file)

    return trainer
Example #5
def main():
    args = parse_args()

    # Work around TensorFlow's absl.logging dependency, which alters the
    # default Python logging output behavior when present.
    if "absl.logging" in sys.modules:
        logger.info("Attempting to fix log verbosity")
        import absl.logging

        if args.loglevel == 20:
            logger.info("Logging : info")
            absl.logging.set_verbosity("info")
            absl.logging.set_stderrthreshold("info")
        if args.loglevel == 10:
            logger.info("Logging : debug")
            absl.logging.set_verbosity("debug")
            absl.logging.set_stderrthreshold("debug")

    # Argument parsing
    theme = "theory"
    fix_seed = False

    all_encoders = args.all_encoders
    encoder = [args.encoder]

    if encoder[0] in SUPPORTED_ENCODERS and not all_encoders:
        logger.debug(f"Encodeur {encoder[0]} sélectionné.")
    elif all_encoders:
        logger.debug(
            "all_encoders option selected. Selecting all encoders"
        )
        encoder = SUPPORTED_ENCODERS
    else:
        logger.error(
            "Use -e or -a to select one or more encoders. -h for more information."
        )
        exit()

    method = args.method
    if method not in SUPPORTED_METHODS:
        logger.error(
            f"Method {method} not implemented. Choices = {SUPPORTED_METHODS}")
        exit()

    # Columns for the raw results
    raw_variables_list = [
        "ID",
        "fixed_sample",
        "data_filename",
        "date test",
        "theme",
        "encoder_name",
        "model_name",
        "methode",
        "size_historic",
        "size_context",
        "size_novelty",
        "iteration",
        "AUC",
        "temps",
        "faux positifs",
        "faux négatifs",
        "vrais positifs",
        "vrais négatifs",
        "précision",
        "rappel",
        "accuracy",
        "fscore",
        "gmean",
    ]

    # Columns for the condensed results
    condensed_variables_list = [
        "ID",
        "fixed_sample",
        "data_filename",
        "date test",
        "theme",
        "encoder_name",
        "model_name",
        "methode",
        "size_historic",
        "size_context",
        "size_novelty",
        "iteration",
        "AUC",
        "temps",
        "moy. faux positifs",
        "moy. faux négatifs",
        "moy. vrais positifs",
        "moy. vrais négatifs",
        "moy. précision",
        "moy. rappel",
        "moy. accuracy",
        "moy. fscore",
        "moy. gmean",
        "std. faux positifs",
        "std. faux négatifs",
        "std. vrais positifs",
        "std. vrais négatifs",
        "std. précision",
        "std. rappel",
        "std. accuracy",
        "std. fscore",
        "std. gmean",
        "q0.25 faux positifs",
        "q0.25 faux négatifs",
        "q0.25 vrais positifs",
        "q0.25 vrais négatifs",
        "q0.25 précision",
        "q0.25 rappel",
        "q0.25 accuracy",
        "q0.25 fscore",
        "q0.25 gmean",
        "q0.5 faux positifs",
        "q0.5 faux négatifs",
        "q0.5 vrais positifs",
        "q0.5 vrais négatifs",
        "q0.5 précision",
        "q0.5 rappel",
        "q0.5 accuracy",
        "q0.5 fscore",
        "q0.5 gmean",
        "q0.75 faux positifs",
        "q0.75 faux négatifs",
        "q0.75 vrais positifs",
        "q0.75 vrais négatifs",
        "q0.75 précision",
        "q0.75 rappel",
        "q0.75 accuracy",
        "q0.75 fscore",
        "q0.75 gmean",
    ]

    raw_results_filename = "Exports/Résultats_bruts.csv"
    condensed_results_filename = "Exports/Résultats_condensés.csv"

    # if the file does not exist, create it and write the header
    if not os.path.isfile(raw_results_filename):
        logger.debug(f"Création du fichier {raw_results_filename}")
        with open(raw_results_filename, "a+") as f:
            f.write(f"{';'.join(raw_variables_list)}\n")

    if not os.path.isfile(condensed_results_filename):
        logger.debug(f"Création du fichier {condensed_results_filename}")
        with open(condensed_results_filename, "a+") as f:
            f.write(f"{';'.join(condensed_variables_list)}\n")

    # Loop over the selected encoders
    for single_encoder in encoder:

        # Load the encoder
        logger.debug("Loading the encoder")

        # Initialize the encoder
        model_name = "Non applicable"
        if single_encoder == "infersent":
            encoder_model = infersent_model(pkl_path=PATH_INFERSENT_PKL,
                                            w2v_path=PATH_INFERSENT_W2V)
            model_name = PATH_INFERSENT_W2V
        elif single_encoder == "sent2vec":
            encoder_model = sent2vec_model(model_path=PATH_SENT2VEC_BIN)
            model_name = PATH_SENT2VEC_BIN
        elif single_encoder == "USE":
            module_url = (
                "https://tfhub.dev/google/universal-sentence-encoder/2"
            )  # @param ["https://tfhub.dev/google/universal-sentence-encoder/2", "https://tfhub.dev/google/universal-sentence-encoder-large/3"]

            # Import the Universal Sentence Encoder's TF Hub module
            encoder_model = hub_module(module_url)
        elif single_encoder == "fasttext":
            logger.info(
                f"Loading the fasttext model ({PATH_FASTTEXT}) (~15 minutes)"
            )
            encoder_model = KeyedVectors.load_word2vec_format(PATH_FASTTEXT)
            model_name = PATH_FASTTEXT
        elif single_encoder == "tf-idf":
            logger.info("Utilisation de TF-IDF")
            encoder_model = None

        # Format the model name
        model_name = Path(model_name).stem

        for exp in SAMPLES_LIST:
            # File generator (normally 10 files)
            experiment = Experiment(single_encoder, exp)

            # Load the datapapers_fixed data for this experiment
            gen_files = sorted(
                Path(PATH_DATAPAPERS_FIXED).glob(
                    f"**/context_{experiment.size_historic}_{experiment.size_context}_{experiment.size_novelty}_*"
                ))

            # Result lists
            AUC_list = []
            matrice_confusion_list = []
            mesures_list = []
            iteration_time_list = []

            # Test summary
            logger.info(
                f"Parameters: Data = datapapers_fixed, Novelty = {theme}, Encoder = {experiment.encoder}, Method = {method}, Historic/Context/Novelty: {experiment.size_historic}/{experiment.size_context}/{experiment.size_novelty}"
            )

            # Loop over the 10 files
            for iteration in gen_files:
                iteration_begin = time.time()

                data_context = pd.read_csv(iteration, sep="\t")
                # n_data_historic = str(PATH_DATAPAPERS_FIXED) + "/historic_" + '_'.join(i.stem.split('/')[-1].split('_')[1:]) + ".csv"
                n_data_historic = (
                    "Exports/datapapers_fixed/historic_" +
                    "_".join(iteration.stem.split("/")[-1].split("_")[1:]) +
                    ".csv")
                data_filename = n_data_historic
                logger.debug(n_data_historic)
                data_historic = pd.read_csv(n_data_historic, sep="\t")
                data_historic.columns = ["id", "abstract", "theme"]
                data_context.columns = ["id", "abstract", "theme"]
                # Assign the historic and context datasets
                experiment.set_datasets(data_historic, data_context)

                # Apply the encoder, build the embedding vectors
                experiment.run_experiment(encoder_model)

                # Export the vectors for debugging
                # experiment.export_vectors()

                # Run the classification
                try:
                    score, pred = experiment.run_classif(method)
                except Exception as e:
                    logger.error(e)
                    exit()

                obs = [
                    1 if elt == theme else 0
                    for elt in experiment.data_context.theme
                ]
                obs2 = [-1 if x == 1 else 1 for x in obs]

                matrice_confusion = mat_conf(obs, pred)
                logger.debug(f"matrice : {matrice_confusion}")
                mesures = all_measures(matrice_confusion, obs, pred)

                AUC = roc_auc_score(obs2, score)

                logger.debug(f"AUC : {AUC}")
                iteration_time = "%.2f" % (time.time() - iteration_begin)
                logger.debug(f"temps itération = {iteration_time}")

                # Append the results to the lists
                AUC_list.append(AUC)
                matrice_confusion_list.append(matrice_confusion)
                mesures_list.append(mesures)
                iteration_time_list.append(float(iteration_time))

                # Round numeric results before export
                AUC = round(AUC, 2)
                # iteration_time = round(iteration_time, 2)
                # matrice_confusion = [round(x, 2) for x in matrice_confusion]
                mesures = [round(x, 2) for x in mesures]

                # Export raw results
                logger.debug("Exporting raw results")
                with open(raw_results_filename, "a+") as f:
                    f.write(
                        f"{experiment.ID};{fix_seed};{data_filename};{experiment.heure_test};{theme};{experiment.encoder};{model_name};{method};{experiment.size_historic};{experiment.size_context};{experiment.size_novelty};{iteration};{AUC};{iteration_time};{';'.join(map(str, matrice_confusion))};{';'.join(map(str, mesures))}\n"
                    )

            # Build the condensed results
            AUC_condensed = round(sum(AUC_list) / float(len(AUC_list)), 2)
            # iteration_time_condensed = round(sum(iteration_time_list) / float(len(iteration_time_list)), 2)
            iteration_time_condensed = "rien"
            mean_matrice_confusion_condensed = np.round(
                np.mean(np.array(matrice_confusion_list), axis=0), 2)
            mean_mesures_condensed = np.round(
                np.mean(np.array(mesures_list), axis=0), 2)
            std_matrice_confusion_condensed = np.round(
                np.std(np.array(matrice_confusion_list), axis=0), 2)
            std_mesures_condensed = np.round(
                np.std(np.array(mesures_list), axis=0), 2)
            quantile025_matrice_confusion_condensed = np.round(
                np.quantile(np.array(matrice_confusion_list), 0.25, axis=0), 2)
            quantile025_mesures_condensed = np.round(
                np.quantile(np.array(mesures_list), 0.25, axis=0), 2)
            med_matrice_confusion_condensed = np.round(
                np.quantile(np.array(matrice_confusion_list), 0.5, axis=0), 2)
            med_mesures_condensed = np.round(
                np.quantile(np.array(mesures_list), 0.5, axis=0), 2)
            quantile075_matrice_confusion_condensed = np.round(
                np.quantile(np.array(matrice_confusion_list), 0.75, axis=0), 2)
            quantile075_mesures_condensed = np.round(
                np.quantile(np.array(mesures_list), 0.75, axis=0), 2)

            # Export condensed results
            logger.debug("Exporting condensed results")
            with open(condensed_results_filename, "a+") as f:
                f.write(
                    f"{experiment.ID};{fix_seed};{data_filename};{experiment.heure_test};{theme};{experiment.encoder};{model_name};{method};{experiment.size_historic};{experiment.size_context};{experiment.size_novelty};{iteration};{AUC_condensed};{iteration_time_condensed};{';'.join(map(str, mean_matrice_confusion_condensed))};{';'.join(map(str, mean_mesures_condensed))};{';'.join(map(str, std_matrice_confusion_condensed))};{';'.join(map(str, std_mesures_condensed))};{';'.join(map(str, quantile025_matrice_confusion_condensed))};{';'.join(map(str, quantile025_mesures_condensed))};{';'.join(map(str, med_matrice_confusion_condensed))};{';'.join(map(str, med_mesures_condensed))};{';'.join(map(str, quantile075_matrice_confusion_condensed))};{';'.join(map(str, quantile075_mesures_condensed))}\n"
                )

    logger.info("Temps d'exécution : %.2f secondes" %
                (time.time() - temps_debut))
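A side note on the exports: rows are assembled by hand with f-strings and ';'.join, which silently breaks if a field ever contains a semicolon. A sketch of the same append using the standard csv module:

import csv

def append_row(path, row):
    # csv.writer quotes any field that contains the delimiter.
    with open(path, "a+", newline="") as f:
        csv.writer(f, delimiter=";").writerow(row)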
Example #6
                                                                                            32]
long_message = "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book."
payload = [ord(l) for l in short_message]

current_carrier = sys.argv[1]
current_noise = sys.argv[2]
current_db = sys.argv[3]

for carrier_filename in CARRIER_FILENAMES:
    if (CARRIER_FILENAMES.index(current_carrier) <=
            CARRIER_FILENAMES.index(carrier_filename)):
        for noise_filename in NOISE_FILENAMES:
            if (CARRIER_FILENAMES.index(current_carrier) <
                    CARRIER_FILENAMES.index(carrier_filename)
                    or NOISE_FILENAMES.index(current_noise) <=
                    NOISE_FILENAMES.index(noise_filename)):
                noise_path = NOISE_DIR + noise_filename
                carrier_path = CARRIER_DIR + carrier_filename
                for noise_db in np.arange(NOISE_DB_MIN, NOISE_DB_MAX,
                                          NOISE_DB_STEP):
                    if (CARRIER_FILENAMES.index(current_carrier) <
                            CARRIER_FILENAMES.index(carrier_filename)
                            or NOISE_FILENAMES.index(current_noise) <
                            NOISE_FILENAMES.index(noise_filename)
                            or noise_db >= float(current_db)):
                        experiment = Experiment(carrier_path, noise_path, 0.0,
                                                noise_db, payload)
                        print "Running experiment for carrier '{0}' mixed with noise '{1}' ({2}dB).".format(
                            carrier_filename, noise_filename, noise_db)
                        experiment.run()
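The three nested index comparisons implement a resume point: every (carrier, noise, dB) combination that lexicographically precedes the triple given on the command line is skipped, so an interrupted sweep can pick up where it stopped. The same skip written as a tuple comparison, as a sketch over the globals defined above:

import itertools

import numpy as np

resume_point = (CARRIER_FILENAMES.index(current_carrier),
                NOISE_FILENAMES.index(current_noise),
                float(current_db))

for ci, ni in itertools.product(range(len(CARRIER_FILENAMES)),
                                range(len(NOISE_FILENAMES))):
    for noise_db in np.arange(NOISE_DB_MIN, NOISE_DB_MAX, NOISE_DB_STEP):
        if (ci, ni, noise_db) < resume_point:
            continue  # already covered by a previous run
        experiment = Experiment(CARRIER_DIR + CARRIER_FILENAMES[ci],
                                NOISE_DIR + NOISE_FILENAMES[ni],
                                0.0, noise_db, payload)
        experiment.run()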
Example #7
def main():
    """
    Main function for a training task.
    """
    parser = get_parser()
    options = get_options(parser)

    # Set up the logger.
    logger = logging.getLogger(consts.MAIN)
    logger.setLevel(logging.DEBUG if options[consts.DEBUG] else logging.INFO)
    file_handler = logging.FileHandler(os.path.join(options[consts.EXPORT_DIR], 'log.txt'), mode='w')
    logger.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    logger.addHandler(console_handler)

    # Log the options given through the command-line arguments.
    logger.info('options: {}'.format(str(options)))

    experiment_id = 0
    status_path = os.path.join(options[consts.EXPORT_DIR], "status.pickle")
    # Check if the execution is a new one or a resumption of a previous experiment.
    if not options[consts.CONTINUE]:
        # Set up a new execution.
        options_path = os.path.join(options[consts.EXPORT_DIR], 'options.pickle')
        with open(options_path, 'wb') as file:
            pickle.dump(options, file)
        best_experiment_test_score = -float('inf')
        best_experiment_id = -1
        best_epoch_num = -1
        best_config = None
        status = 'working'
        with open(status_path, 'wb') as file:
            pickle.dump([best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status], file)
        with open(os.path.join(options[consts.EXPORT_DIR], 'id'), 'w') as file:
            file.write(experiments.experiment.execution_identifier)
    else:
        # Load the old execution from the export directory.
        epoch_stamp_path = os.path.join(options[consts.EXPORT_DIR], "epoch_stamp.pickle")
        with open(epoch_stamp_path, 'rb') as file:
            dictionary = pickle.load(file)
        with open(status_path, 'rb') as file:
            best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status = pickle.load(file)
        with open(os.path.join(options[consts.EXPORT_DIR], 'id'), 'r') as file:
            experiments.experiment.execution_identifier = file.read()

    # Check if the execution is still in progress. This check should fail when an ended execution is resumed.
    if status == 'working':
        # Iterate through the different configurations of hyperparameters and create an experiment for each.
        for config in iterate_configs(parser, options):
            # If this is a resumed execution, check whether this experiment has already finished.
            if options[consts.CONTINUE] and experiment_id < dictionary[consts.EXPERIMENT_ID]:
                experiment_id += 1
                continue
            # If this is a resumed execution and this is the experiment that was running when the last
            # checkpoint was created, resume it from that checkpoint.
            elif options[consts.CONTINUE] and experiment_id == dictionary[consts.EXPERIMENT_ID]:
                # Log the configurations of the present experiment.
                logger.info('continuing on config: {}'.format(str(config)))
                checkpoint_dir = os.path.join(config[consts.EXPORT_DIR],
                                              "checkpoints",
                                              "experiment_%09d" % experiment_id,
                                              "epoch_%09d" % dictionary[consts.EPOCH_NUMBER])
                # Create an experiment for the configuration at hand.
                experiment = Experiment(config=config, experiment_id=experiment_id,
                                        load_from_directory=checkpoint_dir)
            # If this is a new experiment.
            else:
                logger.info('starting on config: {}'.format(str(config)))
                # Create an experiment for the configuration at hand.
                experiment = Experiment(config=config, experiment_id=experiment_id)

            # Run the present experiment.
            experiment_test_score = experiment.run()

            # Record the results of the experiment and compare them to the results so far.
            logger.info('Experiment {} test score: {}'.format(experiment_id, experiment_test_score))
            if experiment_test_score > best_experiment_test_score:
                best_experiment_test_score = experiment_test_score
                best_experiment_id = experiment_id
                best_epoch_num = experiment.best_epoch_number
                best_config = config

            # Store the best results so far in a file.
            with open(status_path, 'wb') as file:
                pickle.dump([best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status],
                            file)
            experiment_id += 1

        # Mark the execution as over.
        status = 'ended'

        # Store the best results in a file.
        with open(status_path, 'wb') as file:
            pickle.dump([best_experiment_test_score, best_experiment_id, best_epoch_num, best_config, status], file)
            
    # Report the best results.
    logger.info('Execution is over. Best experiment test score: {}'
                '\nBest experiment config: {}'.format(best_experiment_test_score, str(best_config)))
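The status record pickled throughout this example is a positional list, which makes it easy to drop or reorder a field (note the best_epoch_num slot) without the unpacking code noticing. A self-describing alternative, as a sketch with assumed types:

from typing import NamedTuple, Optional

class ExecutionStatus(NamedTuple):
    best_experiment_test_score: float
    best_experiment_id: int
    best_epoch_num: int
    best_config: Optional[dict]
    status: str

initial = ExecutionStatus(-float('inf'), -1, -1, None, 'working')
# pickle.dump(initial, file) round-trips through pickle just like the list.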
Example #8
def test_experiment():
    exp = Experiment("test_encoder", [2000, 300, 20])

    assert exp.size_historic == 2000
    assert exp.size_context == 300
    assert exp.size_novelty == 20
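The same assertions can be parametrized over several size triples; a sketch assuming pytest is available (the second triple is an arbitrary placeholder):

import pytest

@pytest.mark.parametrize("sizes", [[2000, 300, 20], [1000, 150, 10]])
def test_experiment_sizes(sizes):
    exp = Experiment("test_encoder", sizes)
    assert [exp.size_historic, exp.size_context, exp.size_novelty] == sizes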
Example #9
def run_training_coordinator(base_params, train_params):
    # type: (BaseParameters, TrainParameters) -> None

    torch.multiprocessing.set_sharing_strategy('file_system')
    expt = Experiment(train_params.experiment_name, train_params.experiment_time, base_params.data)

    socket_identifier = str(uuid.uuid4())

    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind(get_socket_url(socket_identifier))

    def send_msg(msg):
        # type: (Union[object, List[object]]) -> None
        if isinstance(msg, list):
            socket.send_pyobj(msg)
        else:
            socket.send_pyobj([msg])

    # fork off trainers
    procs = []
    mp_config = MPConfig(train_params.threads)
    trainer_states = {} # type: Dict[int, TrainerState]

    def all_in_state(state):
        # type: (TrainerState) -> bool
        return all(trainer_states[rank] == state for rank in trainer_states)

    with mp_config:
        for idx in range(train_params.trainers):
            trainer_states[idx] = TrainerState.UNINITIALIZED
            mp_config.set_env(idx)
            procs.append(subprocess.Popen([sys.executable, __file__, socket_identifier, str(idx)]))

    @atexit.register
    def cleanup_procs():
        # type: () -> None
        print('cleaning up trainers')
        for proc in procs:
            proc.terminate()

    while not all_in_state(TrainerState.LOADING_DATA):
        msg = socket.recv_pyobj()
        if isinstance(msg, TrainerInitializeReq):
            send_msg(TrainerInitializeResp(
                base_params,
                train_params,
            ))
            trainer_states[msg.rank] = TrainerState.LOADING_DATA
        elif isinstance(msg, TrainerDataReq):
            send_msg(WaitResp())
        else:
            raise ValueError('Unexpected message {}'.format(msg))

    data = load_data(base_params)
    model = load_model(base_params, data)

    dump_model_and_data(model, data, os.path.join(expt.experiment_root_path(), 'predictor.dump'))

    trainer = load_trainer(base_params, train_params, model, data)

    while not all_in_state(TrainerState.READY_FOR_EPOCH):
        msg = socket.recv_pyobj()
        if isinstance(msg, TrainerDataReq):
            send_msg(TrainerDataResp(
                model.dump_shared_params(),
                trainer.dump_shared_params(),
            ))
            trainer_states[msg.rank] = TrainerState.READY_FOR_EPOCH
        elif isinstance(msg, TrainerStepReq):
            send_msg(WaitResp())
        else:
            raise ValueError('Unexpected message {}'.format(msg))

    current_lr = train_params.initial_lr
    loss_reporter = LossReporter(expt, len(data.train), trainer)

    for epoch_no in range(train_params.epochs):
        if train_params.decay_trainers:
            n_trainers = max(1, train_params.trainers - epoch_no)
        else:
            n_trainers = train_params.trainers

        loss_reporter.start_epoch(epoch_no + 1, n_trainers)

        # start exactly n_trainers trainers, kill the rest
        n_started_trainers = 0
        while not all_in_state(TrainerState.READY_FOR_DATA):
            msg = socket.recv_pyobj()

            if isinstance(msg, TrainerStepReq):
                if trainer_states[msg.rank] == TrainerState.READY_FOR_EPOCH:
                    if n_started_trainers >= n_trainers:
                        send_msg(KillResp())
                        del trainer_states[msg.rank]
                    else:
                        send_msg([ShuffleDataResp(random.getstate()), SetLrResp(current_lr)])
                        trainer_states[msg.rank] = TrainerState.READY_FOR_DATA
                        n_started_trainers += 1
                else:
                    send_msg([WaitResp()])
            else:
                raise ValueError('Unexpected message {}'.format(msg))

        # shuffle data locally to permute random state
        random.shuffle(data.train)

        # get partitions
        partitions = get_partitions(len(data.train), train_params)
        partition_idx = 0

        # run until all done with epoch or dead
        while not all(trainer_states[rank] in (TrainerState.READY_FOR_EPOCH, TrainerState.DEAD) for rank in trainer_states):
            msg = socket.recv_pyobj()

            if trainer_states[msg.rank] == TrainerState.DEAD:
                send_msg(WaitResp())
            elif isinstance(msg, TrainerStepReq):
                if partition_idx < len(partitions):
                    trainer_states[msg.rank] = TrainerState.READY_FOR_DATA
                    send_msg(RunTrainerResp(partitions[partition_idx]))
                    partition_idx += 1
                else:
                    send_msg(WaitResp())
                    trainer_states[msg.rank] = TrainerState.READY_FOR_EPOCH
            elif isinstance(msg, TrainerLossReq):
                send_msg(TrainerLossResp())
                loss_reporter.report_items(msg.n_items, msg.loss)
            elif isinstance(msg, TrainerDeathReq):
                send_msg(TrainerDeathResp())
                loss_reporter.report_trainer_death()
                trainer_states[msg.rank] = TrainerState.DEAD
                if msg.partition_remainder[0] < msg.partition_remainder[1]:
                    partitions.append(msg.partition_remainder)
            else:
                raise ValueError('Unexpected Message {}'.format(msg))

            loss_reporter.report()

        if all_in_state(TrainerState.DEAD):
            break

        # reset states
        for rank in trainer_states:
            trainer_states[rank] = TrainerState.READY_FOR_EPOCH

        # decay LR if necessary
        if train_params.decay_lr or (train_params.weird_lr and epoch_no > 0):
            current_lr /= train_params.lr_decay_rate

    loss_reporter.finish()
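The coordinator above owns the REP end of a ZeroMQ REQ/REP pair; each spawned trainer process presumably drives the matching REQ end. A minimal sketch of the trainer-side handshake (only the message class names appear in the source, so their constructor arguments are assumptions):

import zmq

context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(get_socket_url(socket_identifier))

# Ask for parameters, then loop on step requests (rank argument assumed).
socket.send_pyobj(TrainerInitializeReq(rank))
responses = socket.recv_pyobj()  # send_msg() above always replies with a list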
Example #10
        with tf.device(self.tf_device(global_args)):
            agent = A2C(network_creator=network_creator(config),
                        lr=local_args.lr,
                        td_step=local_args.td_step,
                        ent_coef=local_args.ent_coef,
                        v_coef=local_args.v_coef)
        with agent.create_session(**self.tf_sess_opts(global_args)):
            env = ParallelEnvs.new(mode=local_args.mode,
                                   env_makers=default_macro_env_maker,
                                   env_num=local_args.env_num,
                                   env_args=env_args)
            obs_adapter = ObservationAdapter(config)
            act_adapter = MacroAdapter(config)
            rwd_adapter = RewardAdapter(config, 8)
            env_runner = EnvRunner(agent=agent,
                                   env=env,
                                   train=global_args.train,
                                   observation_adapter=obs_adapter,
                                   action_adapter=act_adapter,
                                   epoch_n=local_args.epoch,
                                   reward_adapter=rwd_adapter,
                                   step_n=local_args.td_step,
                                   logdir=local_args.logdir)
            env_runner.run()


Experiment.register(A2CProtossExperiment, "A2C training protoss")

if __name__ == '__main__':
    Experiment.main()
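Examples #10 and #16 both end by registering a subclass and delegating to Experiment.main(), which suggests a name-keyed registry dispatching on a command-line argument. A self-contained sketch of that pattern; the real Experiment API is not shown in the source, so every detail here is an assumption:

import sys

class ExperimentRegistry:
    _experiments = {}

    @classmethod
    def register(cls, experiment_cls, description):
        cls._experiments[experiment_cls.__name__] = (experiment_cls, description)

    @classmethod
    def main(cls):
        # Dispatch on the experiment name given on the command line.
        experiment_cls, description = cls._experiments[sys.argv[1]]
        print("Running:", description)
        experiment_cls().run()  # entry-point method name assumed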
Example #11
    },
    "timesteps": 144,
    "Vat": {
        "Line": Rad(1621230562029182607785180351895167282074137639278363742),
        "WETH": {
            "line":
            Rad(590000000000000000000000000000000000000000000000000000),
            "dust": Rad(500000000000000000000000000000000000000000000000),
        },
    },
    "Vow": {
        "wait": 561600,
        "dump": Wad(250000000000000000000),
        "sump": Rad(50000000000000000000000000000000000000000000000000),
        "bump": Rad(10000000000000000000000000000000000000000000000000),
        "hump": Rad(4000000000000000000000000000000000000000000000000000),
    },
    "Uniswap": {
        "pairs": {
            "0xa478c2975ab1ea89e8196811f51a7b7ade33eb11": {
                "path": "",  # No liquidity data for Black Thursday
                "token0": "DAI",
                "token1": "WETH",
            }
        }
    },
}

BlackThursday = Experiment(contracts, keepers, sort_actions, ilk_ids, Token,
                           stat_trackers, parameters)
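The Rad and Wad wrappers follow the MakerDAO fixed-point convention: a wad is an integer scaled by 10^18 and a rad by 10^45, which is why the literals above carry so many zeros. A quick sanity check, assuming that convention:

WAD = 10 ** 18
RAD = 10 ** 45

# "dump": Wad(250000000000000000000) is 250.0 in human units.
assert 250000000000000000000 // WAD == 250
# "wait": 561600 seconds is 6.5 days.
assert 561600 / 86400 == 6.5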
Example #12
                                            epsilon=1.0,
                                            alpha_decay=0.999,
                                            epsilon_decay=0.999)
                    ctrl_gaussian = Delegator([
                        GaussianAgentPrune(
                            bandit, 0.95, mu=currentMu, sigma=sigma)
                        for _ in range(team_sz)
                    ],
                                              alpha=1.0,
                                              epsilon=1.0,
                                              alpha_decay=0.999,
                                              epsilon_decay=0.999)
                    experiment_id = '%d/%d/%d/%.2f' % (e, n_arms, team_sz,
                                                       currentMu)

                    over_actions = Experiment(bandit, learner,
                                              'LtA/' + experiment_id)
                    over_gaussian_agents = Experiment(bandit, ctrl_gaussian,
                                                      'LtD/' + experiment_id)
                    # over_actions.run(trials)
                    # over_gaussian_agents.run(trials)
                    experiments.append(over_actions)
                    experiments.append(over_gaussian_agents)

print('\nSetup finished.')
manager = ParallelExperiment(experiments)
manager.run(trials)
print('\nExperiments finished.')
# organize results

for r in manager.result:
    # group by n_arms, team_sz and currentMu: each exec will be an entry
Example #13
                                        alpha=1.0,
                                        epsilon=1.0,
                                        alpha_decay=0.999,
                                        epsilon_decay=0.999)
                ctrl_ltd = Delegator([
                    PruningAgentFair2(bandit, 0.95, u=currentU)
                    for _ in range(team_sz)
                ],
                                     alpha=1.0,
                                     epsilon=1.0,
                                     alpha_decay=0.999,
                                     epsilon_decay=0.999)
                experiment_id = '%d/%d/%d/%.2f' % (e, n_arms, team_sz,
                                                   currentU)

                over_actions = Experiment(bandit, learner,
                                          'LtA/' + experiment_id)
                over_ltd = Experiment(bandit, ctrl_ltd, 'LtD/' + experiment_id)
                experiments.append(over_actions)
                experiments.append(over_ltd)

print('\nSetup finished.')
manager = ParallelExperiment(experiments)
manager.run(trials)
print('\nExperiments finished.')
# organize results

for r in manager.result:
    # group by n_arms, team_sz and currentMu: each exec will be an entry
    index_str, execution_str, n_arms, team_sz, currentMu = r.id.split('/')
    exp_group_name = '%s/%s/%s' % (n_arms, team_sz, currentMu)
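In Examples #12 and #13 the experiment id doubles as a '/'-separated record (a 'LtA' or 'LtD' prefix plus the run parameters), which r.id.split('/') later unpacks. The round trip, spelled out:

experiment_id = '%d/%d/%d/%.2f' % (0, 10, 5, 0.95)   # e, n_arms, team_sz, parameter
full_id = 'LtA/' + experiment_id
parts = full_id.split('/')
assert parts == ['LtA', '0', '10', '5', '0.95']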
Example #14
def main():
    args = parse_args()

    # Work around TensorFlow's absl.logging dependency, which alters the
    # default Python logging output behavior when present.
    if "absl.logging" in sys.modules:
        logger.info("Attempting to fix log verbosity")
        import absl.logging

        if args.loglevel == 20:
            logger.info("Logging : info")
            absl.logging.set_verbosity("info")
            absl.logging.set_stderrthreshold("info")
        if args.loglevel == 10:
            logger.info("Logging : debug")
            absl.logging.set_verbosity("debug")
            absl.logging.set_stderrthreshold("debug")

    # Argument parsing
    dataset = args.dataset
    without_preprocessing = args.without_preprocessing
    theme = args.novelty
    fix_seed = args.fix_seed

    # Load the dataset
    if dataset == "datapapers":
        if theme not in SUPPORTED_NOVELTY_DATAPAPERS:
            logger.error(
                "novelty %s not supported for %s. Supported values : %s",
                theme,
                dataset,
                SUPPORTED_NOVELTY_DATAPAPERS,
            )
            exit()
        if without_preprocessing:
            logger.debug("Utilisation du jeu de données datapapers.csv")
            data_filename = "datapapers.csv"
            try:
                data = pd.read_csv(PATH_DATAPAPERS, sep="\t", encoding="utf-8")
                data = data.drop(
                    [
                        "id",
                        "conf",
                        "title",
                        "author",
                        "year",
                        "eq",
                        "conf_short",
                    ],
                    axis=1,
                )
            except Exception as e:
                logger.error(str(e))
                logger.error(f"Fichier {data_filename} non trouvé.")
                exit()
        else:
            logger.debug("Utilisation du jeu de données datapapers_clean.csv")
            data_filename = "datapapers_clean.csv"
            try:
                data = pd.read_csv(f"Exports/{data_filename}")
                data.columns = ["id", "abstract", "theme"]
                data = data.drop(["id"], axis=1)
            except Exception as e:
                logger.error(str(e))
                logger.error(
                    f"File {data_filename} not found. Run the prepare.py script."
                )
                exit()
    elif dataset == "nytdata":
        if theme not in SUPPORTED_NOVELTY_NYTDATA:
            logger.error("novelty %s not supported for %s", theme, dataset)
            exit()
        if without_preprocessing:
            logger.debug("Utilisation du jeu de données experimentations.csv")
            data_filename = "experimentations.csv"
            try:
                data = pd.read_csv(PATH_NYTDATA, sep="\t", encoding="utf-8")
                data = data.drop(["week", "titles"], axis=1)
                data.rename(
                    columns={
                        "texts": "abstract",
                        "principal_classifier": "theme",
                        "second_classifier": "theme2",
                        "third_classifier": "theme3",
                    },
                    inplace=True,
                )
            except Exception as e:
                logger.error(str(e))
                logger.error(f"Fichier {data_filename} non trouvé.")
                exit()
        else:
            logger.debug("Utilisation du jeu de données experimentations.csv")
            data_filename = "experimentations.csv"
            try:
                # data = pd.read_csv(f'Exports/{data_filename}')
                data = pd.read_csv(PATH_NYTDATA, sep="\t", encoding="utf-8")
                data.rename(
                    columns={
                        "texts": "abstract",
                        "principal_classifier": "theme",
                        "second_classifier": "theme2",
                        "third_classifier": "theme3",
                    },
                    inplace=True,
                )
            except Exception as e:
                logger.error(str(e))
                logger.error(
                    f"File {data_filename} not found. Run the prepare.py script."
                )
                exit()
    elif dataset is None:
        logger.error(
            f"Select a dataset with the -d/--dataset argument, one of {SUPPORTED_DATASETS}."
        )
        exit()
    else:
        logger.error(
            f"Dataset {dataset} not supported. Supported datasets: {SUPPORTED_DATASETS}"
        )
        exit()

    all_encoders = args.all_encoders
    encoder = [args.encoder]

    if encoder[0] in SUPPORTED_ENCODERS and not all_encoders:
        logger.debug(f"Encodeur {encoder[0]} sélectionné.")
    elif all_encoders:
        logger.debug(
            "all_encoders option selected. Selecting all encoders"
        )
        encoder = SUPPORTED_ENCODERS
    else:
        logger.error(
            "Use -e or -a to select one or more encoders. -h for more information."
        )
        exit()

    method = args.method
    if method not in SUPPORTED_METHODS:
        logger.error(
            f"Method {method} not implemented. Choices = {SUPPORTED_METHODS}")
        exit()

    # Columns for the raw results
    raw_variables_list = [
        "ID",
        "fixed_sample",
        "data_filename",
        "date test",
        "theme",
        "encoder_name",
        "model_name",
        "methode",
        "size_historic",
        "size_context",
        "size_novelty",
        "iteration",
        "AUC",
        "temps",
        "faux positifs",
        "faux négatifs",
        "vrais positifs",
        "vrais négatifs",
        "précision",
        "rappel",
        "accuracy",
        "fscore",
        "gmean",
    ]

    # Columns for the condensed results
    condensed_variables_list = [
        "ID",
        "fixed_sample",
        "data_filename",
        "date test",
        "theme",
        "encoder_name",
        "model_name",
        "methode",
        "size_historic",
        "size_context",
        "size_novelty",
        "iteration",
        "AUC",
        "temps",
        "moy. faux positifs",
        "moy. faux négatifs",
        "moy. vrais positifs",
        "moy. vrais négatifs",
        "moy. précision",
        "moy. rappel",
        "moy. accuracy",
        "moy. fscore",
        "moy. gmean",
        "std. faux positifs",
        "std. faux négatifs",
        "std. vrais positifs",
        "std. vrais négatifs",
        "std. précision",
        "std. rappel",
        "std. accuracy",
        "std. fscore",
        "std. gmean",
        "q0.25 faux positifs",
        "q0.25 faux négatifs",
        "q0.25 vrais positifs",
        "q0.25 vrais négatifs",
        "q0.25 précision",
        "q0.25 rappel",
        "q0.25 accuracy",
        "q0.25 fscore",
        "q0.25 gmean",
        "q0.5 faux positifs",
        "q0.5 faux négatifs",
        "q0.5 vrais positifs",
        "q0.5 vrais négatifs",
        "q0.5 précision",
        "q0.5 rappel",
        "q0.5 accuracy",
        "q0.5 fscore",
        "q0.5 gmean",
        "q0.75 faux positifs",
        "q0.75 faux négatifs",
        "q0.75 vrais positifs",
        "q0.75 vrais négatifs",
        "q0.75 précision",
        "q0.75 rappel",
        "q0.75 accuracy",
        "q0.75 fscore",
        "q0.75 gmean",
    ]

    raw_results_filename = "Exports/Résultats_bruts.csv"
    condensed_results_filename = "Exports/Résultats_condensés.csv"

    # if the file does not exist, create it and write the header
    if not os.path.isfile(raw_results_filename):
        logger.debug(f"Création du fichier {raw_results_filename}")
        with open(raw_results_filename, "a+") as f:
            f.write(f"{';'.join(raw_variables_list)}\n")

    if not os.path.isfile(condensed_results_filename):
        logger.debug(f"Création du fichier {condensed_results_filename}")
        with open(condensed_results_filename, "a+") as f:
            f.write(f"{';'.join(condensed_variables_list)}\n")

    # Loop over the selected encoders
    for single_encoder in encoder:

        # Load the encoder
        logger.debug("Loading the encoder")

        # Initialize the encoder
        model_name = "Non applicable"
        if single_encoder == "infersent":
            encoder_model = infersent_model(pkl_path=PATH_INFERSENT_PKL,
                                            w2v_path=PATH_INFERSENT_W2V)
            model_name = PATH_INFERSENT_W2V
        elif single_encoder == "sent2vec":
            encoder_model = sent2vec_model(model_path=PATH_SENT2VEC_BIN)
            model_name = PATH_SENT2VEC_BIN
        elif single_encoder == "USE":
            module_url = (
                "https://tfhub.dev/google/universal-sentence-encoder/2"
            )  # @param ["https://tfhub.dev/google/universal-sentence-encoder/2", "https://tfhub.dev/google/universal-sentence-encoder-large/3"]

            # Import the Universal Sentence Encoder's TF Hub module
            encoder_model = hub_module(module_url)
        elif single_encoder == "fasttext":
            logger.info(
                f"Loading the fasttext model ({PATH_FASTTEXT}) (~15 minutes)"
            )
            encoder_model = KeyedVectors.load_word2vec_format(PATH_FASTTEXT)
            model_name = PATH_FASTTEXT
        elif single_encoder == "tf-idf":
            logger.info("Utilisation de TF-IDF")
            encoder_model = None

        # Format the model name
        model_name = Path(model_name).stem

        # Loop over the sample parameters defined in SAMPLES_LIST
        for exp in SAMPLES_LIST:
            experiment = Experiment(single_encoder, exp)

            # Result lists
            AUC_list = []
            matrice_confusion_list = []
            mesures_list = []
            iteration_time_list = []

            # Test summary
            logger.info(
                f"Parameters: Data = {data_filename}, Novelty = {theme}, Encoder = {experiment.encoder}, Method = {method}, Historic/Context/Novelty: {experiment.size_historic}/{experiment.size_context}/{experiment.size_novelty}"
            )

            # Iteration loop
            for iteration in tqdm(range(1, ITERATION_NB + 1),
                                  dynamic_ncols=True):
                iteration_begin = time.time()
                logger.debug(f"iteration : {iteration}")
                data_historic, data_context = split_data(
                    data,
                    size_historic=experiment.size_historic,
                    size_context=experiment.size_context,
                    size_novelty=experiment.size_novelty,
                    theme=theme,
                    fix_seed=fix_seed,
                )

                # Assign the historic and context datasets
                experiment.set_datasets(data_historic, data_context)

                # Apply the encoder, build the embedding vectors
                experiment.run_experiment(encoder_model)

                # Export the vectors for debugging
                # experiment.export_vectors()

                # Run the classification
                try:
                    score, pred = experiment.run_classif(method)
                except Exception as e:
                    logger.error(e)
                    exit()

                obs = [
                    1 if elt == theme else 0
                    for elt in experiment.data_context.theme
                ]
                obs2 = [-1 if x == 1 else 1 for x in obs]

                matrice_confusion = mat_conf(obs, pred)
                logger.debug(f"matrice : {matrice_confusion}")
                mesures = all_measures(matrice_confusion, obs, pred)

                # data_context["obs"] = pd.Series(obs).values
                # data_context["pred"] = pd.Series(pred).values
                # data_context["score"] = pd.Series(score).values
                # data_context.to_csv(
                #     f"Exports/bloub/data_context_{experiment.size_historic}_{experiment.size_context}_{experiment.size_novelty}_{iteration}.csv",
                #     sep="\t",
                # )

                AUC = roc_auc_score(obs2, score)

                logger.debug(f"AUC : {AUC}")
                iteration_time = "%.2f" % (time.time() - iteration_begin)
                logger.debug(f"temps itération = {iteration_time}")

                # Append the results to the lists
                AUC_list.append(AUC)
                matrice_confusion_list.append(matrice_confusion)
                mesures_list.append(mesures)
                iteration_time_list.append(float(iteration_time))

                # Round numeric results before export
                AUC = round(AUC, 2)
                # iteration_time = round(iteration_time, 2)
                # matrice_confusion = [round(x, 2) for x in matrice_confusion]
                mesures = [round(x, 2) for x in mesures]

                # Export raw results
                logger.debug("Exporting raw results")
                with open(raw_results_filename, "a+") as f:
                    f.write(
                        f"{experiment.ID};{fix_seed};{data_filename};{experiment.heure_test};{theme};{experiment.encoder};{model_name};{method};{experiment.size_historic};{experiment.size_context};{experiment.size_novelty};{iteration};{AUC};{iteration_time};{';'.join(map(str, matrice_confusion))};{';'.join(map(str, mesures))}\n"
                    )

            # Build the condensed results
            AUC_condensed = round(sum(AUC_list) / float(len(AUC_list)), 2)
            iteration_time_condensed = round(
                sum(iteration_time_list) / float(len(iteration_time_list)), 2)
            mean_matrice_confusion_condensed = np.round(
                np.mean(np.array(matrice_confusion_list), axis=0), 2)
            mean_mesures_condensed = np.round(
                np.mean(np.array(mesures_list), axis=0), 2)
            std_matrice_confusion_condensed = np.round(
                np.std(np.array(matrice_confusion_list), axis=0), 2)
            std_mesures_condensed = np.round(
                np.std(np.array(mesures_list), axis=0), 2)
            quantile025_matrice_confusion_condensed = np.round(
                np.quantile(np.array(matrice_confusion_list), 0.25, axis=0), 2)
            quantile025_mesures_condensed = np.round(
                np.quantile(np.array(mesures_list), 0.25, axis=0), 2)
            med_matrice_confusion_condensed = np.round(
                np.quantile(np.array(matrice_confusion_list), 0.5, axis=0), 2)
            med_mesures_condensed = np.round(
                np.quantile(np.array(mesures_list), 0.5, axis=0), 2)
            quantile075_matrice_confusion_condensed = np.round(
                np.quantile(np.array(matrice_confusion_list), 0.75, axis=0), 2)
            quantile075_mesures_condensed = np.round(
                np.quantile(np.array(mesures_list), 0.75, axis=0), 2)

            # Export condensed results
            logger.debug("Exporting condensed results")
            with open(condensed_results_filename, "a+") as f:
                f.write(
                    f"{experiment.ID};{fix_seed};{data_filename};{experiment.heure_test};{theme};{experiment.encoder};{model_name};{method};{experiment.size_historic};{experiment.size_context};{experiment.size_novelty};{iteration};{AUC_condensed};{iteration_time_condensed};{';'.join(map(str, mean_matrice_confusion_condensed))};{';'.join(map(str, mean_mesures_condensed))};{';'.join(map(str, std_matrice_confusion_condensed))};{';'.join(map(str, std_mesures_condensed))};{';'.join(map(str, quantile025_matrice_confusion_condensed))};{';'.join(map(str, quantile025_mesures_condensed))};{';'.join(map(str, med_matrice_confusion_condensed))};{';'.join(map(str, med_mesures_condensed))};{';'.join(map(str, quantile075_matrice_confusion_condensed))};{';'.join(map(str, quantile075_mesures_condensed))}\n"
                )

    logger.info("Temps d'exécution : %.2f secondes" %
                (time.time() - temps_debut))
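The condensed blocks in Examples #5 and #14 repeat the same reduce-and-round pair ten times. The reductions can be generated in a loop instead; a sketch over the same result lists:

import numpy as np

arr_conf = np.array(matrice_confusion_list)  # from the loop above
arr_mes = np.array(mesures_list)

condensed = {}
for label, reduce_fn in (("moy", np.mean), ("std", np.std)):
    condensed[label] = (np.round(reduce_fn(arr_conf, axis=0), 2),
                        np.round(reduce_fn(arr_mes, axis=0), 2))
for q in (0.25, 0.5, 0.75):
    condensed[f"q{q}"] = (np.round(np.quantile(arr_conf, q, axis=0), 2),
                          np.round(np.quantile(arr_mes, q, axis=0), 2))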
Example #15
                             ],
                             gamma=0.99,
                             max_steps_per_episode=1000)
    agent.preprocess_state = shape_reward

    return agent


if __name__ == '__main__':

    from experiments.experiment import Experiment

    agents = [(build_agent_no_shaping(), 3, run_agent),
              (build_agent_with_shaping(), 3, run_agent)]

    e = Experiment('reward_shaping', agents)

    e.run()

    # steps_df.pivot(columns='sender', values='CumulativeReward').groupby(
    #     lambda colname: 'WithShape' if 'WithShape' in colname else 'NoShape', axis=1).plot()
    # plt.show()

    #    df = e.get_plots([])

    # df.pivot(columns='sender', values='CumulativeReward').plot()
    # plt.show()
    #
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
Example #16
            'map_name': local_args.map_name
        } for _ in range(local_args.env_num)]
        env_args[0]['visualize'] = local_args.visualize
        with tf.device(self.tf_device(global_args)):
            agent = A2C(network_creator=network_creator(config),
                        lr=local_args.lr,
                        td_step=local_args.td_step,
                        ent_coef=local_args.ent_coef,
                        v_coef=local_args.v_coef)
        with agent.create_session(**self.tf_sess_opts(global_args)):
            env = ParallelEnvs.new(mode=local_args.mode,
                                   env_num=local_args.env_num,
                                   env_args=env_args)
            obs_adapter = ObservationAdapter(config)
            act_adapter = ActionAdapter(config)
            env_runner = EnvRunner(agent=agent,
                                   env=env,
                                   train=global_args.train,
                                   observation_adapter=obs_adapter,
                                   action_adapter=act_adapter,
                                   epoch_n=local_args.epoch,
                                   step_n=local_args.td_step,
                                   logdir=local_args.logdir)
            env_runner.run()


Experiment.register(A2CExperiment, "A2C training")

if __name__ == '__main__':
    Experiment.main()
Example #17
def run():
    #experiment_definitions = ["CMS_13TeV_2OSLEP_36invfb"] #"gaussian"
    #experiment_definitions = ["Hinv"] #,"ColliderBit_analysis"]
    experiment_definitions = []
    experiment_modules = {
        lib: importlib.import_module("experiments.{0}".format(lib))
        for lib in experiment_definitions
    }

    # Set the null hypothesis for 'gof' tests. This means fixing the non-nuisance
    # parameters in all of the experiments to something.
    # For testing I am just matching these exactly to observed data: in reality
    # they should come from some model of interest, e.g. a scan best fit.
    gof_null = {}
    gof_null["top_mass"] = {'loc': 173.34}
    gof_null["alpha_s"] = {'loc': 0.1181}
    gof_null["Z_invisible_width"] = {'loc': 0.2}
    gof_null["Higgs_invisible_width"] = {'BF': 0}

    def Collider_Null(N_SR):
        null_s = {"s_{0}".format(i): 0
                  for i in range(N_SR)
                  }  # Actually we'll leave this as zero signal for testing
        #null_theta = {"theta_{0}".format(i): 0 for i in range(7)}
        #null_parameters = {"mu": 0 , **null_s, **null_theta}
        return null_s  #parameters

    # The 'mu' hypothesis null parameters should be defined internally by each experiment.

    # Add the ColliderBit analyses; have to do this after signal hypothesis is
    # set.
    CBexperiments = {}
    for a in CBa.analyses.values():
        signal = Collider_Null(a.N_SR)
        gof_null[a.name] = signal
        CBexperiments[a.name] = a.make_experiment(signal)

    class Empty:
        pass

    experiment_modules["ColliderBit_analysis"] = Empty(
    )  # container to act like module
    experiment_modules["ColliderBit_analysis"].experiments = CBexperiments

    # Extract all experiments from modules (some define more than one experiment)
    # Not all experiments are used in all tests, so we also divide them up as needed
    gof_experiments = []
    mu_experiments = []
    all_experiments = []
    for em in experiment_modules.values():
        for e in em.experiments.values():
            #for e in [list(em.experiments.values())[0]]:
            all_experiments += [e]
            if 'gof' in e.tests.keys(): gof_experiments += [e]
            if 'mu' in e.tests.keys(): mu_experiments += [e]
            #break

    tag = "5e3"
    Nsamples = int(float(tag))
    #Nsamples = 0

    # Ditch the simulations and just compute asymptotic results
    # This is very fast! Only have to fit nuisance parameters under
    # the observed data. Could even save those and do them only once
    # ever, but meh.
    if Nsamples == 0:
        asymptotic_only = True
    else:
        asymptotic_only = False

    do_MC = True
    if asymptotic_only:
        do_MC = False

    # Do single-parameter mu scaling fit?
    do_mu = True

    # Skip to mu_monster
    skip_to_mu_mon = True

    # Analyse full combined model?
    do_monster = True

    # Dictionary of results
    results = {}

    # Actually, the GOF best should work a bit differently. Currently we
    # simulate under the background-only hypothesis, which is fine for
    # testing the goodness of fit of the data to the background-only
    # hypothesis, but we also want to test the goodness-of-fit of e.g.
    # a GAMBIT best fit point. For that, we need to simulate under
    # some best-fit signal hypothesis.

    # Create monster joint experiment
    if do_monster:
        m = Experiment.fromExperimentList(all_experiments)

    # Helper plotting function
    def makeplot(ax,
                 tobin,
                 theoryf,
                 log=True,
                 label="",
                 c='r',
                 obs=None,
                 pval=None,
                 qran=None,
                 title=None):
        print("Generating test statistic plot {0}".format(label))
        if qran is None:
            ran = (0, 25)
        else:
            ran = qran
        yran = (1e-4, 0.5)
        if tobin is not None:
            #print(tobin)
            n, bins = np.histogram(tobin, bins=50, density=True, range=ran)
            #print(n)
            #print("Histogram y range:", np.min(n[n!=0]),np.max(n))
            ax.plot(bins[:-1], n, drawstyle='steps-post', label=label, c=c)
            yran = (1e-4, np.max([0.5, np.max(n)]))
        q = np.arange(ran[0], ran[1], 0.01)
        if theoryf is not None:
            ax.plot(q, theoryf(q), c='k')
        ax.set_xlabel("LLR")
        ax.set_ylabel("pdf(LLR)")
        if log:
            #ax.set_ylim(np.min(n[n!=0]),10*np.max(n))
            ax.set_yscale("log")
        if obs is not None:
            # Draw line for observed value, and show p-value region shaded
            qfill = np.arange(obs, ran[1], 0.01)
            if theoryf is not None:
                ax.fill_between(qfill,
                                0,
                                theoryf(qfill),
                                lw=0,
                                facecolor=c,
                                alpha=0.2)
            pval_str = ""
            if pval is not None:
                #print("pval:", pval)
                pval_str = " (p={0:.2g})".format(pval)
            ax.axvline(x=obs,
                       lw=2,
                       c=c,
                       label="Observed ({0}){1}".format(label, pval_str))
        ax.set_xlim(ran[0], ran[1])
        ax.set_ylim(yran[0], yran[1])
        if title is not None:
            ax.set_title(title)

    # Simulate data and prepare results dictionaries
    all_samples = []
    for e in gof_experiments:
        print(e.name)
        #print(e.general_model)
        #print(e.general_model.model)
        #print(e.general_model.model.submodels)
        #print(e.general_model.model.submodels[0].submodels)
        print("test_pars:", e.tests['gof'].test_pars)
        if do_MC:
            all_samples += [
                e.general_model.simulate(Nsamples, e.tests['gof'].test_pars)
            ]  # Just using test parameter values
        else:
            all_samples += [[]]
        results[e.name] = {}

    LLR_obs_monster = 0
    if not skip_to_mu_mon:
        # Main loop for fitting experiments
        LLR_monster = 0
        for j, (e, samples) in enumerate(zip(gof_experiments, all_samples)):
            # Do fit!
            test_parameters = gof_null[
                e.name]  # replace this with e.g. prediction from MSSM best fit
            LLR, LLR_obs, pval, epval, gofDOF = e.do_gof_test(
                test_parameters, samples)
            # Save LLR for combining (only works if experiments have no common parameters)
            #print("j:{0}, LLR:{1}".format(j,LLR))
            if LLR is not None:
                LLR_monster += LLR
            else:
                LLR_monster = None
            LLR_obs_monster += LLR_obs

            # Plot!
            fig = plt.figure(figsize=(6, 4))
            ax = fig.add_subplot(111)
            # Range for test statistic axis. Draw as far as is equivalent to 5 sigma
            qran = [0, sps.chi2.ppf(sps.chi2.cdf(25, df=1), df=gofDOF)]
            makeplot(ax,
                     LLR,
                     lambda q: sps.chi2.pdf(q, gofDOF),
                     log=True,
                     label='free s',
                     c='g',
                     obs=LLR_obs,
                     pval=pval,
                     qran=qran,
                     title=e.name + " (Nbins={0})".format(gofDOF))
            ax.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10})
            fig.savefig('auto_experiment_{0}_{1}.png'.format(e.name, tag))
            plt.close(fig)

            # Fit mu model
            if do_mu:
                mu_LLR, mu_LLR_obs, mu_pval, mu_epval, muDOF = e.do_mu_test(
                    e.tests['mu'].test_signal, samples)

                # Plot!
                fig = plt.figure(figsize=(6, 4))
                ax = fig.add_subplot(111)
                makeplot(
                    ax,
                    mu_LLR,
                    lambda q: sps.chi2.pdf(q, muDOF),
                    log=True,  # muDOF should just be 1
                    label='mu',
                    c='b',
                    obs=mu_LLR_obs,
                    pval=mu_pval,
                    title=e.name)
                ax.legend(loc=1,
                          frameon=False,
                          framealpha=0,
                          prop={'size': 10})
                fig.savefig('auto_experiment_mu_{0}_{1}.png'.format(
                    e.name, tag))
                plt.close(fig)

            # Store results
            results[e.name]["LLR_gof_b"] = LLR_obs
            results[e.name]["apval_gof_b"] = pval
            results[e.name]["asignif. gof_b"] = -sps.norm.ppf(
                pval
            )  #/2.) I prefer two-tailed but Andrew says 1-tailed is the convention...
            results[e.name]["DOF"] = gofDOF
            if do_mu:
                results[e.name]["LLR_mu_b"] = mu_LLR_obs
                results[e.name]["apval_mu_b"] = mu_pval
                results[e.name]["asignif. mu_b"] = -sps.norm.ppf(mu_pval)
            if LLR is not None:
                results[e.name]["epval_gof_b"] = epval
                results[e.name]["esignif. gof_b"] = -sps.norm.ppf(
                    epval
                )  #/2.) I prefer two-tailed but Andrew says 1-tailed is the convention...
                if do_mu:
                    results[e.name]["epval_mu_b"] = mu_epval
                    results[e.name]["esignif. mu_b"] = -sps.norm.ppf(mu_epval)

        # Leftover debug code; LLR can be None when do_MC is False, so this
        # stays commented out.
        #a = np.argsort(LLR)
        #print("LLR_monster:",LLR_monster[a])
        #quit()

        # Plot monster LLR distribution
        fig = plt.figure(figsize=(6, 4))
        ax = fig.add_subplot(111)
        monster_DOF = np.sum([e.DOF for e in gof_experiments])
        monster_pval = np.atleast_1d(
            1 - sps.chi2.cdf(LLR_obs_monster, monster_DOF))[0]
        monster_epval = c.e_pval(LLR_monster,
                                 LLR_obs_monster) if do_MC else None
        monster_qran = [
            0, sps.chi2.ppf(sps.chi2.cdf(25, df=1), df=monster_DOF)
        ]
        print("Monster DOF:", monster_DOF)
        print("Monster pval:", monster_pval)
        print("Monster LLR_obs:", LLR_obs_monster)
        makeplot(ax,
                 LLR_monster,
                 lambda q: sps.chi2.pdf(q, monster_DOF),
                 log=True,
                 label='free s',
                 c='g',
                 obs=LLR_obs_monster,
                 pval=monster_pval,
                 qran=monster_qran,
                 title="Monster")
        ax.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10})
        fig.savefig('auto_experiment_monster_{0}.png'.format(tag))
        plt.close(fig)

    # Join all samples
    if do_MC:
        monster_samples = np.concatenate(
            [samp.reshape(Nsamples, 1, -1) for samp in all_samples], axis=-1)
        # Only defined when Monte Carlo samples were generated
        print("monster_samples.shape:", monster_samples.shape)
    else:
        monster_samples = None

    if do_mu and do_monster:
        signal = m.tests['mu'].test_signal
        mu_LLR, mu_LLR_obs, mu_pval, mu_epval, muDOF = m.do_mu_test(
            signal, monster_samples)

        # Plot!
        fig = plt.figure(figsize=(6, 4))
        ax = fig.add_subplot(111)
        makeplot(ax,
                 mu_LLR,
                 lambda q: sps.chi2.pdf(q, 1),
                 log=True,
                 label='mu',
                 c='b',
                 obs=mu_LLR_obs,
                 pval=mu_pval,
                 title="Monster")
        ax.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10})
        fig.savefig('auto_experiment_mu_monster_{0}.png'.format(tag))
        plt.close(fig)

    # Store results for Monster. The gof quantities only exist when the
    # full per-experiment loop above actually ran.
    results["Combined"] = {}
    if not skip_to_mu_mon:
        results["Combined"]["LLR_gof_b"] = LLR_obs_monster
        results["Combined"]["apval_gof_b"] = monster_pval
        results["Combined"]["asignif. gof_b"] = -sps.norm.ppf(monster_pval)
        results["Combined"]["DOF"] = monster_DOF
        if do_MC:
            results["Combined"]["epval_gof_b"] = monster_epval
            results["Combined"]["esignif. gof_b"] = -sps.norm.ppf(monster_epval)
    if do_mu and do_monster:
        results["Combined"]["LLR_mu_b"] = mu_LLR_obs
        results["Combined"]["apval_mu_b"] = mu_pval
        results["Combined"]["asignif. mu_b"] = -sps.norm.ppf(mu_pval)
        if do_MC:
            results["Combined"]["epval_mu_b"] = mu_epval
            results["Combined"]["esignif. mu_b"] = -sps.norm.ppf(mu_epval)

    # Ok let's produce some nice tables of results. Maybe even
    # some cool bar graphs showing the "pull" of each experiment

    # Convert results to Pandas dataframe
    r = pd.DataFrame.from_dict(results)
    order = ['DOF', 'LLR_gof_b', 'apval_gof_b']
    if do_MC: order += ['epval_gof_b']
    order += ['asignif. gof_b']
    if do_MC: order += ['esignif. gof_b']
    if do_mu: order += ['LLR_mu_b', 'apval_mu_b']
    if do_MC and do_mu: order += ['epval_mu_b']
    if do_mu: order += ['asignif. mu_b']
    if do_MC and do_mu: order += ['esignif. mu_b']
    exp_order = [e.name for e in gof_experiments] + ['Combined']
    print(r[exp_order].reindex(order))
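The example above reports both asymptotic p-values (from the chi-squared form) and empirical ones computed from the simulated LLR samples via `c.e_pval`. A sketch of the standard empirical p-value, assuming that is what the helper computes:

import numpy as np

def e_pval(simulated_llr, observed_llr):
    # Fraction of simulated test statistics at least as extreme as the
    # observed one; the +1 terms are the usual finite-sample correction
    # that keeps the estimate away from exactly zero.
    sims = np.asarray(simulated_llr)
    return (np.sum(sims >= observed_llr) + 1) / (sims.size + 1)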
Example #18
def main():
    config_obj = config.Config.get_instance()
    settings = config_obj.parse(sys.argv[1])

    team_sizes = settings['team_sizes']
    bandit_sizes = settings['bandit_sizes']
    trials = settings['trials']
    executions = settings['executions']

    experiments = []

    # values of the prob. distribution used for agent generation;
    # their nature depends on whether the distribution is Gaussian or uniform
    dist_params = settings['upper_bounds']

    if settings['ltd_type'] == 'gaussian':
        # materialise the product up front; otherwise the iterator would be
        # consumed on its first use
        dist_params = list(
            itertools.product(settings['mus'], settings['sigmas']))

    print('Parameters:')
    pprint.PrettyPrinter().pprint(settings)

    # sets up a number of experiments for each variation on bandit size,
    # team size and distribution parameters
    # the number of repetitions for each variation is in the 'executions' variable
    for n_arms in bandit_sizes:
        for team_sz in team_sizes:
            for param in dist_params:

                print('Preparing for %d/%d/%s' % (n_arms, team_sz, param))

                # if experiment is gaussian, param has two values
                mu_or_upper_bound = param if settings[
                    'ltd_type'] == 'uniform' else param[0]

                # TODO: make experiments with different sigmas be written to different places
                os.system("mkdir -p " + os.path.join(
                    settings['output_dir'], str(n_arms), str(team_sz), '%.2f' %
                    mu_or_upper_bound))

                # identifies groups of experiments by their parameters
                exp_group_name = '%d/%d/%.2f' % (n_arms, team_sz,
                                                 mu_or_upper_bound)

                # will have one experiment for each configuration of the parameters
                experiment_batch = []

                for e in range(executions):
                    sys.stdout.write(
                        '\rSetup for %d arms, |X| = %6d, u/mu = %.4f, exec=%6d'
                        % (n_arms, team_sz, mu_or_upper_bound, e))

                    bandit = Bandit(n_arms, None, 0.25)

                    learner = LearningAgent(
                        bandit,
                        alpha=settings['alpha'],
                        epsilon=settings['epsilon'],
                        alpha_decay=settings['alpha_decay'],
                        epsilon_decay=settings['epsilon_decay'])

                    if settings['ltd_type'] == 'uniform':
                        controller = Delegator(
                            [
                                PruningAgentFair2(
                                    bandit, 0.95, u=mu_or_upper_bound)
                                for _ in range(team_sz)
                            ],
                            alpha=settings['alpha'],
                            epsilon=settings['epsilon'],
                            alpha_decay=settings['alpha_decay'],
                            epsilon_decay=settings['epsilon_decay'])
                    else:
                        controller = Delegator(
                            [
                                GaussianAgentPrune(bandit,
                                                   0.95,
                                                   mu=mu_or_upper_bound,
                                                   sigma=param[1])
                                for _ in range(team_sz)
                            ],
                            alpha=settings['alpha'],
                            epsilon=settings['epsilon'],
                            alpha_decay=settings['alpha_decay'],
                            epsilon_decay=settings['epsilon_decay'])

                    experiment_id = '%d/%d/%d/%.2f' % (e, n_arms, team_sz,
                                                       mu_or_upper_bound)

                    lta_experiment = Experiment(bandit, learner,
                                                'LtA/' + experiment_id)
                    ltd_experiment = Experiment(bandit, controller,
                                                'LtD/' + experiment_id)

                    experiment_batch.append(lta_experiment)
                    experiment_batch.append(ltd_experiment)

                # this batch of experiment is ready. run it:
                print('\nSetup finished for %d experiments.' %
                      len(experiment_batch))
                manager = ParallelExperiment(experiment_batch)
                manager.run(trials)
                plot(manager.result, settings['output_dir'],
                     settings['ltd_type'])
                print('Plot OK for %s' % exp_group_name)
Example #19
                                 CumulativeReward(),
                                 EpisodeTime()
                             ],
                             gamma=0.99,
                             max_steps_per_episode=500)

    return agent


if __name__ == '__main__':

    from experiments.experiment import Experiment

    agents = [
        (build_agent('FastUpdate'), num_agents, train_fast_update),
        (build_agent('MediumUpdate'), num_agents, train_medium_update),
        (build_agent('SlowUpdate'), num_agents, train_slow_update),
    ]

    e = Experiment(experiment_name, agents)

    e.run()

    import matplotlib.pyplot as plt
    p = e.get_plots(
        ['CumulativeReward', 'EpisodeReward', 'RollingEpisodeReward50'])

    plt.show()

#     fig.savefig(experiment_name + '_avg_reward.png', dpi=500, linewidth=1)
Example #20
def main():
    config_obj = config.Config.get_instance()
    settings = config_obj.parse(sys.argv[1])

    expNumber = int(sys.argv[2])

    team_sizes = settings['team_sizes']
    bandit_sizes = settings['bandit_sizes']
    mus = settings['mus']
    sigmas = settings['sigmas']
    trials = settings['trials']
    executions = settings['executions']

    experiments = []
    #exp_dict = {}

    # values of the prob. distribution used for agent generation;
    # their nature depends on whether the distribution is Gaussian or uniform
    dist_params = settings['upper_bounds']

    if settings['ltd_type'] == 'gaussian':
        # materialise the product up front; otherwise the iterator would be
        # consumed on its first use
        dist_params = list(
            itertools.product(settings['mus'], settings['sigmas']))

    #name = "results_gaussian"

    print('Parameters:')
    pprint.PrettyPrinter().pprint(settings)

    # execution rewards
    execution_rwd_lta = np.zeros((executions, trials))
    execution_rwd_ltd = np.zeros((executions, trials))

    # probability of taking the best action
    p_best_lta = np.zeros((executions, trials))
    p_best_ltd = np.zeros((executions, trials))

    # times the best action was taken
    times_best_lta = np.zeros((executions, trials))
    times_best_ltd = np.zeros((executions, trials))

    # cumulative rewards
    cumulative_rewards_lta = np.zeros((executions, trials))
    cumulative_rewards_ltd = np.zeros((executions, trials))

    # cumulative regrets
    cumulative_regret_lta = np.zeros((executions, trials))
    cumulative_regret_ltd = np.zeros((executions, trials))

    # cumulative regrets in expectation
    cumulative_regret_exp_lta = np.zeros((executions, trials))
    cumulative_regret_exp_ltd = np.zeros((executions, trials))

    for n_arms in bandit_sizes:
        for team_sz in team_sizes:
            for param in dist_params:

                print('Preparing for %d/%d/%s' % (n_arms, team_sz, param))

                # if experiment is gaussian, param has two values
                mu_or_upper_bound = param if settings[
                    'ltd_type'] == 'uniform' else param[0]

                os.system("mkdir -p " +
                          os.path.join(settings['output_dir'], str(expNumber),
                                       str(n_arms), str(team_sz), '%.2f' %
                                       mu_or_upper_bound))

                # identifies groups of experiments by their parameters
                exp_group_name = '%d/%d/%.2f' % (n_arms, team_sz,
                                                 mu_or_upper_bound)

                for e in range(executions):
                    sys.stdout.write(
                        '\rSetup for %d arms, |X| = %6d, u/mu = %.4f, exec=%6d'
                        % (n_arms, team_sz, mu_or_upper_bound, e))

                    bandit = Bandit(n_arms, None, 0.25)

                    learner = LearningAgent(
                        bandit,
                        alpha=settings['alpha'],
                        epsilon=settings['epsilon'],
                        alpha_decay=settings['alpha_decay'],
                        epsilon_decay=settings['epsilon_decay'])

                    if settings['ltd_type'] == 'uniform':
                        controller = Delegator(
                            [
                                PruningAgentFair2(
                                    bandit, 0.95, u=mu_or_upper_bound)
                                for _ in range(team_sz)
                            ],
                            alpha=settings['alpha'],
                            epsilon=settings['epsilon'],
                            alpha_decay=settings['alpha_decay'],
                            epsilon_decay=settings['epsilon_decay'])
                    else:
                        controller = Delegator(
                            [
                                GaussianAgentPrune(bandit,
                                                   0.95,
                                                   mu=mu_or_upper_bound,
                                                   sigma=param[1])
                                for _ in range(team_sz)
                            ],
                            alpha=settings['alpha'],
                            epsilon=settings['epsilon'],
                            alpha_decay=settings['alpha_decay'],
                            epsilon_decay=settings['epsilon_decay'])

                    experiment_id = '%d/%d/%d/%.2f' % (e, n_arms, team_sz,
                                                       mu_or_upper_bound)

                    # creates and runs the experiments
                    lta_experiment = Experiment(bandit, learner,
                                                'LtA/' + experiment_id)
                    lta_experiment.run(trials)

                    ltd_experiment = Experiment(bandit, controller,
                                                'LtD/' + experiment_id)
                    ltd_experiment.run(trials)

                    # extracts data the experiments just performed
                    execution_rwd_lta[e] = lta_experiment.rewards
                    execution_rwd_ltd[e] = ltd_experiment.rewards

                    p_best_lta[e] = lta_experiment.p_best
                    p_best_ltd[e] = ltd_experiment.p_best

                    times_best_lta[e] = lta_experiment.cumulative_times_best
                    times_best_ltd[e] = ltd_experiment.cumulative_times_best

                    cumulative_rewards_lta[
                        e] = lta_experiment.cumulative_rewards
                    cumulative_rewards_ltd[
                        e] = ltd_experiment.cumulative_rewards

                    cumulative_regret_lta[
                        e] = lta_experiment.cumulative_regrets
                    cumulative_regret_ltd[
                        e] = ltd_experiment.cumulative_regrets

                    cumulative_regret_exp_lta[
                        e] = lta_experiment.cumulative_regrets_exp
                    cumulative_regret_exp_ltd[
                        e] = ltd_experiment.cumulative_regrets_exp

                # here all repetitions of a parameter configuration have finished
                # results will be gathered and plotted in the following lines

                # determines the meeting point (where LtA's performance meets LtD) for each criteria:
                # rewards, prob. of best action, times of best action executed, cumulative reward,
                # cumulative regret and expected regret
                meeting_rewards = meeting_point(np.mean(execution_rwd_lta, 0),
                                                np.mean(execution_rwd_ltd, 0))
                meeting_pbest = meeting_point(np.mean(p_best_lta, 0),
                                              np.mean(p_best_ltd, 0))
                meeting_tbest = meeting_point(np.mean(times_best_lta, 0),
                                              np.mean(times_best_ltd, 0))
                meeting_cumulative_reward = meeting_point(
                    np.mean(cumulative_rewards_lta, 0),
                    np.mean(cumulative_rewards_ltd, 0))
                meeting_cumulative_regret = meeting_point(
                    np.mean(cumulative_regret_ltd, 0),
                    np.mean(cumulative_regret_lta, 0))
                meeting_regret_exp = meeting_point(
                    np.mean(cumulative_regret_exp_ltd, 0),
                    np.mean(cumulative_regret_exp_lta, 0))

                # plots the data
                plt.figure()
                plt.plot(np.mean(execution_rwd_lta, 0), label="Actions")
                plt.plot(np.mean(execution_rwd_ltd, 0), label="Delegate")
                plt.plot(
                    np.convolve(np.mean(execution_rwd_lta, 0),
                                np.ones((100, )) / 100,
                                mode='valid'))
                plt.plot(
                    np.convolve(np.mean(execution_rwd_ltd, 0),
                                np.ones((100, )) / 100,
                                mode='valid'))
                plt.xlabel("Iteration")
                plt.ylabel("Reward")
                plt.legend()
                plt.savefig(
                    os.path.join(settings['output_dir'], str(expNumber),
                                 str(n_arms), str(team_sz),
                                 '%.2f' % mu_or_upper_bound, "reward.pdf"))
                plt.close()

                plt.figure()
                plt.plot(np.mean(p_best_lta, 0),
                         color="#1f77b4",
                         label="Actions")
                plt.plot(np.mean(p_best_ltd, 0),
                         color="#ff7f0e",
                         label="Delegate")
                plt.errorbar(range(0, trials, 50),
                             np.mean(p_best_lta, 0)[0:trials:50],
                             yerr=np.std(p_best_lta, 0)[0:trials:50],
                             color="#1f77b4",
                             fmt=".",
                             capsize=3)
                plt.errorbar(range(0, trials, 50),
                             np.mean(p_best_ltd, 0)[0:trials:50],
                             yerr=np.std(p_best_ltd, 0)[0:trials:50],
                             color="#ff7f0e",
                             fmt=".",
                             capsize=3)
                plt.xlabel("Iteration")
                plt.ylabel(r"$p_{a^*}$")
                plt.legend()
                plt.savefig(
                    os.path.join(settings['output_dir'], str(expNumber),
                                 str(n_arms), str(team_sz),
                                 '%.2f' % mu_or_upper_bound, "pbest.pdf"))
                plt.close()

                plt.figure()
                plt.plot(np.mean(times_best_lta, 0),
                         color="#1f77b4",
                         label="Actions")
                plt.plot(np.mean(times_best_ltd, 0),
                         color="#ff7f0e",
                         label="Delegate")
                plt.errorbar(range(0, trials, 50),
                             np.mean(times_best_lta, 0)[0:trials:50],
                             yerr=np.std(times_best_lta, 0)[0:trials:50],
                             color="#1f77b4",
                             fmt=".",
                             capsize=3)
                plt.errorbar(range(0, trials, 50),
                             np.mean(times_best_ltd, 0)[0:trials:50],
                             yerr=np.std(times_best_ltd, 0)[0:trials:50],
                             color="#ff7f0e",
                             fmt=".",
                             capsize=3)
                plt.xlabel("Iteration")
                plt.ylabel(r"# $a^*$")
                plt.legend()
                plt.savefig(
                    os.path.join(settings['output_dir'], str(expNumber),
                                 str(n_arms), str(team_sz),
                                 '%.2f' % mu_or_upper_bound, "timesBest.pdf"))
                plt.close()

                plt.figure()
                plt.plot(np.mean(cumulative_rewards_lta, 0),
                         color="#1f77b4",
                         label="Actions")
                plt.plot(np.mean(cumulative_rewards_ltd, 0),
                         color="#ff7f0e",
                         label="Delegate")
                plt.errorbar(range(0, trials, 50),
                             np.mean(cumulative_rewards_lta, 0)[0:trials:50],
                             yerr=np.std(cumulative_rewards_lta,
                                         0)[0:trials:50],
                             color="#1f77b4",
                             fmt=".",
                             capsize=3)
                plt.errorbar(range(0, trials, 50),
                             np.mean(cumulative_rewards_ltd, 0)[0:trials:50],
                             yerr=np.std(cumulative_rewards_ltd,
                                         0)[0:trials:50],
                             color="#ff7f0e",
                             fmt=".",
                             capsize=3)
                plt.xlabel("Iteration")
                plt.ylabel("Cumulative reward")
                plt.legend()
                plt.savefig(
                    os.path.join(settings['output_dir'], str(expNumber),
                                 str(n_arms), str(team_sz),
                                 '%.2f' % mu_or_upper_bound,
                                 "cumulativeRewards.pdf"))
                plt.close()

                plt.figure()
                plt.plot(np.mean(cumulative_regret_lta, 0),
                         color="#1f77b4",
                         label="Actions")
                plt.plot(np.mean(cumulative_regret_ltd, 0),
                         color="#ff7f0e",
                         label="Delegate")
                plt.errorbar(range(0, trials, 50),
                             np.mean(cumulative_regret_lta, 0)[0:trials:50],
                             yerr=np.std(cumulative_regret_lta,
                                         0)[0:trials:50],
                             color="#1f77b4",
                             fmt=".",
                             capsize=3)
                plt.errorbar(range(0, trials, 50),
                             np.mean(cumulative_regret_ltd, 0)[0:trials:50],
                             yerr=np.std(cumulative_regret_ltd,
                                         0)[0:trials:50],
                             color="#ff7f0e",
                             fmt=".",
                             capsize=3)
                plt.xlabel("Iteration")
                plt.ylabel(r"$\sum $Regret")
                plt.legend()
                plt.savefig(
                    os.path.join(settings['output_dir'], str(expNumber),
                                 str(n_arms), str(team_sz),
                                 '%.2f' % mu_or_upper_bound,
                                 "cumulativeRegret.pdf"))
                plt.close()

                plt.figure()
                plt.plot(np.mean(cumulative_regret_exp_lta, 0),
                         color="#1f77b4",
                         label="Actions")
                plt.plot(np.mean(cumulative_regret_exp_ltd, 0),
                         color="#ff7f0e",
                         label="Delegate")
                plt.errorbar(range(0, trials, 50),
                             np.mean(cumulative_regret_exp_lta,
                                     0)[0:trials:50],
                             yerr=np.std(cumulative_regret_exp_lta,
                                         0)[0:trials:50],
                             color="#1f77b4",
                             fmt=".",
                             capsize=3)
                plt.errorbar(range(0, trials, 50),
                             np.mean(cumulative_regret_exp_ltd,
                                     0)[0:trials:50],
                             yerr=np.std(cumulative_regret_exp_ltd,
                                         0)[0:trials:50],
                             color="#ff7f0e",
                             fmt=".",
                             capsize=3)
                plt.xlabel("Iteration")
                plt.ylabel(r"$E(\sum $Regret$)$")
                plt.legend()
                plt.savefig(
                    os.path.join(settings['output_dir'], str(expNumber),
                                 str(n_arms), str(team_sz),
                                 '%.2f' % mu_or_upper_bound,
                                 "expectedCumulativeRegret.pdf"))
                plt.close()

                # creates the file to dump the results in
                pickle_file = open(
                    os.path.join(settings['output_dir'], str(expNumber),
                                 str(n_arms), str(team_sz),
                                 '%.2f' % mu_or_upper_bound, "results.pickle"),
                    "wb")

                # dumps result data to file
                pickle.dump([
                    np.mean(execution_rwd_lta, 0),
                    np.mean(execution_rwd_ltd, 0),
                    np.mean(p_best_lta, 0),
                    np.mean(p_best_ltd, 0),
                    np.mean(times_best_lta, 0),
                    np.mean(times_best_ltd, 0),
                    np.mean(cumulative_rewards_lta, 0),
                    np.mean(cumulative_rewards_ltd, 0),
                    np.mean(cumulative_regret_lta, 0),
                    np.mean(cumulative_regret_ltd, 0),
                    np.mean(cumulative_regret_exp_lta, 0),
                    np.mean(cumulative_regret_exp_ltd, 0),
                    meeting_rewards,
                    meeting_pbest,
                    meeting_tbest,
                    meeting_cumulative_reward,
                    meeting_cumulative_regret,
                    meeting_regret_exp,
                ], pickle_file)
                pickle_file.close()
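Examples #18 and #20 both rely on a `meeting_point` helper whose definition is not shown; the comments describe it as the point where LtA's performance meets LtD's. A plausible sketch under that assumption:

import numpy as np

def meeting_point(series_a, series_b):
    # First trial index at which series_a has caught up with series_b;
    # returns -1 if the curves never meet within the horizon.
    crossings = np.nonzero(np.asarray(series_a) >= np.asarray(series_b))[0]
    return int(crossings[0]) if crossings.size else -1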
Example #21
        in_data[carrier] = dict()
    if noise not in in_data[carrier]:
        in_data[carrier][noise] = dict()
    in_data[carrier][noise][int(dB)] = (
        correct,
        computed,
    )

for carrier in in_data:
    for noise in in_data[carrier]:
        X = sorted(in_data[carrier][noise].keys())
        Y = []
        for x in X:
            data = in_data[carrier][noise][x]
            # print "ber for [{0},{1},{2}]: {3}".format(carrier, noise, x, BER(data[0], data[1]))
            Y.append(Experiment.BER(data[0], data[1]))
            # Y = [BER(in_data[carrier][noise][x][0], in_data[carrier][noise][x][1]) for x in X]
        plt.clf()
        plt.axis([-30, 0, 0, 1])
        plt.title('{0} with {1}'.format(carrier, noise))
        plt.xlabel('Noise amplitude (dB)')
        plt.ylabel('Bit error rate')
        plt.grid(True)
        plt.plot(X, Y, 'bo-')
        filename = "BER_{0}_{1}.png".format(carrier, noise)
        path = os.path.join(os.path.realpath('../doc/figures/experiments'),
                            filename)
        plt.savefig(path, bbox_inches='tight', transparent=True)
        print "Generated BER plot for {0}/{1}".format(carrier, noise)