Example #1
            logger.info("Cache miss, calculating")
            if timeout > 0:
                result = abortable_worker(run, timeout=timeout)
            else:
                result = run()
            save(result)
            return result
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error(sys.exc_info()[0])
        raise

@ex.capture
def save(results, experiment_detailed_name, _config, _log):
    _config_cleaned = copy.deepcopy(_config)
    del _config_cleaned['force_reload']

    ninja_set_value(value=results, master_key=experiment_detailed_name, **_config_cleaned)

@ex.capture
def try_load(experiment_detailed_name, _config, _log):
    _config_cleaned = copy.deepcopy(_config)
    del _config_cleaned['force_reload']
    return ninja_get_value(master_key=experiment_detailed_name, **_config_cleaned)

if __name__ == '__main__':
    results = ex.run_commandline().result

import kaggle_ninja
kaggle_ninja.register("fit_active_learning", ex)
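
A minimal sketch of the captured-function pattern this snippet relies on: `@ex.capture` injects config entries plus the special `_config`/`_log` arguments, and `run_commandline()` returns a Run whose `.result` holds whatever the main function returned. The experiment name and config values below are made up for illustration and are not part of the original project.

from sacred import Experiment

ex = Experiment('capture_demo')          # hypothetical experiment name


@ex.config
def config():
    x = 2                                # becomes a config entry


@ex.capture
def compute(x, _config, _log):
    # x is filled in from the config; _config and _log are Sacred's special arguments
    _log.info("computing with x=%d (%d config entries)", x, len(_config))
    return x * x


@ex.main
def main():
    return compute()                     # Sacred supplies the arguments


if __name__ == '__main__':
    run = ex.run_commandline()
    print(run.result)                    # the value returned by main()
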
Example #2
        data_description[2] = dataset['num_classes']
    with model(data_description=data_description, **net_config) as net:
        data = data(**dataset)
        import_weights_into_network(net, starting_weights)
        _run.info['measurements'] = measure_metrics(net, data.get_testset(),
                                                    uncertainty_metrics)


@ex.main
def uncertainty_benchmark(modelname, net_config, dataset, starting_weights,
                          benchmark, uncertainty_metrics, _run):
    model = get_model(modelname)
    data = get_dataset(dataset['name'])
    with model(data_description=data.get_data_description(),
               **net_config) as net:
        data = data(**dataset)
        import_weights_into_network(net, starting_weights)
        for metric in uncertainty_metrics:
            measurements = evaluate_uncertainty(net,
                                                data.get_testset(),
                                                metric,
                                                benchmark=benchmark)
            _run.info.setdefault('measurements', {})[metric] = measurements


if __name__ == '__main__':
    ex.run_commandline()
    # for some reason we have processes running in the background that won't stop
    # this is the only way to kill them
    os._exit(os.EX_OK)
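
The `_run.info` dictionary used in the loop above is a free-form store that attached observers persist with the run. A stripped-down sketch of that pattern; the metric names and the 0.0 values are placeholders, not the benchmark's real output.

from sacred import Experiment

ex = Experiment('info_demo')             # hypothetical experiment name


@ex.config
def config():
    uncertainty_metrics = ['entropy', 'mutual_information']   # placeholder names


@ex.main
def main(uncertainty_metrics, _run):
    for metric in uncertainty_metrics:
        # measurements collected here are saved by the attached observers
        _run.info.setdefault('measurements', {})[metric] = 0.0


if __name__ == '__main__':
    ex.run_commandline()
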
Example #3
                for i_, (image, image_label, attribute_labels, _) \
                        in enumerate(test_dataset):
                    x = image.view((1,) + image.size())
                    x = _make_cuda(torch.autograd.Variable(x))
                    z, z_patches, reconstructed_x = model(x)

                    reconstructed_image = \
                        get_image_from_tensor(reconstructed_x)
                    reconstructed_image.save(
                        os.path.join(IMAGES_DIR, '%d-%d.png' % (epoch, i_)))

                # Save the intermediate model
                model.save_weights(os.path.join(RUN_DIR, MODEL_FILE_NAME))

        # Add the prototype embeddings to tensorboard at the end
        if epoch == num_epochs:
            writer.add_embedding(
                model.prototypes.weight[1:],
                metadata=attribute_names,
                global_step=steps)

    # Save the final model and commit the tensorboard logs
    model.save_weights(os.path.join(RUN_DIR, MODEL_FILE_NAME))
    writer.close()

    return epoch_loss


if __name__ == '__main__':
    ex.run_commandline()
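
This training loop writes images and checkpoints straight to disk; if you also want Sacred's observers to track such files, `add_artifact` is the usual hook. A small sketch under that assumption; the temporary path and the dummy file contents are illustrative only.

import os
import tempfile

from sacred import Experiment

ex = Experiment('artifact_demo')         # hypothetical experiment name


@ex.main
def main(_run):
    run_dir = tempfile.mkdtemp()
    checkpoint = os.path.join(run_dir, 'model.ckpt')
    with open(checkpoint, 'w') as f:
        f.write('dummy checkpoint')      # stands in for model.save_weights(...)
    _run.add_artifact(checkpoint)        # observers copy/track the file


if __name__ == '__main__':
    ex.run_commandline()
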
Example #4
        else:
            d[k] = v
    return d


if __name__ == '__main__':
    params = deepcopy(sys.argv)

    # Get the defaults from default.yaml
    with open(
            os.path.join(os.path.dirname(__file__), "config", "default.yaml"),
            "r") as f:
        try:
            config_dict = yaml.load(f, Loader=yaml.FullLoader)
        except yaml.YAMLError as exc:
            assert False, "default.yaml error: {}".format(exc)
    # Load algorithm and env base configs
    env_config = _get_config(params, "--env-config", "envs")
    alg_config = _get_config(params, "--config", "algs")
    config_dict = recursive_dict_update(config_dict, env_config)
    config_dict = recursive_dict_update(config_dict, alg_config)
    # now add all the config to sacred
    ex.add_config(config_dict)

    # Save to disk by default for sacred
    logger.info("Saving to FileStorageObserver in results/sacred.")
    file_obs_path = os.path.join(results_path, "sacred")
    ex.observers.append(FileStorageObserver.create(file_obs_path))

    ex.run_commandline(params)
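
The YAML handling above reduces to building one dictionary and passing it to `ex.add_config` before `run_commandline`, with a FileStorageObserver writing everything under results/sacred. A reduced sketch that uses in-line dicts instead of the YAML files; the keys and values are placeholders.

import os

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment('config_merge_demo')     # hypothetical experiment name


@ex.main
def main(lr, batch_size):
    return {'lr': lr, 'batch_size': batch_size}


if __name__ == '__main__':
    defaults = {'lr': 0.0005, 'batch_size': 32}   # stands in for default.yaml
    overrides = {'lr': 0.001}                     # stands in for the env/alg configs
    defaults.update(overrides)
    ex.add_config(defaults)
    ex.observers.append(FileStorageObserver.create(os.path.join('results', 'sacred')))
    ex.run_commandline()
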
Example #5
                  oobMAE = {:.4f}".format(maeScoreTrain, maeScore))

        # Optionally save oob predictions
        if save_oob_predictions:
            filename = '{}_oob_pred.csv'.format(time)
            pd.DataFrame(data.inverseScaleTransform(pred.values),
                         index=data.trainids,
                         columns=['loss']).to_csv(os.path.join(
                             output_path, filename),
                                                  index_label='id')
    # Optionally generate test predictions
    if save_test_predictions:
        filename = '{}_test_pred.csv'.format(time)
        Xtr, ytr, Xte, _ = data.get_train_test_features()
        print(alg.get_params())
        alg.fit(Xtr, ytr.values.reshape(-1, ))
        predtest = pd.DataFrame(data.inverseScaleTransform(alg.predict(Xte)),
                                index=data.testids,
                                columns=['loss'])
        predtest.to_csv(os.path.join(output_path, filename), index_label='id')
    return maeScore


if __name__ == '__main__':
    #     print sys.argv
    #     if len(sys.argv) > 1 and sys.argv[1] == 'stam':
    #         ex.observers.append(MongoObserver.create(url='login1:27017',db_name = "allstateRF"))
    #     else:
    #         ex.observers.append(MongoObserver.create(db_name = "allstate"))
    run = ex.run_commandline()
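
The scalar the experiment returns (maeScore) becomes `run.result`, and the commented-out lines show how a MongoObserver would be attached. A tiny sketch of that pattern; the score and the database name are placeholders.

from sacred import Experiment
from sacred.observers import MongoObserver

ex = Experiment('result_demo')           # hypothetical experiment name


@ex.main
def main():
    mae_score = 1234.5                   # placeholder; the real run computes this from predictions
    return mae_score                     # stored as run.result


if __name__ == '__main__':
    # optional, mirrors the commented-out observer above
    # ex.observers.append(MongoObserver.create(db_name='demo'))
    run = ex.run_commandline()
    print(run.result)
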
Example #6
            if schedule == '1/t':
                step_sizes = step_size / (n_updates + 1)
            elif schedule == '1/sqrt(t)':
                step_sizes = step_size / np.sqrt(n_updates + 1)

        elapsed_time += time.perf_counter() - t0

    return values, avg_policies


@exp.main
def single(n_players, n_actions, n_matrices, _seed, conditioning, skewness,
           l1_penalty, gaussian_noise, stochastic_noise, _run):
    mem = Memory(location=expanduser('~/cache'))
    H = mem.cache(make_positive_matrices)(n_players, n_actions, n_matrices,
                                          conditioning, skewness,
                                          stochastic_noise, _seed)
    game = MatrixGame(H, l1_penalty=l1_penalty, gaussian_noise=gaussian_noise)

    values, policies = compute_nash(game)

    _run.info['policies'] = policies.tolist()
    _run.info['values'] = values.tolist()


if __name__ == '__main__':
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)
    exp.observers = [FileStorageObserver.create(exp_dir)]
    exp.run_commandline()
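
The `_seed` argument above is generated by Sacred (or set on the command line) and recorded with the run, which is what makes the cached matrix generation reproducible. A minimal sketch of that mechanism; the experiment name and output directory are made up.

import os
from os.path import expanduser

from sacred import Experiment
from sacred.observers import FileStorageObserver

exp = Experiment('seed_demo')                 # hypothetical experiment name
exp_dir = expanduser('~/output/seed_demo')    # placeholder output directory


@exp.main
def single(_seed, _run):
    # the seed is stored with the run, so the experiment can be reproduced exactly
    _run.info['seed_used'] = _seed
    return _seed


if __name__ == '__main__':
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)
    exp.observers = [FileStorageObserver.create(exp_dir)]
    exp.run_commandline()
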
Example #7
    num_classes = 10
    num_epochs = 2  # SACRED: Have a look at train_nn.job for an example of how we can change parameter settings
    batch_size = 100
    learning_rate = 0.001

    model_file = 'model.ckpt'


@ex.main
def main(_run):
    """
    Sacred needs this main function, to start the experiment.
    If you want to import this experiment in another file (and use its configuration there), you can do that as follows:

    import train_nn
    ex = train_nn.ex

    Then you can use 'ex' the same way we do in this code.
    """

    trainer = Trainer()
    accuracy = trainer.run()

    return {'accuracy': accuracy}  # SACRED: Everything you return here is stored as a result,
    # and will be shown as such on Sacredboard


if __name__ == '__main__':
    ex.run_commandline()  # SACRED: this allows you to run Sacred not only from your terminal,
    # but also, for example, from within PyCharm
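
A sketch of the import-and-reuse pattern the docstring describes, assuming the code above is saved as train_nn.py; the config overrides are illustrative and must name entries that exist in train_nn's config.

# run_elsewhere.py (hypothetical file name)
import train_nn

ex = train_nn.ex

if __name__ == '__main__':
    # run the imported experiment programmatically instead of via the command line
    run = ex.run(config_updates={'num_epochs': 1, 'batch_size': 50})
    print(run.result)                    # the dict returned by main, e.g. {'accuracy': ...}
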
Example #8
        training_sources["BalancedBuildingRealAggregateSource"] = bbs

    ### VALIDATION

    if "RandomizedSequentialSource" in validation_source_names:
        rss = RandomizedSequentialSource(
            activity_data=real_aggregate_activity_data,
            seq_length=validation_seq_length,
            stride=validation_stride,
            vampire_power_per_building=VAMPIRE_POWER,
            rng_seed=sources_seed
        )
        validation_sources["RandomizedSequentialSource"] = rss

    return training_sources, validation_sources


@dataset.command
def convert():
    load_activity_data()
    return True


@standalone.main
def standalone_main():
    load_activity_data()
    return True    

if __name__ == '__main__':
    standalone.run_commandline()
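
A compact sketch of the two entry points used above, assuming `dataset` is a sacred Ingredient and `standalone` is a separate Experiment that includes it; `load_activity_data` is reduced to a stub here.

from sacred import Experiment, Ingredient

dataset = Ingredient('dataset')
standalone = Experiment('standalone_converter', ingredients=[dataset])


def load_activity_data():
    # stub for the real loading/conversion logic
    return True


@dataset.command
def convert():
    # ingredient commands are reachable from the command line of an experiment
    # that includes the ingredient, prefixed with the ingredient path (e.g. dataset.convert)
    return load_activity_data()


@standalone.main
def standalone_main():
    return load_activity_data()


if __name__ == '__main__':
    standalone.run_commandline()
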