Example #1
0
def set_env_metadata(env_id: str, cfg: ExperimentConfig) -> gym.Env:
    """Create the environment *env_id* and copy its space metadata onto *cfg*.

    Mutates ``cfg`` in place (observation/goal/action dimensions and the
    action range) and returns the freshly constructed environment.
    """
    env = gym.make(env_id)
    obs_space = env.observation_space
    act_space = env.action_space
    cfg.obs_dim = obs_space['observation'].shape[0]
    cfg.goal_dim = obs_space['desired_goal'].shape[0]
    cfg.action_dim = act_space.shape[0]
    # Assumes a symmetric Box action space; high[0] is used as the magnitude.
    cfg.action_range = act_space.high[0]
    return env
    def __init__(self, config, host, port, output_dir):
        """Load the experiment config from *config* and set up all managers.

        Side effects: reads the TOML file at ``config``, builds the backend
        manager, an (optional) CPU-load manager, and an NTP client.
        """
        with open(config, 'r') as f:
            LOGGER.info('Loading config')
            self.config = ExperimentConfig(
                toml.load(f, _dict=RecursiveNestedDict))

        LOGGER.warning('Loaded config from %s', config)
        LOGGER.warning('Output directory: %s', output_dir)

        self.host = host
        self.port = port
        self.output_dir = output_dir
        self.clients = list()
        self.tcpdump_proc = None

        self.backend_mgr = BackendManager(self.config)

        # Generate synthetic CPU load only when the config asks for it;
        # otherwise install a no-op manager so callers need no branching.
        self.load_mgr = (CPULoadManager(self.config)
                         if self.config.gen_load
                         else NullCPULoadManager())

        self.ntp_client = ntplib.NTPClient()
        self.offset = 0

        LOGGER.info('Experiment ID: %s', self.config.name)
        LOGGER.info('Clients: %d', self.config.clients)
        LOGGER.info('Runs: %d', self.config.runs)
Example #3
0
def main(cli_opts: List[str], **kwargs) -> None:
    """Dispatch a training run selected by positional CLI options.

    Parameters
    ----------
    cli_opts:
        Exactly four values: ``logdir``, ``mode`` (one of ``source``,
        ``residual``, ``target``), the source env id, and the target env id.
    **kwargs:
        Forwarded verbatim to :class:`ExperimentConfig`.

    Raises
    ------
    ValueError
        If *cli_opts* does not contain exactly four items.
    KeyError
        If *mode* is not one of the known training modes.
    """
    # `assert` is stripped under `python -O`; validate input explicitly.
    if len(cli_opts) != 4:
        raise ValueError(
            'expected 4 CLI options (logdir, mode, source_env_id, '
            'target_env_id), got {}'.format(len(cli_opts)))
    logdir, mode, source_env_id, target_env_id = cli_opts
    options = {
        'source':
        dict(train_env_id=source_env_id, cfg=ExperimentConfig(**kwargs)),
        # Residual mode fine-tunes on the target env starting from the
        # pretrained "source" checkpoint, with a short burn-in period.
        'residual':
        dict(train_env_id=target_env_id,
             cfg=ExperimentConfig(burn_epochs=5, **kwargs),
             pretrain_path=save_path(logdir, 'source')),
        'target':
        dict(train_env_id=target_env_id, cfg=ExperimentConfig(**kwargs)),
    }
    train(eval_env_id=target_env_id,
          logdir=os.path.join('output', 'logs', logdir, mode),
          save_path=save_path(logdir, mode),
          **options[mode])
def train(config: ExperimentConfig):
    """Run an Optuna hyper-parameter search for the ridge objective.

    Publishes ``config`` and the train/test splits through the module-level
    globals so that ``ridge_objective`` (which receives only a trial) can
    read them.
    """
    global CONFIG, DF_TRAIN, DF_TEST
    CONFIG = config
    # Bug fix: the splits were previously bound to unused locals
    # (df_train/df_test), leaving the declared globals DF_TRAIN/DF_TEST
    # unset for the objective function.
    DF_TRAIN, DF_TEST = read_data(config.get("experiment/data_path"))

    model_family = config.get("experiment/model/family")
    experiment_name = config.get("experiment/meta/name")
    study_name = pjoin(model_family,
                       experiment_name)  # Unique identifier of the study.
    # The study is persisted in a sqlite file named after the study, so
    # repeated runs resume instead of restarting (load_if_exists=True).
    storage_name = f"sqlite:///{study_name}.db"
    study = optuna.create_study(study_name=study_name,
                                storage=storage_name,
                                load_if_exists=True)

    n_trials = config.get("experiment/training/n_optuna_trials")
    study.optimize(ridge_objective, n_trials=n_trials)

    print('Number of finished trials:', len(study.trials))
    print('Best trial: ')
    pprint.pprint(study.best_trial.params)
    print(f'Best rmse: {study.best_trial.value:.6f}')
Example #5
0
def train_RNNLM(_run):
    """Train the RNN language model for a single sacred run.

    Builds the model in a fresh TF graph, trains for up to
    ``config.max_epochs`` epochs, checkpoints the weights whenever the
    validation perplexity improves, and stops early when no improvement has
    been seen for ``config.early_stopping`` epochs.

    Args:
        _run: sacred Run object; ``_run.config`` supplies the experiment
            configuration and ``_run._id`` names the checkpoint directory.
    """
    # maintain consistency between sacred config and experiment config
    config = ExperimentConfig(**_run.config)

    # Checkpoints go under <experiment_path>/<run id>/tf_dump.
    # NOTE(review): `experiment_path` and `train_path` are module-level
    # globals not visible in this fragment — confirm they are defined.
    experiment_dump_path = os.path.join(experiment_path, str(_run._id),
                                        "tf_dump")
    if not os.path.exists(experiment_dump_path):
        os.makedirs(experiment_dump_path)

    if config.is_debug:
        print("Running in debug mode...")

    with tf.Graph().as_default():
        # set random seed before the graph is built
        tf.set_random_seed(config.tf_random_seed)

        model = RNNLM_Model(config)
        model.load_corpus(debug=config.is_debug)

        init = tf.global_variables_initializer()
        saver = tf.train.Saver()

        # Grow GPU memory on demand instead of reserving it all up front.
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True

        with tf.Session(config=tf_config) as session:
            best_val_pp = float('inf')
            best_val_epoch = 0

            # Write the graph for TensorBoard; the writer handle is unused.
            tf.summary.FileWriter(os.path.join(train_path, "TensorLog"),
                                  session.graph)

            session.run(init)
            for epoch in range(config.max_epochs):
                print('Epoch {}'.format(epoch))
                start = time.time()
                train_pp = model.run_epoch(session,
                                           model.encoded_train,
                                           train_op=model.train_step)
                valid_pp = model.run_epoch(session, model.encoded_valid)
                print('Training perplexity: {}'.format(train_pp))
                print('Total Training time: {}'.format(time.time() - start))
                print('Validation perplexity: {}'.format(valid_pp))
                # Checkpoint only when validation perplexity improves.
                if valid_pp < best_val_pp:
                    best_val_pp = valid_pp
                    best_val_epoch = epoch
                    saver.save(
                        session,
                        os.path.join(experiment_dump_path, 'rnnlm.weights'))
                # Early stopping: no improvement for `early_stopping` epochs.
                if epoch - best_val_epoch > config.early_stopping:
                    break
                print('Total time: {}'.format(time.time() - start))
    def test_combine_multiple_annotations(self):
        """Combining annotations must not change per-set duplicate counts."""
        run_id = 1
        exp_config = ExperimentConfig(self.root_path,
                                      4,
                                      self.z_dim,
                                      [self.N_1, self.N_2, self.N_3],
                                      num_cluster_config=None)
        exp_config.check_and_create_directories(run_id)

        base_path = get_base_path(exp_config.root_path,
                                  exp_config.Z_DIM,
                                  exp_config.num_units[2],
                                  exp_config.num_units[1],
                                  exp_config.num_cluster_config,
                                  run_id=run_id)

        # One data frame per annotator session, keyed by annotator id.
        data_dict = combine_annotation_sessions(keys=self.keys,
                                                base_path=base_path,
                                                max_epoch=self.max_epoch)

        def duplicate_rows(key):
            # Rows flagged as carrying more than one annotated value.
            frame = data_dict[key]["data_frame"]
            return frame[frame["has_multiple_value"]]

        self.assertEqual(duplicate_rows("manual_annotation_set_1").shape[0], 0)
        self.assertEqual(duplicate_rows("manual_annotation_set_2").shape[0], 9)

        combine_multiple_annotations(data_dict, exp_config, run_id)

        # The duplicate counts must be unchanged after combining.
        self.assertEqual(duplicate_rows("manual_annotation_set_1").shape[0], 0)
        self.assertEqual(duplicate_rows("manual_annotation_set_2").shape[0], 9)
from config import ExperimentConfig
from utils.dir_utils import get_eval_result_dir

# annotator = "ARYA"
# annotator = "MANJU"
annotator = "SUNIL"  # which annotator's session files to process
eval_interval = 300  # presumably steps between evaluations — TODO confirm unit

# Decoder layer widths and latent dimensionality of the experiment.
N_3 = 32
N_2 = 128
N_1 = 64
Z_DIM = 10
run_id = 1
ROOT_PATH = "/Users/sunilkumar/concept_learning_old/image_classification_old/"
exp_config = ExperimentConfig(ROOT_PATH,
                              4,
                              Z_DIM, [N_1, N_2, N_3],
                              num_cluster_config=None)
BATCH_SIZE = exp_config.BATCH_SIZE
DATASET_NAME = exp_config.dataset_name
# create=False: the run directories must already exist.
exp_config.check_and_create_directories(run_id, create=False)

NUMBER_OF_ROWS = 16
NUM_DIGITS_PER_ROW = 4
MAX_BACKUPS = 10
last_epoch = 50

# Directory holding the merged manual annotations for this experiment.
ANNOTATED_PATH = exp_config.BASE_PATH + "manual_annotation_combined"

# Initialize variables
counter_start = 2  # NOTE(review): resume point for annotation — confirm meaning
idx_start = 0
import csv
import cv2
import argparse
import os
from config import ExperimentConfig
# from utils.utils import get_eval_result_dir
annotator = "SUNIL"  # active annotator for this session
#annotator = "MANJU"
eval_interval = 300  # presumably steps between evaluations — TODO confirm unit


ROOT_PATH = "/Users/sunilkumar/concept_learning_old/image_classification_old/"
# Two candidate configs differing only in the latent size (10 vs 20).
config1 = ExperimentConfig(ROOT_PATH, 4, 10, [64, 128, 32], )
config2 = ExperimentConfig(ROOT_PATH, 4, 20, [64, 128, 32], )


# Select the z_dim=20 configuration for this script run.
exp_config = config2
# Recover the layer widths from the config's decoder layout.
N_3 = exp_config.num_units[exp_config.num_decoder_layer - 2]
N_2 = exp_config.num_units[exp_config.num_decoder_layer - 3]
Z_DIM = exp_config.Z_DIM
BATCH_SIZE = exp_config.BATCH_SIZE
DATASET_NAME = exp_config.dataset_name

run_id = 4
# create=False: the run directories must already exist.
exp_config.check_and_create_directories(run_id, create=False)

NUMBER_OF_ROWS = 16
NUM_DIGITS_PER_ROW = 4
MAX_BACKUPS = 10
ANNOTATED_CSV = "annotation.csv"
last_epoch = 50
    args = parse_args()

    # Decoder layer widths / latent size for this training run.
    N_3 = 32
    N_2 = 128
    N_1 = 64
    Z_DIM = 20
    run_id = 5
    num_epochs = 6
    manual_labels_config = TrainValDataIterator.USE_CLUSTER_CENTER  # Possible values "USE_ACTUAL" and "USE_CLUSTER_CENTER"

    ROOT_PATH = "/Users/sunilkumar/concept_learning_old/image_classification_old/"
    _config = ExperimentConfig(ROOT_PATH, 4, Z_DIM, [N_1, N_2, N_3],
                               None,
                               confidence_decay_factor=5,
                               supervise_weight=150,
                               reconstruction_weight=1,
                               beta=5,
                               num_val_samples=128,
                               manual_labels_config=manual_labels_config
                               )
    _config.check_and_create_directories(run_id)
    BATCH_SIZE = _config.BATCH_SIZE
    DATASET_NAME = _config.dataset_name
    # NOTE(review): directories were already created two lines above; this
    # second call with create=False looks redundant — confirm intent.
    _config.check_and_create_directories(run_id, create=False)

    # TODO make this a configuration
    # to change output type from sigmoid to leaky relu, do the following
    # 1. In vae.py change the output layer type in decode()
    # 2. Change the loss function in build_model

    exp = Experiment(1, "VAE_MNIST", 128, _config, run_id)
Exemple #10
0
        TrainValDataIterator.VALIDATION_Y_ONE_HOT: _val_y,
        TrainValDataIterator.VALIDATION_Y_RAW: val_y
    }


if __name__ == "__main__":
    # Test cases for load_images
    from config import ExperimentConfig
    # Decoder layer widths / latent size of the checkpoint being loaded.
    N_3 = 16
    N_2 = 128
    Z_DIM = 20
    run_id = 1

    ROOT_PATH = "/Users/sunilkumar/concept_learning_old/image_classification_old/"
    exp_config = ExperimentConfig(
        ROOT_PATH, 4, Z_DIM, [64, N_2, N_3],
        ExperimentConfig.NUM_CLUSTERS_CONFIG_TWO_TIMES_ELBOW)
    BATCH_SIZE = exp_config.BATCH_SIZE
    DATASET_NAME = exp_config.dataset_name
    # create=False: the run directories must already exist.
    exp_config.check_and_create_directories(run_id, create=False)

    # Smoke-test load_images on the validation split and report the shapes.
    iterator, val_images, val_labels, val_annotations = load_images(
        exp_config, dataset_type="val")
    print("Images shape={}".format(val_images.shape))
    print("Labels shape={}".format(val_labels.shape))
    print("Manual Annotations shape={}".format(val_annotations.shape))

    # Test cases for load_images
    # train_val_iterator, images, labels, manual_annotation = load_images(exp_config, "train",
    #                                                                     exp_config.DATASET_PATH_COMMON_TO_ALL_EXPERIMENTS)
from analysis.annotation_utils import combine_annotation_sessions, combine_multiple_annotations
from analysis.annotation_utils import KEY_FOR_DATA_FRAME
from config import get_base_path, ExperimentConfig, check_and_create_folder

# Initialize variables
debug = False  # when True, intermediate data frames are written under debug/
annotator = "SUNIL"

# Decoder layer widths / latent size of the experiment.
N_3 = 32
N_2 = 128
N_1 = 64
z_dim = 5
run_id = 3
ROOT_PATH = "/Users/sunilkumar/concept_learning_old/image_classification_old/"
exp_config = ExperimentConfig(ROOT_PATH,
                              4,
                              z_dim, [N_1, N_2, N_3],
                              num_cluster_config=None)
exp_config.check_and_create_directories(run_id)

NUMBER_OF_ROWS = 16
NUM_DIGITS_PER_ROW = 4

check_and_create_folder(exp_config.get_annotation_result_path())

# Setting debug = true will write all intermediate data frames
if debug:
    # NOTE(review): `os` is not imported in this fragment — confirm
    # `import os` exists at the top of the file.
    debug_path = os.path.join(exp_config.get_annotation_result_path(),
                              "debug/")
    check_and_create_folder(debug_path)

num_batches_per_epoch = exp_config.num_train_samples // exp_config.BATCH_SIZE
from config import get_base_path, ExperimentConfig, check_and_create_folder, get_keys


# Initialize variables
debug = False  # when True, intermediate data frames are written under debug/
annotator = "SUNIL"

# Decoder layer widths / latent size of the experiment.
N_3 = 32
N_2 = 128
N_1 = 64
z_dim = 10
run_id = 1
ROOT_PATH = "/Users/sunilkumar/concept_learning_old/image_classification_old/"
exp_config = ExperimentConfig(ROOT_PATH,
                              4,
                              z_dim,
                              [N_1, N_2, N_3],
                              num_cluster_config=None
                              )
exp_config.check_and_create_directories(run_id)

NUMBER_OF_ROWS = 16
NUM_DIGITS_PER_ROW = 4

check_and_create_folder(exp_config.get_annotation_result_path())

# Setting debug = true will write all intermediate data frames
if debug:
    # NOTE(review): `os` is not imported in this fragment — confirm
    # `import os` exists at the top of the file.
    debug_path = os.path.join(exp_config.get_annotation_result_path(), "debug/")
    check_and_create_folder(debug_path)

num_batches_per_epoch = exp_config.num_train_samples // exp_config.BATCH_SIZE
class ResolveDuplicate(unittest.TestCase):
    """Checks that combining annotation sessions preserves duplicate counts."""

    keys = ["manual_annotation_set_1", "manual_annotation_set_2"]
    max_epoch = 5

    # Decoder layer widths / latent size of the experiment under test.
    N_3 = 32
    N_2 = 128
    N_1 = 64
    z_dim = 10
    run_id = 1
    root_path = "/Users/sunilkumar/concept_learning_old/image_classification_old/"
    exp_config = ExperimentConfig(root_path,
                                  4,
                                  z_dim, [N_1, N_2, N_3],
                                  num_cluster_config=None)

    def test_combine_multiple_annotations(self):
        """Combining annotations must not change per-set duplicate counts."""
        run_id = 1
        exp_config = ExperimentConfig(self.root_path,
                                      4,
                                      self.z_dim,
                                      [self.N_1, self.N_2, self.N_3],
                                      num_cluster_config=None)
        exp_config.check_and_create_directories(run_id)

        base_path = get_base_path(exp_config.root_path,
                                  exp_config.Z_DIM,
                                  exp_config.num_units[2],
                                  exp_config.num_units[1],
                                  exp_config.num_cluster_config,
                                  run_id=run_id)

        # One data frame per annotator session, keyed by annotator id.
        data_dict = combine_annotation_sessions(keys=self.keys,
                                                base_path=base_path,
                                                max_epoch=self.max_epoch)

        def duplicate_rows(key):
            # Rows flagged as carrying more than one annotated value.
            frame = data_dict[key]["data_frame"]
            return frame[frame["has_multiple_value"]]

        self.assertEqual(duplicate_rows("manual_annotation_set_1").shape[0], 0)
        self.assertEqual(duplicate_rows("manual_annotation_set_2").shape[0], 9)

        combine_multiple_annotations(data_dict, exp_config, run_id)

        # The duplicate counts must be unchanged after combining.
        self.assertEqual(duplicate_rows("manual_annotation_set_1").shape[0], 0)
        self.assertEqual(duplicate_rows("manual_annotation_set_2").shape[0], 9)