Example No. 1
def test_load_save_args(self):
    parser = argparse.ArgumentParser()
    args = parser.parse_args(args=[])
    args.__dict__ = {"name": "test", "foo": "bar"}
    path = os.path.join(TMP, "args")
    ensure_dir(path)
    save_args(args, path)
    args_loaded = load_args(path)
    self.assertEqual(args, args_loaded)
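A minimal sketch of save_args/load_args helpers that would satisfy the round-trip test above, assuming the Namespace is serialized as JSON inside the given directory (the actual helpers in the project may differ):

import argparse
import json
import os


def save_args(args, path):
    # Hypothetical helper: dump the Namespace's attributes to <path>/args.json
    with open(os.path.join(path, "args.json"), "w") as f:
        json.dump(vars(args), f, indent=4)


def load_args(path):
    # Hypothetical helper: rebuild the Namespace from <path>/args.json.
    # argparse.Namespace instances compare equal when their attribute dicts
    # match, which is what the assertEqual above relies on.
    with open(os.path.join(path, "args.json")) as f:
        return argparse.Namespace(**json.load(f))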
Example No. 2
def main():

    args = load_args()
    init_random_seeds(args.seed)

    # EXPORT ARGS AS JSON
    json_path = export_args(args)  # write to file
    json_args = load_json_args(json_path)  # read from file
    fprint("RUNNING ARGS:\n{}\n".format(json.dumps(json_args, indent=4)), args)

    fprint("Python Version: {}".format(platform.python_version()), args)
    fprint("PyTorch Version: {}".format(torch.__version__), args)
    fprint(
        "Torchvision Version: {}".format(
            torchvision.__version__.split('a')[0]), args)

    # Get data loaders
    data_loaders = get_data_loaders(args)

    # Initialize model
    model, params_to_update = initialize_model(is_pretrained=args.pretrained)

    fprint("\nARCHITECTURE:\n{}\n".format(model), args)

    for name, param in model.named_parameters():
        fprint("{:25} requires_grad = {}".format(name, param.requires_grad),
               args)

    # Send the model to CPU or GPU
    model = model.to(torch.device(args.device))

    # Setup the optimizer
    if args.optimizer == 'sgdm':
        optimizer = optim.SGD(params_to_update,
                              lr=args.lr,
                              weight_decay=args.weight_decay,
                              momentum=0.9)
    elif args.optimizer == 'adam':
        optimizer = optim.AdamW(params_to_update,
                                lr=args.lr,
                                weight_decay=args.weight_decay)

    # Setup the loss function
    criterion = torch.nn.CrossEntropyLoss()

    # Train and evaluate
    model, optimizer = train_model(model, data_loaders, criterion, optimizer,
                                   args)

    # Test
    test_model(model, data_loaders, args)

    # Generate plots:
    generate_plots(json_path)
Example No. 3
def build_args(self):
    if self.model_name in ['kgat', 'cfkg', 'nfm', 'cke']:
        from utility.parser import parse_args
    elif self.model_name.startswith('EKGCN'):
        from main import parse_args
    elif self.model_name == 'ripple':
        from ripple_main import parse_args
    args = parse_args()
    if self.arg_file is not None:
        args = load_args(self.arg_file, args)
    return args
Example No. 4
def main():
    input_dim = 6
    spatial_dims = [0, 1, 2]
    args = utils.read_args()

    experiment_dir = utils.get_experiment_dir(args.name, args.run)
    utils.initialize_experiment_if_needed(experiment_dir, args.evaluate)
    # Logger will print to stdout and logfile
    utils.initialize_logger(experiment_dir)

    # Optionally restore arguments from previous training
    # Useful if training is interrupted
    if not args.evaluate:
        try:
            args = utils.load_args(experiment_dir)
        except Exception:
            args.best_tpr = 0.0
            args.nb_epochs_complete = 0  # Track in case training interrupted
            utils.save_args(experiment_dir, args)  # Save initial args

    net = utils.create_or_restore_model(experiment_dir, args.nb_hidden,
                                        args.nb_layer, input_dim, spatial_dims)
    if torch.cuda.is_available():
        net = net.cuda()
        logging.warning("Training on GPU")
        logging.info("GPU type:\n{}".format(torch.cuda.get_device_name(0)))
    criterion = nn.functional.binary_cross_entropy
    if not args.evaluate:
        assert args.train_file is not None
        assert args.val_file is not None
        train_loader = construct_loader(args.train_file,
                                        args.nb_train,
                                        args.batch_size,
                                        shuffle=True)
        valid_loader = construct_loader(args.val_file, args.nb_val,
                                        args.batch_size)
        logging.info("Training on {} samples.".format(
            len(train_loader) * args.batch_size))
        logging.info("Validate on {} samples.".format(
            len(valid_loader) * args.batch_size))
        train(net, criterion, args, experiment_dir, train_loader, valid_loader)

    # Perform evaluation over test set
    try:
        net = utils.load_best_model(experiment_dir)
        logging.warning("\nBest model loaded for evaluation on test set.")
    except Exception:
        logging.warning(
            "\nCould not load best model for test set. Using current.")
    assert args.test_file is not None
    test_loader = construct_loader(args.test_file, args.nb_test,
                                   args.batch_size)
    test_stats = evaluate(net, criterion, experiment_dir, args, test_loader,
                          TEST_NAME)
Example No. 5
import argparse

from utils import load_args
from data_model import DataModel
from predicate_alignment import PredicateAlignModel
from MultiKE_Late import MultiKE_Late

parser = argparse.ArgumentParser(description='run')
parser.add_argument('--training_data', type=str, default='')
parser_args = parser.parse_args()

if __name__ == '__main__':
    args = load_args('args.json')
    args.training_data = parser_args.training_data
    data = DataModel(args)
    attr_align_model = PredicateAlignModel(data.kgs, args)
    model = MultiKE_Late(data, args, attr_align_model)
    model.run()
Example No. 6
                        '--intervals',
                        nargs='*',
                        type=int,
                        default=None,
                        help='select from rsg intervals')
    # delay memory pro anti preset angles
    parser.add_argument('--angles',
                        nargs='*',
                        type=float,
                        default=None,
                        help='angles in degrees for dmpa tasks')

    args = parser.parse_args()
    if args.config is not None:
        # if using config file, load args from config, ignore everything else
        config_args = load_args(args.config)
        del config_args.name
        del config_args.config
        args = update_args(args, config_args)
    else:
        # add task-specific arguments. shouldn't need to do this if loading from config file
        task_args = get_task_args(args)
        args = update_args(args, task_args)

    args.argv = ' '.join(sys.argv)

    if args.mode == 'create':
        # create and save a dataset
        dset, config = create_dataset(args)
        save_dataset(dset, args.name, config=config)
    elif args.mode == 'load':
Example No. 7
logger = logging.getLogger('model')
fh = logging.FileHandler('logs/model.log')
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
fh.setLevel('DEBUG')
logger.addHandler(fh)
logger.setLevel('DEBUG')


if __name__ == "__main__":
    # Parse arguments. The arguments contain the (path to the) data and the
    # parameters to the model. Provide the parameters as
    # [<name>, <type>, <default value>]
    parameters = load_args([
        ['weights', str, '.'],
        ['LR_INIT', float, 1e-4],
        ['LR_END', float, 1e-6],
        ['WARMUP_EPOCHS', int, 2],
        ['EPOCHS', int, 100]
    ])

    logger.debug(parameters.LR_INIT)
    logger.debug(parameters.LR_END)

    # Move weights file to correct folder
    for f in os.listdir(parameters.weights):
        shutil.copy(
            f"{parameters.weights}/{f}",
            f"Tensorflow_YOLO/model_data/{f}"
        )

    # Load/prepare the datasets
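Several of the snippets here pass load_args a list of [<name>, <type>, <default value>] triples, as described in the comment above. A minimal, hypothetical sketch of such a helper, assuming it simply turns each triple into an optional command-line argument (the project's real helper evidently also supplies dataset arguments such as train_sets, which this sketch does not cover):

import argparse


def load_args(parameter_specs):
    # Build an argparse parser from [<name>, <type>, <default value>] triples
    # and parse the command line, returning a Namespace of parameters.
    parser = argparse.ArgumentParser()
    for name, arg_type, default in parameter_specs:
        parser.add_argument("--{}".format(name), type=arg_type, default=default)
    return parser.parse_args()


# Example usage matching the snippet above:
# parameters = load_args([['weights', str, '.'], ['EPOCHS', int, 100]])
# print(parameters.weights, parameters.EPOCHS)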
Example No. 8
import argparse
import os

from utils import load_args
from data_model import DataModel
from predicate_alignment import PredicateAlignModel
from MultiKE_CSL import MultiKE_CV
from MultiKE_Late import MultiKE_Late


parser = argparse.ArgumentParser(description='run')
parser.add_argument('--method', type=str, default='ITC')
parser.add_argument('--data', type=str, required=True)
parser.add_argument('--mode', type=str, default='TransE')
parser_args = parser.parse_args()


if __name__ == '__main__':
    args = load_args(os.path.join(os.path.dirname(__file__), 'args.json'))
    args.training_data = parser_args.data
    if 'BootEA' in parser_args.data:
        args.dataset_division = '631/'
    args.mode = parser_args.mode.lower()
    if parser_args.mode == 'MDE':
        args.vector_num = 8
    data = DataModel(args)
    attr_align_model = PredicateAlignModel(data.kgs, args)
    if parser_args.method == 'ITC':
        model = MultiKE_CV(data, args, attr_align_model)
    elif parser_args.method == 'SSL':
        model = MultiKE_Late(data, args, attr_align_model)
    model.run()
Example No. 9
def main():
    merged_df = load_data()
    args = load_args()

    plot(merged_df, args.county, args.statistic, args.num_miles)
Example No. 10
tf.debugging.set_log_device_placement(True)

logger = logging.getLogger('model')
fh = logging.FileHandler('logs/model.log')
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
fh.setLevel('DEBUG')
logger.addHandler(fh)
logger.setLevel('DEBUG')

if __name__ == "__main__":
    # Parse arguments. The arguments contain the (path to the) data and the
    # parameters to the model. Provide the parameters as
    # [<name>, <type>, <default value>]
    parameters = load_args([['model_name', str, ''],
                            ['num_train_steps', int, 1002],
                            ['sample_1_of_n_eval_examples', int, 1],
                            ['checkpoint_files', str, '.'],
                            ['config_num', str, '']])

    # Move checkpoint files to this folder
    logger.debug(os.listdir(parameters.checkpoint_files))
    os.makedirs('tf2od/pretrained_model')
    for f in os.listdir(f"{parameters.checkpoint_files}/"):
        if f.startswith(parameters.model_name):
            logger.debug(f)
            shutil.copy(f"{parameters.checkpoint_files}/{f}",
                        f"tf2od/pretrained_model/{f}")
    logger.debug(os.listdir('.'))

    # Generate as CSV, list of labels as labelmap.pbtxt
    train_path = save_set_as_tfrecords('train', parameters.train_sets,
Example No. 11
from utils import load_args, find_set, load_set_as_txt

logger = logging.getLogger('model')
fh = logging.FileHandler('logs/model.log')
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
fh.setLevel('DEBUG')
logger.addHandler(fh)
logger.setLevel('DEBUG')


if __name__ == "__main__":
    # Parse arguments. The arguments contain the (path to the) data and the
    # parameters to the model. Provide the parameters as
    # [<name>, <type>, <default value>]
    parameters = load_args([
        ['param_a', float, 10.0],
        ['param_b', float, 0.5]
    ])

    param_a = parameters.param_a
    param_b = parameters.param_b

    train_set, train_labels = load_set_as_txt('train', parameters.train_sets)
    test_set, test_labels = load_set_as_txt('test', parameters.test_sets)

    #### Implement/perform model training ####

    logger.info("Starting training")

    # Train the model
    model = Model(param_a, param_b)
    model.train(train_set, train_labels)
Example No. 12
import argparse
import logging

logger = logging.getLogger('model')
fh = logging.FileHandler('logs/model.log')
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
fh.setLevel('DEBUG')
logger.addHandler(fh)
logger.setLevel('DEBUG')

if __name__ == "__main__":
    # Parse arguments. The arguments contain the (path to the) data and the
    # parameters to the model. Provide the parameters as
    # [<name>, <type>, <default value>]
    # TODO add the required parameters.
    parameters = load_args([['regularization_rate', float, 0.01]])

    # TODO: Validate that this version of load_set generates the type of output
    #       that is required for the model.
    train_set, train_labels = load_set_as_txt('train', parameters.train_sets)
    test_set, test_labels = load_set_as_txt('test', parameters.test_sets)

    # TODO: This is an example, adjust as required:
    regularization_rate = parameters.regularization_rate

    #### Implement/perform model training ####

    logger.info("Starting training")

    # TODO Add model here and training here
    model = None
Example No. 13
logger = logging.getLogger('model')
fh = logging.FileHandler('logs/model.log')
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
fh.setLevel('DEBUG')
logger.addHandler(fh)
logger.setLevel('DEBUG')


if __name__ == "__main__":
    # Parse arguments. The arguments contain the (path to the) data and the
    # parameters to the model. Provide the parameters as
    # [<name>, <type>, <default value>]
    parameters = load_args([
        ['num_train_steps', int, 10000],
        ['sample_1_of_n_eval_examples', int, 1],
        ['checkpoint_dataset', str, '.']
    ])

    num_train_steps = parameters.num_train_steps
    sample_1_of_n_eval_examples = parameters.sample_1_of_n_eval_examples

    # Move checkpoint files to this folder
    logger.debug(os.listdir(parameters.checkpoint_dataset))
    for f in os.listdir(f"{parameters.checkpoint_dataset}/"):
        logger.debug(f)
        shutil.copy(
            f"{parameters.checkpoint_dataset}/{f}",
            f"./{f}"
        )
Example No. 14
        acc_r = eval(test_iterator, model, params, logger, rotate=True)
        logger.info(
            "**************** Epoch: [{epoch}/{total_epoch}] [R] Accuracy: [{acc}] ****************\n".format(
                epoch=epoch,
                total_epoch=params['num_epochs'],
                loss=np.mean(running_loss),
                acc=acc_r))

    logger.info('Finished Training')


if __name__ == '__main__':

    args = utils.load_args()

    params = {
        'train_dir': os.path.join(args.data_path, "train"),
        'test_dir' : os.path.join(args.data_path, "test"),
        'save_dir' : os.path.join('./', "save"),
        
        'gpu'       : args.gpu,
        'num_epochs': args.num_epochs,
        'batch_size': args.batch_size,
        'num_points': args.num_points,
        'visualize' : bool(args.visualize),

        'log_interval': args.log_interval,
        'save_interval': args.save_interval,
        'baselr': args.baselr,
Example No. 15
def adjust_args(args):
    # do not call logging.info before the logger is initialized below; otherwise logging setup will fail

    # handle slurm parameters first, before anything else;
    # this must happen before seed setting so the seeds can be set from the slurm params
    if args.slurm_id is not None:
        from parameters import apply_parameters
        args = apply_parameters(args.slurm_param_path, args)

    # loading from a config file
    if args.config is not None:
        config = load_args(args.config)
        args = update_args(args, config)

    # setting seeds
    if args.res_seed is None:
        args.res_seed = random.randrange(int(1e6))
    if args.seed is None:
        args.seed = random.randrange(int(1e6))
    if args.network_seed is None:
        args.network_seed = random.randrange(int(1e6))

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # TODO
    # In case we are loading from a model: without this we may hit an error
    # when loading the model, because the new run uses a new seed.
    if args.model_path is not None:
        config = get_config(args.model_path)
        args = update_args(args, config,
                           overwrite=None)  # overwrite Nones only
        enforce_same = ['N', 'D1', 'D2', 'net', 'res_bias', 'use_reservoir']
        for v in enforce_same:
            if v in config and args.__dict__[v] != config[v]:
                print(
                    f'Warning: based on config, changed {v} from {args.__dict__[v]} -> {config[v]}'
                )
                args.__dict__[v] = config[v]

    # shortcut for specifying train everything including reservoir
    if args.train_parts == ['all']:
        args.train_parts = ['']

    # shortcut for training in designated order
    if args.sequential and len(args.train_order) == 0:
        args.train_order = list(range(len(args.dataset)))

    # TODO
    if 'rsg' in args.dataset[0]:
        args.out_act = 'exp'
    else:
        args.out_act = 'none'

    # number of task variables, latent variables, and output variables
    args.T = len(args.dataset)
    L, Z = 0, 0
    for dset in args.dataset:
        config = get_config(dset, ctype='dset', to_bunch=True)
        L = max(L, config.L)
        Z = max(Z, config.Z)
    args.L = L
    args.Z = Z

    # initializing logging
    # do this last, because we will be logging previous parameters into the config file
    if not args.no_log:
        if args.slurm_id is not None:
            log = log_this(args,
                           'logs',
                           os.path.join(
                               args.name.split('_')[0],
                               args.name.split('_')[1]),
                           checkpoints=args.log_checkpoint_models)
        else:
            log = log_this(args,
                           'logs',
                           args.name,
                           checkpoints=args.log_checkpoint_models)

        logging.basicConfig(format='%(message)s',
                            filename=log.run_log,
                            level=logging.DEBUG)
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        logging.getLogger('').addHandler(console)
        args.log = log
    else:
        logging.basicConfig(format='%(message)s', level=logging.DEBUG)
        logging.info('NOT LOGGING THIS RUN.')

    # logging, when loading models from paths
    if args.model_path is not None:
        logging.info(f'Using model path {args.model_path}')
        if args.model_config_path is not None:
            logging.info(f'...with config file {args.model_config_path}')
        else:
            logging.info(
                '...but not using any config file. Errors may ensue due to net param mismatches'
            )

    return args
Example No. 16
if __name__ == '__main__':
    devices = ['cuda', 'cpu'] if torch.cuda.is_available() else ['cpu']
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('folder', type=str, help="Folder with a trained model.")
    parser.add_argument('-tc', '--test_count', default=1, type=int, help="Number of episodes to test the model", dest="test_count")
    parser.add_argument('-g', '--greedy', action='store_true', help='Determines whether to use a stochastic or deterministic policy')
    parser.add_argument('-d', '--device', default=devices[0], type=str, choices=devices,
        help="Device to be used ('cpu' or 'cuda'). Use CUDA_VISIBLE_DEVICES to specify a particular GPU", dest="device")
    parser.add_argument('-v', '--visualize', action='store_true')
    parser.add_argument('--old_preprocessing', action='store_true',
                        help="""Previous image preprocessing squashed values in a [0, 255] int range to a [0.,1.] float range.
                                The new one returns an image with values in a [-1.,1.] float range.""")

    args = parser.parse_args()
    train_args = utils.load_args(folder=args.folder)
    args = fix_args_for_test(args, train_args)

    checkpoint_path = utils.join_path(
        args.folder, ParallelActorCritic.CHECKPOINT_SUBDIR, ParallelActorCritic.CHECKPOINT_LAST
    )
    env_creator = get_environment_creator(args)
    network = create_network(args, env_creator.num_actions, env_creator.obs_shape)
    steps_trained = load_trained_weights(network, checkpoint_path, args.device == 'cpu')

    if args.old_preprocessing:
        network._preprocess = old_preprocess_images

    print(args_to_str(args), '=='*30, sep='\n')
    print('Model was trained for {} steps'.format(steps_trained))
    if not args.visualize:
Example No. 17
logger = logging.getLogger('model')
fh = logging.FileHandler('logs/model.log')
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
fh.setLevel('DEBUG')
logger.addHandler(fh)
logger.setLevel('DEBUG')


if __name__ == "__main__":
    # Parse arguments. The arguments contain the (path to the) data and the
    # parameters to the model. Provide the parameters as
    # [<name>, <type>, <default value>]
    # TODO add the required parameters.
    parameters = load_args([
        ['weights', str, '.']
    ])

    dataset_path = load_datasets_for_yolo_v5(
        parameters.train_sets,
        parameters.test_sets
    )

    # Move weights file to correct folder

#     shutil.copy(
#             f"{parameters.weights}",
#             f"yolov5/weights/{parameters.weights}"
#     )

    #### Implement/perform model training ####
Example No. 18
def start_training(args):
    """
	Function to start a training loop.
	"""
    if args.reverse:  # existing bugs
        dataset_name = args.dataset
        dataset_class = TaskLanguageModeling.get_dataset_class(dataset_name)
        vocab_dict = dataset_class.get_vocabulary()
        vocab_torchtext = dataset_class.get_torchtext_vocab()

        model_name = args.model_name
        model_params, _ = args_to_params(args)
        if model_name == "RNN":
            model = LSTMModel(num_classes=len(vocab_dict),
                              vocab=vocab_torchtext,
                              model_params=model_params)
        elif model_name == "CNF":
            model = CNFLanguageModeling(model_params=model_params,
                                        vocab_size=len(vocab_dict),
                                        vocab=vocab_torchtext,
                                        dataset_class=dataset_class)
        elif model_name in ["DAF", "DBF"]:
            model = DFModel(num_classes=len(vocab_dict),
                            batch_size=args.batch_size,
                            model_params=model_params,
                            model_name=model_name)

        # load best model
        checkpoint_file = args.best_model_file_path
        print("Loading checkpoint \"" + str(checkpoint_file) + "\"")
        if torch.cuda.is_available():
            checkpoint = torch.load(checkpoint_file)
        else:
            checkpoint = torch.load(checkpoint_file, map_location='cpu')

        pretrained_model_dict = {
            key: val
            for key, val in checkpoint['model_state_dict'].items()
        }
        model_dict = model.state_dict()
        model_dict.update(pretrained_model_dict)
        model.load_state_dict(model_dict)
        model = model.to(get_device())

        # sampling
        if model_name in ["DAF", "DBF"]:
            data_distribution = torch.distributions.OneHotCategorical(
                logits=model.base_log_probs)
        else:
            data_distribution = torch.distributions.OneHotCategorical(
                logits=torch.randn(args.max_seq_len, len(vocab_dict)))
        samples = data_distribution.sample([args.num_samples]).to(get_device())
        start_time = time.time()
        model(samples, reverse=True)
        end_time = time.time()
        print("generating a sequence of length {} takes {}s".format(
            args.max_seq_len, end_time - start_time))
    else:

        if args.cluster:
            set_debug_level(2)
            loss_freq = 250
        else:
            set_debug_level(0)
            loss_freq = 2
            if args.debug:
                # To find possible errors more easily, activate anomaly detection. Note that this slows down training.
                torch.autograd.set_detect_anomaly(True)

        if args.print_freq > 0:
            loss_freq = args.print_freq

        only_eval = args.only_eval

        if args.load_config:
            if args.checkpoint_path is None:
                print(
                    "[!] ERROR: Please specify the checkpoint path to load the config from."
                )
                sys.exit(1)
            debug = args.debug
            checkpoint_path = args.checkpoint_path
            args = load_args(args.checkpoint_path)
            args.clean_up = False
            args.checkpoint_path = checkpoint_path
            if only_eval:
                args.use_multi_gpu = False
                args.debug = debug

        # Setup training
        model_params, optimizer_params = args_to_params(
            args
        )  # make params to dict, set seed, model_params include prior distribution, cate encoding, scheduler...
        trainModule = TrainLanguageModeling(
            model_params=model_params,
            optimizer_params=optimizer_params,
            batch_size=args.batch_size,
            checkpoint_path=args.checkpoint_path,
            debug=args.debug,
            multi_gpu=args.use_multi_gpu)

        # Function for cleaning up the checkpoint directory
        def clean_up_dir():
            assert str(trainModule.checkpoint_path) not in ["/", "/home/", "/lhome/"], \
              "[!] ERROR: Checkpoint path is \"%s\" and is selected to be cleaned. This is probably not wanted..." % str(trainModule.checkpoint_path)
            print("Cleaning up directory " + str(trainModule.checkpoint_path) +
                  "...")
            for file_in_dir in sorted(
                    glob(os.path.join(trainModule.checkpoint_path, "*"))):
                print("Removing file " + file_in_dir)
                try:
                    if os.path.isfile(file_in_dir):
                        os.remove(file_in_dir)
                    elif os.path.isdir(file_in_dir):
                        shutil.rmtree(file_in_dir)
                except Exception as e:
                    print(e)

        if args.restart and args.checkpoint_path is not None and os.path.isdir(
                args.checkpoint_path) and not only_eval:
            clean_up_dir()

        if not only_eval:
            # Save argument namespace object for loading/evaluation
            args_filename = os.path.join(trainModule.checkpoint_path,
                                         PARAM_CONFIG_FILE)
            with open(args_filename, "wb") as f:
                pickle.dump(args, f)

            # Start training
            trainModule.train_model(
                args.max_iterations,
                loss_freq=loss_freq,
                eval_freq=args.eval_freq,
                save_freq=args.save_freq,
                no_model_checkpoints=args.no_model_checkpoints)

            # Cleaning up the checkpoint directory afterwards if selected
            if args.clean_up:
                clean_up_dir()
                os.rmdir(trainModule.checkpoint_path)
        else:
            # Only evaluating the model. Should be combined with loading a model.
            # However, the recommended way of evaluating a model is by the "eval.py" file in the experiment folder(s).
            trainModule.evaluate_model()
    args = parser.parse_args()

    # Get file paths
    data = args.data.split('&')
    labels = None if len(args.labels) == 0 else args.labels.split('&')
    if labels is not None and len(data) != len(labels):
        raise ValueError('Number of labels must match number of sequences!')

    # Load data
    def get_rewards(data):
        for d in data:
            with open(get_results_path(d) + 'rewards.pkl', 'rb') as f:
                yield pickle.load(f)

    # Load args
    args2 = load_args(data[0])
    if args.print_args is True:
        print(args2)

    # Load walltime
    def get_walltime(data):
        for d in data:
            with open(get_results_path(d) + 'walltime.pkl', 'rb') as f:
                yield pickle.load(f)

    rewards = list(get_rewards(data))
    walltime = list(get_walltime(data)) if args.walltime is True else None
    running_mean, running_std = [], []
    running_walltime = []

    for j in range(len(rewards)):