Example #1
    def pack_dialogues(self, dataset, beliefs, responses):
        def batch_dialog(dialog):
            # transpose the list of per-turn tuples into per-field tuples;
            # the goal is the same for every turn, so keep only the first one
            (items, goals, turn_beliefs, turn_responses,
             booked_domains) = tuple(zip(*dialog))
            return items, goals[0], turn_beliefs, turn_responses, booked_domains

        if isinstance(dataset, str):
            dataset = load_dataset(dataset, goal=True)
        current_dialogue = []
        for item, belief, response in zip(dataset, beliefs, responses):
            # a context of length one marks the first turn of a new dialogue
            if len(item.context) == 1:
                if current_dialogue:
                    yield batch_dialog(current_dialogue)
                current_dialogue = []
            # print(item, item.goal)
            current_dialogue.append(
                (item, item.goal, belief, response, item.booked_domains))
            # current_dialogue.append((item, item.goal, item.raw_belief, item.response))
        yield batch_dialog(current_dialogue)
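A minimal, self-contained sketch (not taken from the code above) of how a generator like pack_dialogues can be consumed. The Item namedtuple and the toy data are hypothetical stand-ins; the real dataset items only need context, goal and booked_domains attributes, and the standalone function mirrors the method above with the dataset passed in as a ready-made sequence.

from collections import namedtuple

# hypothetical stand-in for the dataset items read by pack_dialogues
Item = namedtuple('Item', ['context', 'goal', 'booked_domains'])

def pack_dialogues(dataset, beliefs, responses):
    def batch_dialog(dialog):
        items, goals, turn_beliefs, turn_responses, booked = tuple(zip(*dialog))
        return items, goals[0], turn_beliefs, turn_responses, booked

    current_dialogue = []
    for item, belief, response in zip(dataset, beliefs, responses):
        if len(item.context) == 1:  # a one-utterance context starts a new dialogue
            if current_dialogue:
                yield batch_dialog(current_dialogue)
            current_dialogue = []
        current_dialogue.append(
            (item, item.goal, belief, response, item.booked_domains))
    yield batch_dialog(current_dialogue)

# two toy dialogues with two turns each
items = [Item(['u1'], 'goal A', []), Item(['u1', 's1', 'u2'], 'goal A', []),
         Item(['u1'], 'goal B', []), Item(['u1', 's1', 'u2'], 'goal B', [])]
for dialogue in pack_dialogues(items, ['b1', 'b2', 'b3', 'b4'],
                               ['r1', 'r2', 'r3', 'r4']):
    print(dialogue[1], '-', len(dialogue[0]), 'turns')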
Example #2
        print('\n#####################')
        print('MC ITERATION: {}/{}'.format(mcIter + 1, options['MCsamples']))
        print('#####################\n')

        # set the correct device to run on
        options['device'] = device

        file_name_general_it = file_name_general + '_MC{}'.format(mcIter)

        # select parameters for toy lgssm
        kwargs = {"k_max_train": 2000, "k_max_val": 2000, "k_max_test": 5000}

        # Specifying datasets
        loaders = loader.load_dataset(
            dataset=options["dataset"],
            dataset_options=options["dataset_options"],
            train_batch_size=options["train_options"].batch_size,
            test_batch_size=options["test_options"].batch_size,
            **kwargs)

        # Compute normalizers
        if options["normalize"]:
            normalizer_input, normalizer_output = compute_normalizer(
                loaders['train'])
        else:
            normalizer_input = normalizer_output = None

        # Define model
        modelstate = ModelState(seed=options["seed"],
                                nu=loaders["train"].nu,
                                ny=loaders["train"].ny,
                                model=options["model"],
Example #3
def run_main_single(options, path_general, file_name_general):
    start_time = time.time()
    print('Run file: main_single.py')
    print(time.strftime("%c"))

    # get correct computing device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print('Device: {}'.format(device))

    # get the options
    options['device'] = device
    options['dataset_options'] = dynsys_params.get_dataset_options(
        options['dataset'])
    options['model_options'] = model_params.get_model_options(
        options['model'], options['dataset'], options['dataset_options'])
    options['train_options'] = train_params.get_train_options(
        options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # print model type and dynamic system type
    print('\n\tModel Type: {}'.format(options['model']))
    print('\tDynamic System: {}\n'.format(options['dataset']))

    file_name_general = file_name_general + '_h{}_z{}_n{}'.format(
        options['model_options'].h_dim, options['model_options'].z_dim,
        options['model_options'].n_layers)
    path = path_general + 'data/'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # set logger
    set_redirects(path, file_name_general)

    # Specifying datasets
    loaders = loader.load_dataset(
        dataset=options["dataset"],
        dataset_options=options["dataset_options"],
        train_batch_size=options["train_options"].batch_size,
        test_batch_size=options["test_options"].batch_size,
    )

    # Compute normalizers
    if options["normalize"]:
        normalizer_input, normalizer_output = compute_normalizer(
            loaders['train'])
    else:
        normalizer_input = normalizer_output = None

    # Define model
    modelstate = ModelState(seed=options["seed"],
                            nu=loaders["train"].nu,
                            ny=loaders["train"].ny,
                            model=options["model"],
                            options=options,
                            normalizer_input=normalizer_input,
                            normalizer_output=normalizer_output)
    modelstate.model.to(options['device'])

    # save the options
    save_options(options, path_general, 'options.txt')

    # allocation
    df = {}
    if options['do_train']:
        # train the model
        df = training.run_train(modelstate=modelstate,
                                loader_train=loaders['train'],
                                loader_valid=loaders['valid'],
                                options=options,
                                dataframe=df,
                                path_general=path_general,
                                file_name_general=file_name_general)

    if options['do_test']:
        # test the model
        df = testing.run_test(options, loaders, df, path_general,
                              file_name_general)

    # save data
    # get saving path
    path = path_general + 'data/'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # to pandas
    df = pd.DataFrame(df)
    # filename
    file_name = file_name_general + '.csv'
    # save data
    df.to_csv(path + file_name)

    # time output
    time_el = time.time() - start_time
    hours = time_el // 3600
    minutes = time_el // 60 - hours * 60
    sec = time_el - minutes * 60 - hours * 3600
    print('Total time of file execution: {:.0f}:{:2.0f}:{:2.0f} [h:min:sec]'.format(
        hours, minutes, sec))
    print(time.strftime("%c"))
Example #4
def get_perf_results(path_general, model_name):
    options = {
        'dataset': 'wiener_hammerstein',
        'model': model_name,
        'logdir': 'final',
        'normalize': True,
        'seed': 1234,
        'optim': 'Adam',
        'showfig': False,
        'savefig': False,
        'MCsamples': 20,
        'gridvalues': {
            'h_values': [30, 40, 50, 60, 70],
            'z_values': [3],
            'n_values': [3],
        },
        'train_set': 'small',
    }
    h_values = options['gridvalues']['h_values']
    z_values = options['gridvalues']['z_values']
    n_values = options['gridvalues']['n_values']

    # get the options
    options['device'] = torch.device('cpu')
    options['dataset_options'] = dynsys_params.get_dataset_options(
        options['dataset'])
    options['model_options'] = model_params.get_model_options(
        options['model'], options['dataset'], options['dataset_options'])
    options['train_options'] = train_params.get_train_options(
        options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # file name
    file_name_general = options['dataset']

    # allocation
    vaf_all_multisine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])
    rmse_all_multisine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])
    likelihood_all_multisine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])

    vaf_all_sweptsine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])
    rmse_all_sweptsine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])
    likelihood_all_sweptsine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])

    for mcIter in range(options['MCsamples']):
        print('\n#####################')
        print('MC ITERATION: {}/{}'.format(mcIter + 1, options['MCsamples']))
        print('#####################\n')

        for i1, h_sel in enumerate(h_values):
            for i2, z_sel in enumerate(z_values):
                for i3, n_sel in enumerate(n_values):

                    # output current choice
                    print('\nCurrent run: h={}, z={}, n={}\n'.format(
                        h_sel, z_sel, n_sel))

                    # get current file name
                    file_name = file_name_general + '_h{}_z{}_n{}_MC{}'.format(
                        h_sel, z_sel, n_sel, mcIter)

                    # set new values in options
                    options['model_options'].h_dim = h_sel
                    options['model_options'].z_dim = z_sel
                    options['model_options'].n_layers = n_sel

                    # Specifying datasets (only matters for testing)
                    kwargs = {
                        'test_set': 'multisine',
                        'MCiter': mcIter,
                        'train_set': options['train_set']
                    }
                    loaders_multisine = loader.load_dataset(
                        dataset=options["dataset"],
                        dataset_options=options["dataset_options"],
                        train_batch_size=options["train_options"].batch_size,
                        test_batch_size=options["test_options"].batch_size,
                        **kwargs)

                    kwargs = {'test_set': 'sweptsine', 'MCiter': mcIter}
                    loaders_sweptsine = loader.load_dataset(
                        dataset=options["dataset"],
                        dataset_options=options["dataset_options"],
                        train_batch_size=options["train_options"].batch_size,
                        test_batch_size=options["test_options"].batch_size,
                        **kwargs)

                    # Compute normalizers
                    if options["normalize"]:
                        normalizer_input, normalizer_output = compute_normalizer(
                            loaders_multisine['train'])
                    else:
                        normalizer_input = normalizer_output = None

                    # Define model
                    modelstate = ModelState(
                        seed=options["seed"],
                        nu=loaders_multisine["train"].nu,
                        ny=loaders_multisine["train"].ny,
                        model=options["model"],
                        options=options,
                        normalizer_input=normalizer_input,
                        normalizer_output=normalizer_output)
                    modelstate.model.to(options['device'])

                    # allocation
                    df = {}

                    # test the model
                    print('\nTest: Multisine')
                    kwargs = {'file_name_add': 'Multisine_'}
                    df_multisine = df
                    df_multisine = testing.run_test(options, loaders_multisine,
                                                    df_multisine, path_general,
                                                    file_name, **kwargs)
                    print('\nTest: Sweptsine')
                    kwargs = {'file_name_add': 'Sweptsine_'}
                    df_sweptsine = {}
                    df_sweptsine = testing.run_test(options, loaders_sweptsine,
                                                    df_sweptsine, path_general,
                                                    file_name, **kwargs)

                    # save performance values
                    vaf_all_multisine[mcIter, i1, i2, i3] = df_multisine['vaf']
                    rmse_all_multisine[mcIter, i1, i2,
                                       i3] = df_multisine['rmse'][0]
                    likelihood_all_multisine[
                        mcIter, i1, i2,
                        i3] = df_multisine['marginal_likeli'].item()

                    vaf_all_sweptsine[mcIter, i1, i2, i3] = df_sweptsine['vaf']
                    rmse_all_sweptsine[mcIter, i1, i2,
                                       i3] = df_sweptsine['rmse'][0]
                    likelihood_all_sweptsine[
                        mcIter, i1, i2,
                        i3] = df_sweptsine['marginal_likeli'].item()
    # save data
    datasaver = {
        'all_vaf_multisine': vaf_all_multisine,
        'all_rmse_multisine': rmse_all_multisine,
        'all_likelihood_multisine': likelihood_all_multisine,
        'all_vaf_sweptsine': vaf_all_sweptsine,
        'all_rmse_sweptsine': rmse_all_sweptsine,
        'all_likelihood_sweptsine': likelihood_all_sweptsine
    }
    # get saving path
    path = path_general + 'data/'
    # filename
    file_name = '{}.pt'.format(options['dataset'])
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # save data
    torch.save(datasaver, path + file_name)

    print('\n')
    print('# ' * 20)
    print('Performance computation for model {}: DONE'.format(model_name))
    print('# ' * 20)
    print('\n')
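A short sketch (ours, not part of get_perf_results) of how the tensors saved above could be loaded back and reduced over the Monte-Carlo dimension. The path below is a placeholder; the file name and dictionary keys follow the datasaver layout in the function.

import torch

# placeholder for wherever get_perf_results wrote its 'data/' folder
path_general = 'log/wiener_hammerstein/'
results = torch.load(path_general + 'data/' + 'wiener_hammerstein.pt')

# each tensor has shape [MCsamples, len(h_values), len(z_values), len(n_values)];
# average over the MC dimension to compare hyper-parameter settings
mean_rmse = results['all_rmse_multisine'].mean(dim=0)
best = (mean_rmse == mean_rmse.min()).nonzero()[0]
print('best (h, z, n) index by mean multisine RMSE:', tuple(best.tolist()))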
Example #5
def run(options=None, load_model=None, mode_interactive=True):

    if options is None:
        options = {}

    if not mode_interactive:
        # Setup save paths and redirect stdout to file
        ctime = time.strftime("%c")
        run_path, run_name = get_run_path(options, ctime)
        # Create folder
        os.makedirs(run_path, exist_ok=True)

    if load_model is not None:
        # Load the saved model's options
        options = load_checkpoint_options_dict(
            os.path.join(os.path.dirname(load_model), "options.txt"), options,
            default_options)
    else:
        # Use the given options and fill in default values
        options = create_full_options_dict(
            options,
            default_options=default_options)  # Fill with default values

    if not mode_interactive:
        if load_model is None:
            save_sourcecode(options, run_path, options['save_code'])

        logger.init(False, True, {
            "project": "MSVAE",
            "run_name": run_name,
            'options': options
        }, {'logdir': run_path})

        # Set stdout to print to file and console
        logger.set_redirects(run_path)

    if options["dataset"] == "mnist":
        if options["model_options"]["dimensions"] != 2:
            raise Exception("Dataset " + options["dataset"] +
                            " requires 2 dimensions!")
    elif options["dataset"] == "pianoroll":
        if options["model_options"]["dimensions"] != 1:
            raise Exception("Dataset " + options["dataset"] +
                            " requires 1 dimensions!")

    # Load datasets
    loaders, statistics = \
        loader.load_dataset(dataset_basepath=options["dataset_basepath"],
                            dataset=options["dataset"],
                            dataset_options=options["dataset_options"],
                            train_batch_size=options["train_options"]["batch_size"],
                            val_batch_size=options["test_options"]["batch_size"])

    # Define Model
    model = Model(n_channels=loaders["train"].nc,
                  statistics=statistics,
                  **options["model_options"])

    # Setup Modelstate
    modelstate = ModelState(seed=options["seed"],
                            model=model,
                            optimizer=options["optimizer"],
                            optimizer_options=options["optimizer_options"],
                            init_lr=options["train_options"]["init_lr"],
                            init_freebits=options["train_options"]
                            ["freebits_options"]["init_threshold"],
                            init_annealing=options["train_options"]
                            ["annealing_options"]["init_annealing"])

    if options["cuda"]:
        modelstate.model.cuda()

    if load_model is not None:
        # Restore model
        current_epoch = modelstate.load_model(load_model)
    else:
        current_epoch = 0

    if not mode_interactive:
        print("Training starting at: " + ctime)

        # Run model
        train.run_train(start_epoch=current_epoch,
                        cuda=options["cuda"],
                        ll_normalization=options["ll_normalization"],
                        ind_latentstate=options["ind_latentstate"],
                        modelstate=modelstate,
                        logdir=run_path,
                        loaders=loaders,
                        train_options=options["train_options"],
                        test_options=options["test_options"])
    else:
        return modelstate, loaders, options
Example #6
def run_main_ndata(options, vary_data, path_general, file_name_general,
                   params):
    print('Run file: main_ndata.py')
    start_time = time.time()
    # get correct computing device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print('Device: {}'.format(device))

    # get the options
    options['device'] = device
    options['dataset_options'] = dynsys_params.get_dataset_options(
        options['dataset'])
    options['model_options'] = model_params.get_model_options(
        options['model'], options['dataset'], options['dataset_options'])
    options['train_options'] = train_params.get_train_options(
        options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # set new values in options
    options['model_options'].h_dim = params['h_best']
    options['model_options'].z_dim = params['z_best']
    options['model_options'].n_layers = params['n_best']

    # print model type and dynamic system type
    print('\n\tModel Type: {}'.format(options['model']))
    print('\tDynamic System: {}\n'.format(options['dataset']))

    path = path_general + 'data/'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # set logger
    set_redirects(path, file_name_general + '_runlog')

    # values of evaluation
    k_max_train_values = vary_data['k_max_train_values']
    k_max_val_values = vary_data['k_max_val_values']
    k_max_test_values = vary_data['k_max_test_values']

    # print number of evaluations
    print('Total number of data point sets: {}'.format(
        len(k_max_train_values)))

    # allocation
    all_vaf = torch.zeros([len(k_max_train_values)])
    all_rmse = torch.zeros([len(k_max_train_values)])
    all_likelihood = torch.zeros([len(k_max_train_values)])
    all_df = {}

    for i, _ in enumerate(k_max_train_values):

        # output current choice
        print('\nCurrent run: k_max_train={}\n'.format(k_max_train_values[i]))

        # get current file name
        file_name = file_name_general + '_kmaxtrain_{}'.format(
            k_max_train_values[i])

        # select parameters
        kwargs = {
            "k_max_train": k_max_train_values[i],
            "k_max_val": k_max_val_values[i],
            "k_max_test": k_max_test_values[i]
        }

        # Specifying datasets
        loaders = loader.load_dataset(
            dataset=options["dataset"],
            dataset_options=options["dataset_options"],
            train_batch_size=options["train_options"].batch_size,
            test_batch_size=options["test_options"].batch_size,
            **kwargs)

        # Compute normalizers
        if options["normalize"]:
            normalizer_input, normalizer_output = compute_normalizer(
                loaders['train'])
        else:
            normalizer_input = normalizer_output = None

        # Define model
        modelstate = ModelState(seed=options["seed"],
                                nu=loaders["train"].nu,
                                ny=loaders["train"].ny,
                                model=options["model"],
                                options=options,
                                normalizer_input=normalizer_input,
                                normalizer_output=normalizer_output)
        modelstate.model.to(options['device'])

        # allocation
        df = {}

        if options['do_train']:
            # train the model
            df = training.run_train(
                modelstate=modelstate,
                loader_train=loaders['train'],
                loader_valid=loaders['valid'],
                options=options,
                dataframe=df,
                path_general=path_general,
                file_name_general=file_name,
            )

        if options['do_test']:
            # test the model
            df = testing.run_test(options, loaders, df, path_general,
                                  file_name)

        # store values
        all_df[i] = df

        # save performance values
        all_vaf[i] = df['vaf']
        all_rmse[i] = df['rmse'][0]
        all_likelihood[i] = df['marginal_likeli'].item()

    # save data
    # get saving path
    path = path_general + 'data/'
    # to pandas
    all_df = pd.DataFrame(all_df)
    # filename
    file_name = file_name_general + '_gridsearch.csv'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # save data
    all_df.to_csv(path_general + file_name)
    # save performance values
    torch.save(all_vaf, path_general + 'data/' + 'all_vaf.pt')
    torch.save(all_rmse, path_general + 'data/' + 'all_rmse.pt')
    torch.save(all_likelihood, path_general + 'data/' + 'all_likelihood.pt')

    # plot performance
    dv.plot_perf_ndata(k_max_train_values, all_vaf, all_rmse, all_likelihood,
                       options, path_general)

    # time output
    time_el = time.time() - start_time
    hours = time_el // 3600
    minutes = time_el // 60 - hours * 60
    sec = time_el - minutes * 60 - hours * 3600
    print('Total time of file execution: {:.0f}:{:2.0f}:{:2.0f} [h:min:sec]'.format(
        hours, minutes, sec))
Example #7
def run_main_gridsearch(options, kwargs, gridvalues, path_general, file_name_general):
    print('Run file: main_gridsearch.py')
    start_time = time.time()
    # get correct computing device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print('Device: {}'.format(device))

    # get the options
    options['device'] = device
    options['dataset_options'] = dynsys_params.get_dataset_options(options['dataset'])
    options['model_options'] = model_params.get_model_options(options['model'], options['dataset'],
                                                              options['dataset_options'])
    options['train_options'] = train_params.get_train_options(options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # print model type and dynamic system type
    print('\n\tModel Type: {}'.format(options['model']))
    print('\tDynamic System: {}\n'.format(options['dataset']))

    path = path_general + 'data/'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # set logger
    set_redirects(path, file_name_general+'_runlog')

    h_values = gridvalues['h_values']
    z_values = gridvalues['z_values']
    n_values = gridvalues['n_values']

    # print number of searches
    temp = len(h_values) * len(z_values) * len(n_values)
    print('Total number of search points: {}'.format(temp))

    # allocation
    all_vaf = torch.zeros([len(h_values), len(z_values), len(n_values)])
    all_rmse = torch.zeros([len(h_values), len(z_values), len(n_values)])
    all_likelihood = torch.zeros([len(h_values), len(z_values), len(n_values)])
    all_df = {}

    for i1, h_sel in enumerate(h_values):
        for i2, z_sel in enumerate(z_values):
            for i3, n_sel in enumerate(n_values):

                # output current choice
                print('\nCurrent run: h={}, z={}, n={}\n'.format(h_sel, z_sel, n_sel))

                # get current file names
                file_name = file_name_general + '_h{}_z{}_n{}'.format(h_sel, z_sel, n_sel)

                # set new values in options
                options['model_options'].h_dim = h_sel
                options['model_options'].z_dim = z_sel
                options['model_options'].n_layers = n_sel

                # Specifying datasets
                loaders = loader.load_dataset(dataset=options["dataset"],
                                              dataset_options=options["dataset_options"],
                                              train_batch_size=options["train_options"].batch_size,
                                              test_batch_size=options["test_options"].batch_size,
                                              **kwargs)

                # Compute normalizers
                if options["normalize"]:
                    normalizer_input, normalizer_output = compute_normalizer(loaders['train'])
                else:
                    normalizer_input = normalizer_output = None

                # Define model
                modelstate = ModelState(seed=options["seed"],
                                        nu=loaders["train"].nu, ny=loaders["train"].ny,
                                        model=options["model"],
                                        options=options,
                                        normalizer_input=normalizer_input,
                                        normalizer_output=normalizer_output)
                modelstate.model.to(options['device'])

                # allocation
                df = {}

                if options['do_train']:
                    # train the model
                    df = training.run_train(modelstate=modelstate,
                                            loader_train=loaders['train'],
                                            loader_valid=loaders['valid'],
                                            options=options,
                                            dataframe=df,
                                            path_general=path_general,
                                            file_name_general=file_name)

                if options['do_test']:
                    # test the model
                    df = testing.run_test(options, loaders, df, path_general, file_name)

                # store values
                all_df[(i1, i2, i3)] = df

                # save performance values
                all_vaf[i1, i2, i3] = df['vaf']
                all_rmse[i1, i2, i3] = df['rmse'][0]
                all_likelihood[i1, i2, i3] = df['marginal_likeli'].item()

    # save data
    # get saving path
    path = path_general + 'data/'
    # to pandas
    all_df = pd.DataFrame(all_df)
    # filename
    file_name = '{}_gridsearch.csv'.format(options['dataset'])
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)

    # save data
    all_df.to_csv(path_general + file_name)
    # save performance values
    torch.save(all_vaf, path_general + 'data/' + 'all_vaf.pt')
    torch.save(all_rmse, path_general + 'data/' + 'all_rmse.pt')
    torch.save(all_likelihood, path_general + 'data/' + 'all_likelihood.pt')

    # output best parameters
    all_vaf = all_vaf.numpy()
    i, j, k = np.unravel_index(all_vaf.argmax(), all_vaf.shape)
    print('Best Parameters max vaf={}, h={}, z={}, n={}, ind(h,z,n)=({},{},{})'.format(all_vaf[i, j, k],
                                                                                       h_values[i],
                                                                                       z_values[j],
                                                                                       n_values[k], i, j, k))
    all_rmse = all_rmse.numpy()
    i, j, k = np.unravel_index(all_rmse.argmin(), all_rmse.shape)
    print('Best Parameters min rmse={}, h={}, z={}, n={}, ind(h,z,n)=({},{},{})'.format(all_rmse[i, j, k],
                                                                                        h_values[i],
                                                                                        z_values[j],
                                                                                        n_values[k], i, j, k))
    all_likelihood = all_likelihood.numpy()
    i, j, k = np.unravel_index(all_likelihood.argmax(), all_likelihood.shape)
    print('Best Parameters max likelihood={}, h={}, z={}, n={}, ind(h,z,n)=({},{},{})'.format(all_likelihood[i, j, k],
                                                                                              h_values[i],
                                                                                              z_values[j],
                                                                                              n_values[k], i, j, k))

    # plot results
    dv.plot_perf_gridsearch(all_vaf, all_rmse, all_likelihood, z_values, h_values, path_general, options)

    # time output
    time_el = time.time() - start_time
    hours = time_el // 3600
    minutes = time_el // 60 - hours * 60
    sec = time_el - minutes * 60 - hours * 3600
    print('Total time of file execution: {:2.0f}:{:2.0f}:{:2.0f} [h:min:sec]'.format(hours, minutes, sec))