Example #1
def train_mlp_sklearn():
    input_dim = 32 * 32
    hidden_dim = 200
    output_dim = 10
    lr = 0.001
    train_data, train_label = datasets.load_datasets("digits/trainingDigits")
    train_label = [np.argmax(label) for label in train_label]
    train_data = train_data.reshape(train_data.shape[0], train_data.shape[2])
    model = MLPClassifier(hidden_layer_sizes=(hidden_dim,), activation="tanh", solver="sgd", learning_rate_init=lr,
                          max_iter=1000)
    model.fit(train_data, train_label)

    test_data, test_label = datasets.load_datasets("digits/testDigits")
    test_label = [np.argmax(label) for label in test_label]
    test_data = test_data.reshape(test_data.shape[0], test_data.shape[2])
    predictions = model.predict(test_data)
    ok_predictions = 0
    for i in range(len(predictions)):
        expected = test_label[i]
        prediction = predictions[i]
        if expected == prediction:
            ok_predictions += 1

    accuracy = round((ok_predictions / len(predictions)) * 100, 2)
    wrong_numbers = len(predictions) - ok_predictions
    print("Accuracy on test data: " + str(accuracy) + "%")
    print(f"wrong_numbers: {wrong_numbers}")
Example #2
def main():
    client_config = ClientConfig(
        idx=args.idx,
        master_ip_addr=args.master_ip,
        action=""
    )
    print("start")
    
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    tasks = []
    task = asyncio.ensure_future(get_init_config(client_config))
    tasks.append(task)
    loop.run_until_complete(asyncio.wait(tasks))
    loop.close()

    train_dataset, test_dataset = datasets.load_datasets(client_config.custom["dataset_type"])
    train_loader = utils.create_dataloaders(train_dataset, batch_size=args.batch_size, selected_idxs=client_config.custom["train_data_idxes"])
    test_loader = utils.create_dataloaders(test_dataset, batch_size=128, selected_idxs=client_config.custom["test_data_idxes"], shuffle=False)

    while True:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        tasks = []
        tasks.append(
            asyncio.ensure_future(
                local_training(client_config, train_loader, test_loader)
            )
        )
        loop.run_until_complete(asyncio.wait(tasks))

        loop.close()
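
Creating and closing a fresh event loop for each call, as above, still works but is dated. On Python 3.7+ the same flow can be written with `asyncio.run`, which creates, runs, and closes the loop in one call. A sketch assuming the same `get_init_config` and `local_training` coroutines; `build_loaders` is a hypothetical stand-in for the dataloader setup in main():

async def run_client(client_config):
    # One-time setup round, then train indefinitely (sketch only).
    await get_init_config(client_config)
    train_loader, test_loader = build_loaders(client_config)  # hypothetical helper
    while True:
        await local_training(client_config, train_loader, test_loader)

# asyncio.run(run_client(client_config))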
Example #3
def main():
    global args
    args = parser.parse_args()

    print()
    print('Command-line argument values:')
    for key, value in vars(args).items():
        print('-', key, ':', value)
    print()

    test_params = [
        args.model,
        path_to_save_string(args.dataset),
        path_to_save_string(args.test_dataset), args.viewpoint_modulo,
        args.batch_size, args.epochs, args.lr, args.weight_decay, args.seed,
        args.routing_iters
    ]
    test_name = '_'.join([str(x) for x in test_params]) + '.pth'
    model_params = [
        args.model,
        path_to_save_string(args.dataset), args.viewpoint_modulo,
        args.batch_size, args.epochs, args.lr, args.weight_decay, args.seed,
        args.routing_iters
    ]
    model_name = '_'.join([str(x) for x in model_params]) + '.pth'
    header = 'model,training-dataset,test-dataset,viewpoint_modulo,' \
             'batch_size,epochs,lr,weight_decay,seed,em_iters,accuracy'
    snapshot_path = os.path.join('.', 'snapshots', model_name)
    result_path = os.path.join('.', 'results', 'pytorch_test.csv')

    make_dirs_if_not_exist([snapshot_path, result_path])

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    model, criterion, optimizer, scheduler = load_model(
        args.model,
        device_ids=args.device_ids,
        lr=args.lr,
        routing_iters=args.routing_iters)

    num_class, train_loader, test_loader = load_datasets(
        args.test_dataset, args.batch_size, args.test_batch_size,
        args.test_viewpoint_modulo)
    model.load_state_dict(torch.load(snapshot_path))
    acc, predictions, labels, logits = test(test_loader,
                                            model,
                                            criterion,
                                            chunk=1)
    print(f'Accuracy: {acc:.2f}%')
    print(f'Memory usage: {gpu_memory_usage()}')

    to_write = test_params + [acc.cpu().numpy()]
    append_to_csv(result_path, to_write, header=header)

    if args.roc != '':
        make_dirs_if_not_exist(args.roc)
        torch.save((predictions, labels, logits), args.roc)
Example #4
def main(unused_args):
    require_flag('billboard_path')

    feature_type = FLAGS.feature_type
    cache_filename = '../billboard-datasets-%s.cpkl' % feature_type
    if os.path.exists(cache_filename):
        datasets = load_datasets(cache_filename)
    else:
        datasets = billboard.read_billboard_datasets(FLAGS.billboard_path,
                                                     feature_type=FLAGS.feature_type)
        dump_datasets(datasets, cache_filename)

    # grid = {
    #     'n_hidden': [300, 800, 1300, 1800, 2300, 2800, 3300],
    #     'n_steps': [200],
    #     'spectral_radius': [1.],
    #     'connectivity': [.01, .0001],
    #     'max_leakage': [.99],
    #     'min_leakage': [.3],
    #     'ridge_beta': [0, .5],
    #     'input_scaling': [.2],
    #     'input_shift': [0],
    # }

    grid = {
        'n_hidden': [2000],
        'n_steps': [200],
        'spectral_radius': [1.],
        'connectivity': [.01],
        'max_leakage': [.99],
        'min_leakage': [.3],
        'ridge_beta': [.5],
        'input_scaling': [.2],
        'input_shift': [0],
    }
    grid_product = list(dict_product(grid))
    #random.shuffle(grid_product)

    for config in grid_product:

        config['n_inputs'] = datasets.train.feature_reader.num_features
        config['n_outputs'] = datasets.train.label_reader.num_labels

        print('\n%s' % config)

        if FLAGS.backend == 'numpy':
            numpy_eval(datasets, config)
        elif FLAGS.backend == 'tensorflow':
            tensorflow_eval(datasets, config)
        else:
            print('unknown backend')
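
`dict_product` is not defined in this snippet; it is assumed to yield one configuration dict per point in the Cartesian product of the per-key value lists. A minimal sketch:

import itertools

def dict_product(grid):
    # Yield one dict per combination of the per-key value lists.
    keys = list(grid.keys())
    for values in itertools.product(*(grid[k] for k in keys)):
        yield dict(zip(keys, values))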
Example #5
def train_knn_sklearn():
    K = 12
    train_data, train_label = datasets.load_datasets("digits/trainingDigits")
    train_label = [np.argmax(label) for label in train_label]
    train_data = train_data.reshape(train_data.shape[0], train_data.shape[2])
    model = KNeighborsClassifier(n_neighbors=K)
    model.fit(train_data, train_label)

    test_data, test_label = datasets.load_datasets("digits/testDigits")
    test_label = [np.argmax(label) for label in test_label]
    test_data = test_data.reshape(test_data.shape[0], test_data.shape[2])
    predictions = model.predict(test_data)
    ok_predictions = 0
    for i in range(len(predictions)):
        expected = test_label[i]
        prediction = predictions[i]
        if expected == prediction:
            ok_predictions += 1

    accuracy = round((ok_predictions / len(predictions)) * 100, 2)
    wrong_numbers = len(predictions) - ok_predictions
    print("Accuracy on test data: " + str(accuracy) + "%")
    print(f"wrong_numbers: {wrong_numbers}")
Example #6
def run_training(dataset, batch_size, net_type, net_choice, optim_func,
                 optim_kwargs, simple_math_net_kwargs, simple_math_config):
    """
    Trains and saves network.

    dataset: string specifying which dataset we're using
    batch_size: int, batch size for the data loaders.
    net_type: string indicating whether the model is an MLP or a CNN
    net_choice: string choosing which model to train
    optim_func: string specifying whether you're using adam, sgd, etc.
    optim_kwargs: dict of kwargs that you're passing to the optimizer.
    simple_math_net_kwargs: Dict of kwargs passed on to the simple math net.
        Only used if net_choice is simple
    simple_math_config: dict of config variables for simple math experiments.
    """
    device = (torch.device("cuda")
              if torch.cuda.is_available() else torch.device("cpu"))
    if dataset == "simple_dataset":
        criterion = nn.MSELoss()
    elif dataset == "add_mul":
        criterion = csordas_loss
    else:
        criterion = nn.CrossEntropyLoss()

    if net_choice == "simple":
        network = mlp_dict[net_choice](**simple_math_net_kwargs)
    else:
        network = (mlp_dict[net_choice]()
                   if net_type == 'mlp' else cnn_dict[net_choice]())

    if optim_func == 'adam':
        optimizer_ = optim.Adam
    elif optim_func == 'sgd':
        optimizer_ = optim.SGD
    else:
        optimizer_ = optim.SGD
    optimizer = optimizer_(network.parameters(), **optim_kwargs)

    train_loader, test_loader_dict, classes = load_datasets(
        dataset, batch_size, simple_math_config)
    test_results_dict, loss_list = train_and_save(network, optimizer,
                                                  criterion, train_loader,
                                                  test_loader_dict, device)
    return {
        'test results dict': test_results_dict,
        'train loss list': loss_list
    }
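
A hypothetical call, only to illustrate the expected argument shapes (every value below is made up):

results = run_training(
    dataset="simple_dataset",          # selects nn.MSELoss() above
    batch_size=64,
    net_type="mlp",
    net_choice="simple",               # routes through mlp_dict["simple"]
    optim_func="adam",
    optim_kwargs={"lr": 1e-3},
    simple_math_net_kwargs={},         # only read when net_choice == "simple"
    simple_math_config={},
)
print(results["train loss list"][-5:])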
Example #7
    def get_datasets(self, train=True):
        """
        Gets the datasets from the "datasets" module
        
        Parameters:
		-train (bool, default=True): either load train or eval data
        
		Returns:
		-Nothing
        """

        data = datasets.load_datasets(self.args.data, train)
        self.datasets = {}
        # Convert to DataLoader
        for (i, batch_size) in self.dataset_list:
            self.datasets[i] = torch.utils.data.DataLoader(
                data[i], batch_size=batch_size, shuffle=True)
Example #8
def evaluate_mlp_numpy(model):
    x, y = datasets.load_datasets("digits/testDigits")
    nn = model

    test_examples = len(x)

    ok_predictions = 0

    for i in range(test_examples):
        expected = np.argmax(y[i])
        prediction = np.argmax(nn.test(x[i]))
        if expected == prediction:
            ok_predictions += 1

    accuracy = round((ok_predictions / test_examples) * 100, 2)
    wrong_numbers = test_examples - ok_predictions
    print("Accuracy on test data: " + str(accuracy) + "%")
    print(f"wrong_numbers: {wrong_numbers}")
    return accuracy, wrong_numbers
Example #9
def average_by_cell_line():
    datasets = load_datasets()
    ess_train_data = datasets['ess_train_data']

    lines_board = load_cell_lines(CELL_LINES_LEADERBOARD_PH1)
    lines_train = load_cell_lines(CELL_LINES_TRAINING_PH1)

    data = {}

    for line in lines_board.index:
        site = lines_board.at[line, 'Site_primary']
        matches = lines_train.index[lines_train['Site_primary'] == site]
        if matches.size > 0:
            data[line] = ess_train_data.loc[:, matches].mean(1).tolist()
        else:
            data[line] = ess_train_data.mean(1).tolist()

    ess_avg_data = DataFrame(data, index=ess_train_data.index, columns=lines_board.index)
    ess_avg_data.insert(0, 'Description', ess_train_data.index)
    save_gct_data(ess_avg_data, 'avg_per_line.gct')
Example #10
def train_mlp_numpy():
    input_dim = 1024
    hidden_dim = 100
    output_dim = 10
    epochs = 1000
    lr = 0.001

    nn = NeuralNetwork(input_dim, hidden_dim, output_dim, lr)
    inputs, targets = datasets.load_datasets("digits/trainingDigits")
    number_examples = len(inputs)

    print("Training...")
    best_result = [0, 0]
    no_update = 0
    for e in range(epochs):
        init_time = time.time()
        err = 0

        for i in range(number_examples):
            error = nn.train(inputs[i], targets[i])
            err = err + error
        err = err / number_examples
        finish_time = time.time()
        diff = round((finish_time - init_time), 2)
        time_to_finish = round(((epochs - e) * diff) / 60, 2)
        print("Error: " + str(err) + " | EPOCH: " + str(e) + " | Time to finish: " + str(time_to_finish) + " mins")

        if e % 50 == 0:
            accuracy, wrong_numbers = evaluate_mlp_numpy(nn)
            if accuracy > best_result[0]:
                best_result[0] = accuracy
                best_result[1] = wrong_numbers
                no_update = 0
            else:
                no_update += 1
        if no_update >= 5:
            print("Best Accuracy on test data: " + str(best_result[0]) + "%")
            print(f"Best wrong_numbers: {best_result[1]}")
            exit()
    print("Best Accuracy on test data: " + str(best_result[0]) + "%")
    print(f"Best wrong_numbers: {best_result[1]}")
Example #11
def pipeline(args):
    phase = args['phase']
    sc = args['sc']
    filter_method = args['filter']
    filter_threshold = args['filter_threshold']
    normalize = args['normalize']
    feature_selection = args['feature_selection']
    n_features = args['n_features']
    selection_args = args['selection_args']
    estimator = args['estimator']
    estimation_args = args['estimation_args']
    submit = args['submit']
    outputfile = args['outputfile']
    use_cnv = args['use_cnv']
    use_mut = args['use_mut'] if phase == 'phase3' else False
    split_train_set = args['split_train_set']
    max_predictions = args['max_predictions']

    datasets = load_datasets(phase=phase, get_cnv=use_cnv, get_mut=use_mut)
    gene_list = datasets['gene_list']

    if split_train_set:
        split_datasets(datasets, use_cnv, use_mut)

    print('pre-processing with:', filter_method, 'at', filter_threshold, 'normalize:', normalize, 'use_cnv', use_cnv, 'use_mut', use_mut)

    pre_process_datasets(datasets, filter_method, filter_threshold, normalize, use_cnv, use_mut)

    feat_train_data = datasets['feat_train_data']
    feat_board_data = datasets['feat_board_data']
    ess_train_data = datasets['ess_train_data']
    X = feat_train_data.values.T
    Y = ess_train_data.values if sc == 'sc1' else ess_train_data.loc[gene_list, :].values
    Z = feat_board_data.values.T
    feature_list = feat_board_data.index.values

    if max_predictions and split_train_set:
        Y = Y[::len(Y) // max_predictions][:max_predictions]

    print('selection with', feature_selection, '(', n_features, ')', selection_args)
    print('estimation with', estimator, estimation_args, end=' ')

    t0 = time()
    if sc in ('sc1', 'sc2'):
        W, features = select_train_predict(X, Y, Z, feature_list, feature_selection, estimator, n_features, selection_args, estimation_args)

    if sc == 'sc3':
        if estimator.startswith('mt'):
            W, features = sc3_multitask(X, Y, Z, feature_list, feature_selection, estimator, selection_args, estimation_args)
        else:
            W, features = sc3_top100(X, Y, Z, feature_list, feature_selection, estimator, n_features, selection_args, estimation_args)

    t1 = time() - t0

    print('tested', feature_selection, estimator, 'elapsed', t1, 'secs')

    if split_train_set:
        W0 = datasets['ess_score_data'].values if sc == 'sc1' else datasets['ess_score_data'].loc[gene_list, :].values
        if max_predictions:
            W0 = W0[::len(W0) // max_predictions][:max_predictions]
        score = training_score(W0, W)
        print('scored:', score)
        print()

    else:
        index = ess_train_data.index if sc == 'sc1' else gene_list
        ess_board_data = DataFrame(W, columns=feat_board_data.columns, index=index)
        ess_board_data.insert(0, 'Description', index)
        save_gct_data(ess_board_data, outputfile + '.gct')

        if sc == 'sc2':
            features_data = DataFrame(features, index=gene_list)
            features_data.to_csv(RESULTS_FOLDER + outputfile + '.txt', sep='\t', header=False)
            zip_files(outputfile, [outputfile + '.txt', outputfile + '.gct'])

        if sc == 'sc3':
            features_data = DataFrame([features])
            features_data.to_csv(RESULTS_FOLDER + outputfile + '.txt', sep='\t', header=False, index=False)
            zip_files(outputfile, [outputfile + '.txt', outputfile + '.gct'])

        if submit:
            label = outputfile
            submit_to_challenge(outputfile, sc, label)
Example #12
def main():
    global args
    args = parser.parse_args()

    print()
    print('Command-line argument values:')
    for key, value in vars(args).items():
        print('-', key, ':', value)
    print()

    params = [
        args.model,
        path_to_save_string(args.dataset), args.viewpoint_modulo,
        args.batch_size, args.epochs, args.lr, args.weight_decay, args.seed,
        args.routing_iters
    ]
    model_name = '_'.join([str(x) for x in params]) + '.pth'
    header = 'model,dataset,viewpoint_modulo,batch_size,epochs,lr,weight_decay,seed,em_iters,accuracy'
    snapshot_path = os.path.join('.', 'snapshots', model_name)
    data_path = os.path.join('.', 'results', 'training_data', model_name)
    result_path = os.path.join('.', 'results', 'pytorch_train.csv')

    make_dirs_if_not_exist([snapshot_path, data_path, result_path])

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    model, criterion, optimizer, scheduler = load_model(
        args.model,
        device_ids=args.device_ids,
        lr=args.lr,
        routing_iters=args.routing_iters)
    num_class, train_loader, test_loader = load_datasets(
        args.dataset, args.batch_size, args.test_batch_size,
        args.viewpoint_modulo)

    best_acc = 0
    training_accuracies = []
    test_accuracies = []

    if args.append:
        model.load_state_dict(torch.load(snapshot_path))
    try:
        for epoch in range(1, args.epochs + 1):
            print()
            acc = train(train_loader,
                        model,
                        criterion,
                        optimizer,
                        epoch,
                        epochs=args.epochs,
                        log_interval=args.log_interval)
            training_accuracies.append(acc)
            scheduler.step(acc)
            print('Epoch accuracy was %.1f%%. Learning rate is %.9f.' %
                  (acc, optimizer.state_dict()['param_groups'][0]['lr']))
            if epoch % args.test_interval == 0:
                test_acc, __, __, __ = test(test_loader,
                                            model,
                                            criterion,
                                            chunk=args.test_size)
                test_accuracies.append(test_acc)
                if test_acc > best_acc:
                    best_acc = test_acc
    except KeyboardInterrupt:
        print('Cancelled training after %d epochs' % (epoch - 1))
        args.epochs = epoch - 1

    acc, predictions, labels, logits = test(test_loader,
                                            model,
                                            criterion,
                                            chunk=1)
    print(f'Accuracy: {acc:.2f}% (best: {best_acc:.2f}%)')

    to_write = params + [acc.cpu().numpy()]
    append_to_csv(result_path, to_write, header=header)
    snapshot(snapshot_path, model)
    #torch.save((accuracies, labels, predictions), data_path)

    if args.learn_curve != '':
        make_dirs_if_not_exist(args.learn_curve)
        torch.save((training_accuracies, test_accuracies), args.learn_curve)
Example #13
from bokeh.io import curdoc
from bokeh.models.widgets import Tabs
from bokeh.layouts import row

from datasets import load_datasets
from scripts.empresa import company_info_tab
from scripts.metricas import metricas_tab
from scripts.valoracion import valoracion_tab

# Load the datasets and perform any processing on them
(number_ticks, datasets) = load_datasets()

curdoc().add_root(company_info_tab(number_ticks, datasets))
curdoc().add_root(metricas_tab(number_ticks, datasets))
curdoc().add_root(valoracion_tab(number_ticks, datasets))
curdoc().title = "Modelo de Diagnóstico Financiero y de Valoración para Empresas"
Example #14
def main():
    #-------------------------------------------------------------------------------------------------------------------
    # Parse arguments.
    parser = argparse.ArgumentParser(description="Set purpose of run.")
    parser.add_argument("--dataset",  default=False, action="store_true", help="Create new datasets from raw data.")
    parser.add_argument("--train",    default=False, action="store_true", help="Train ML model.")
    parser.add_argument("--test",     default=False, action="store_true", help="Test pre-trained ML model.")
    parser.add_argument("--use",      default=False, action="store_true", help="Use pre-trained ML model on new data.")
    parser.add_argument("--grs",      default=False, action="store_true", help="Perform parameter grid search.")
    args = parser.parse_args()

    print("\nEXECUTION INITIATED\n")

    if args.grs:
        print("-----------------------------------------------")
        print("-----------------------------------------------")
        print("Initiating parameter grid search.\n")
        for model_num in range(len(config.model_keys)):
            cfg = config.Config(
                group_name = config.group_name,
                run_name   = config.run_names[model_num][0],
                system     = config.systems[0],
                data_tag   = config.data_tags[0],
                model_key  = config.model_keys[model_num],
                do_train   = False,
                do_test    = False
            )
            print("- - - - - - - - - - - - - - - - - - - - - - - -")
            print("- - - - - - - - - - - - - - - - - - - - - - - -")
            print("Finding optimal parameters for model " + cfg.model_name)
            parameter_grid_search.grid_search(cfg)
            print("")
        print("Initiating parameter grid search.\n\nEXECUTION COMPLETED")
        return

    group_name = config.group_name
    for model_num in range(len(config.model_keys)):
        for sys_num in range(len(config.systems)):
            print("\n********************************************************")
            print("Model  number:", model_num)
            print("System number:", sys_num)
            print("********************************************************\n")

            # -------------------------------------------------------------------------------------------------------------------
            # Configuration setup.
            cfg = config.Config(
                group_name = group_name,
                run_name   = config.run_names[model_num][sys_num],
                system     = config.systems[sys_num],
                data_tag   = config.data_tags[sys_num],
                model_key  = config.model_keys[model_num],
                do_train   = args.train,
                do_test    = args.test
            )

            #-------------------------------------------------------------------------------------------------------------------
            # Ensure directories exist.
            #os.makedirs(config.datasets_dir, exist_ok=True)
            #os.makedirs(config.raw_data_dir, exist_ok=True)
            #os.makedirs(config.results_dir,  exist_ok=True)
            #os.makedirs(cfg.group_dir,    exist_ok=True)
            os.makedirs(cfg.run_dir,      exist_ok=False)
            #os.makedirs(config.tb_dir,       exist_ok=True)
            #if config.is_train:
            #    os.makedirs(config.tb_run_dir,   exist_ok=False)
            #os.makedirs(config.cp_load_dir,  exist_ok=True)
            #os.makedirs(config.cp_save_dir,  exist_ok=True)
            #os.makedirs(config.eval_im_dir,  exist_ok=True)
            #os.makedirs(config.metrics_dir,  exist_ok=True)

            #-------------------------------------------------------------------------------------------------------------------
            # Save configurations.
            config.save_config(cfg)

            #-------------------------------------------------------------------------------------------------------------------
            # Create datasets.
            if model_num == 0:
                if args.dataset:
                    print("----------------------------")
                    print("Initiating dataset creation.\n")
                    print("Data tag:", cfg.data_tag)
                    datasets.main(cfg)
                    print("\nCompleted dataset creation.")
                    print("----------------------------\n")

            #-------------------------------------------------------------------------------------------------------------------
            # Define network model(s).

            ensemble = []
            print("----------------------------")
            print("Initiating model definition.")
            for i in range(cfg.ensemble_size):
                model = models.create_new_model(cfg, cfg.model_specific_params)
                ensemble.append(model)
                if i == 0 and sys_num == 0:
                    print("\n" + cfg.model_name + "\n")
                    if cfg.model_name[:8] == 'Ensemble':
                        print("Ensemble model containing " + str(len(model.nets)) + " networks as shown below.")
                        print(model.nets[0].net)
                    elif cfg.model_name[:5] == 'Local':
                        print("Ensemble model containing " + str(len(model.net.nets)) + " networks as shown below.")
                        print(model.net.nets[0])
                    else:
                        print(model.net)
            print("\nCompleted model definition.")
            print("----------------------------\n")


            # -------------------------------------------------------------------------------------------------------------------
            # Train model(s).

            if args.train:
                print("----------------------------")
                print("Initiating training")
                dataset_train, dataset_val, _ = datasets.load_datasets(cfg, True, True, False)

                dataloader_train = torch.utils.data.DataLoader(
                    dataset=dataset_train,
                    batch_size=cfg.batch_size_train,
                    shuffle=True,
                    num_workers=0,
                    pin_memory=True
                )
                dataloader_val = torch.utils.data.DataLoader(
                    dataset=dataset_val,
                    batch_size=cfg.batch_size_val,
                    shuffle=True,
                    num_workers=0,
                    pin_memory=True
                )
                for i, model in enumerate(ensemble):
                    print("\nTraining instance " + str(i))
                    _ = train.train(cfg, model, i, dataloader_train, dataloader_val)
                print("\nCompleted training.")
                print("----------------------------\n")

            #-------------------------------------------------------------------------------------------------------------------
            # Test model(s).

            if args.test:
                print("----------------------------")
                print("Initiating testing.")
                error_dicts = []
                plot_data_dicts = []
                for i, model in enumerate(ensemble):
                    print("\nTesting instance " + str(i))
                    if cfg.do_simulation_test:
                        error_dict, plot_data_dict = test.simulation_test(cfg, model, i)
                    else:
                        error_dict, plot_data_dict = test.single_step_test(cfg, model, i)
                    error_dicts.append(error_dict)
                    plot_data_dicts.append(plot_data_dict)
                print("")
                error_stats_dict, plot_stats_dict = test.save_test_data(cfg, error_dicts, plot_data_dicts)
                test.visualize_test_data(cfg, error_stats_dict, plot_stats_dict)
                print("\nCompleted testing.")
                print("----------------------------\n")

            # ------------------------------------------------------------------------------------------------------------------
            # Use pre-trained network to make predictions.

            if args.use:
                print("Prediction is currently not implemented.") # TODO: Implement prediction in 'predict.py'

    print("EXECUTION COMPLETED\n")
Example #15
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 01:54:02 2021
@author: fatemeh tahrirchi
"""
import datasets, net
from preprocessing import Preprocessing, CharVectorizer
from net import VDCNN, train, save
import lmdb
import numpy as np
from tqdm import tqdm
import argparse
import torch
from torch.utils.data import DataLoader, Dataset
import os, subprocess
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
MODELS_FOLDER = 'models/vdcnn'
DATA_FOLDER = 'datasets'
DATASET = 'yelp_review_full'  #['yelp_review_full','yelp_review_polarity']
PREPROCES_TYPE = 'lower'  #['lower','denoiser','add_pos','add_hashtag','add_NOT']

# get device to calculate on (either CPU or GPU with minimum memory load)
def get_gpu_memory_map():
    result = subprocess.check_output([
        'nvidia-smi', '--query-gpu=memory.used',
        '--format=csv,nounits,noheader'
    ],
                                     encoding='utf-8')
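    # The snippet is truncated here. The usual completion of this nvidia-smi
    # pattern (assumed, not from the original) parses one "memory.used" integer
    # per GPU and returns a device-index-to-MiB map:
    gpu_memory = [int(x) for x in result.strip().split('\n')]
    return dict(zip(range(len(gpu_memory)), gpu_memory))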
Example #16
    dataset( "JetHT_G1"    , JetHT_G1    , DATA , 20  , 100000000 , splitting='LumiBased' , priority=199 , label='signal' , doHLT=1 )  ,
    # dataset( "JetHT_H1"    , JetHT_H1    , DATA , 20  , 100000000 , splitting='LumiBased' , priority=199 , label='signal' , doHLT=1 )  , # No jobs generated with json file
    dataset( "JetHT_H2"    , JetHT_H2    , DATA , 10  , 100000000 , splitting='LumiBased' , priority=199 , label='signal' , doHLT=1 )  ,
    dataset( "JetHT_H3"    , JetHT_H3    , DATA , 10  , 100000000 , splitting='LumiBased' , priority=199 , label='signal' , doHLT=1 )  ,
    # dataset( "JetHT_H3-testing"    , JetHT_H3    , DATA , 10  , 10 , splitting='LumiBased' , priority=199 , label='signal' , doHLT=1 )  ,
    # For trigger testing
    # dataset( "HLT_QCD_HT300to500"    , QCD_HT300to500    , MC , 1  , 100 , splitting='FileBased' , priority=199 , label='signal' , doHLT=0 )  ,
    # dataset( "HLT_QCD_HT500to700"    , QCD_HT500to700    , MC , 1  , 100 , splitting='FileBased' , priority=199 , label='signal' , doHLT=0 )  ,
    # dataset( "HLT_QCD_HT700to1000"   , QCD_HT700to1000   , MC , 1  , 100 , splitting='FileBased' , priority=199 , label='signal' , doHLT=0 )  ,
    # dataset( "HLT_QCD_HT1000to1500"  , QCD_HT1000to1500  , MC , 1  , 100 , splitting='FileBased' , priority=199 , label='signal' , doHLT=0 )  ,
    # dataset( "HLT_QCD_HT1500to2000"  , QCD_HT1500to2000  , MC , 1  , 100 , splitting='FileBased' , priority=199 , label='signal' , doHLT=0 )  ,
    # dataset( "HLT_QCD_HT2000toInf"   , QCD_HT2000toInf   , MC , 1  , 100 , splitting='FileBased' , priority=199 , label='signal' , doHLT=0 )  ,
]

template = dataset( "ALIAS"  , "/FULL/PATH-TO/DATASET"  , MC , 1 , 1000000 , splitting='FileBased' , priority=99 , inputDBS='phys03' , label='signal' , doHLT=0 )
dataset_list = load_datasets('crabUtils/dataset_lists/list_RunIISummer16DR80Premix_private-AODSIM-v2017-09-11-longlifetime.txt', template)
print('dataset_list')
for i in dataset_list:
    print(i.alias, i.fullpath)
datasets = dataset_list
# datasets.extend(datasets_part2)


import os
crabTaskDir = 'crabTasks'
if not os.path.exists(crabTaskDir):
    os.makedirs(crabTaskDir)

if __name__ == '__main__':

    ############################################################
Example #17
            optimizer.zero_grad()
        epoch_loss += loss.item()
    return epoch_loss / len(data_loader)


if not os.path.exists(args.weight_dir):
    os.mkdir(args.weight_dir)


if __name__ == '__main__':
    transform = transforms.Compose([transforms.Resize(args.img_size),
                                    transforms.CenterCrop(args.img_size),
                                    transforms.ToTensor()])

    train_dataset, valid_dataset, test_dataset = \
        load_datasets(args.train_json, args.test_json, transform)

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = TripletResNet(args.output_dim)
    model = model.to(device)
    criterion = TripletAngularLoss()
    # criterion = TripletLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
Example #18
#!/usr/bin/python3.8

import datasets as ds
import training as tr
import testing as ts
import graphics as gr
import sa
import grasp
import genetic

datasets = ds.load_datasets()

sa_hyper_param_list = [
    sa.HyperParams(500, 0, 350, .95),
    sa.HyperParams(500, 0, 350, .85),
    sa.HyperParams(500, 0, 350, .7),
    sa.HyperParams(500, 0, 500, .95),
    sa.HyperParams(500, 0, 500, .85),
    sa.HyperParams(500, 0, 500, .7),
    sa.HyperParams(100, 0, 350, .95),
    sa.HyperParams(100, 0, 350, .85),
    sa.HyperParams(100, 0, 350, .7),
    sa.HyperParams(100, 0, 500, .95),
    sa.HyperParams(100, 0, 500, .85),
    sa.HyperParams(100, 0, 500, .7),
    sa.HyperParams(50, 0, 350, .95),
    sa.HyperParams(50, 0, 350, .85),
    sa.HyperParams(50, 0, 350, .7),
    sa.HyperParams(50, 0, 500, .95),
    sa.HyperParams(50, 0, 500, .85),
    sa.HyperParams(50, 0, 500, .7)
Example #19
def evaluate(model: nn.Module,
             do_val: bool,
             do_test: bool,
             num_visuals: int,
             comparisons: list,
             metrics: list,
             call_tag: str,
             train_it=None,
             tb_writer=None):
    with torch.no_grad():
        if do_val == do_test:
            raise ValueError("Invalid evaluation configuration.")

        metric_values = {}

        # Create data loader.
        dataset = None
        dataloader = None
        if do_val:
            _, dataset, _ = datasets.load_datasets()
            dataloader = torch.utils.data.DataLoader(
                dataset=dataset,
                batch_size=config.val_batch_size,
                shuffle=False,
                num_workers=config.dataset_val_num_workers,
                pin_memory=True)
        elif do_test:
            _, _, dataset = datasets.load_datasets()
            dataloader = torch.utils.data.DataLoader(
                dataset=dataset,
                batch_size=config.val_batch_size,
                shuffle=False,
                num_workers=config.dataset_val_num_workers,
                pin_memory=True)

        dataset_length = len(dataset)
        if num_visuals == -1:
            num_visuals = dataset_length

        summed_G_losses = {"total": 0.0}
        summed_D_losses = {"total": 0.0}
        for key in config.G_loss_scales.keys():
            summed_G_losses[key] = 0.0
        for key in config.D_loss_scales.keys():
            summed_D_losses[key] = 0.0

        summed_metrics = dict()
        for metric in metrics:
            summed_metrics[metric] = 0.0

        HR_num_correct = 0
        SR_num_correct = 0

        it = 0
        for epoch, data in enumerate(dataloader):
            real_lr = data[0].to(config.device, dtype=torch.float)
            real_hr = data[1].to(config.device, dtype=torch.float)

            current_batch_size = real_hr.size(0)

            real_labels, fake_labels = get_labels(current_batch_size)
            real_labels = real_labels.to(config.device).squeeze()
            fake_labels = fake_labels.to(config.device).squeeze()

            # Feed forward.
            fake_hr = model.G(real_lr)
            real_pred = model.D(real_hr).squeeze()
            fake_pred = model.D(fake_hr).squeeze(
            )  # Squeeze to go from shape [batch_sz, 1] to [batch_sz].

            # Compute losses.
            loss_G, loss_dict_G = get_G_loss(real_hr, fake_hr, real_pred,
                                             fake_pred, real_labels,
                                             fake_labels)
            loss_D, loss_dict_D = get_D_loss(real_pred, fake_pred, real_labels,
                                             fake_labels)
            #with open(os.path.join(config.run_dir, "eval_debug.txt"), 'a') as f:
            #    print("Real pred:", real_pred.cpu().detach().numpy(), file=f)
            #    print("Fake pred:", fake_pred.cpu().detach().numpy(), file=f)
            #    print("Real labels:", real_labels.cpu().detach().numpy(), file=f)
            #    print("Fake labels:", fake_labels.cpu().detach().numpy(), file=f)
            #    print("D_loss:", loss_D.item(), file=f)
            #    print("Batch size:", current_batch_size, file=f)

            # Unreduce losses.
            if do_val:
                summed_G_losses["total"] += (loss_G.item() *
                                             current_batch_size)
                for key in config.G_loss_scales.keys():
                    summed_G_losses[key] += (loss_dict_G[key].item() *
                                             current_batch_size)
                summed_D_losses["total"] += (loss_D.item() *
                                             current_batch_size)
                #with open(os.path.join(config.run_dir, "eval_debug.txt"), 'a') as f:
                #    print("Accumulated D_loss:", summed_D_losses, file=f)
                for key in config.D_loss_scales.keys():
                    summed_D_losses[key] += (loss_dict_D[key].item() *
                                             current_batch_size)

            # Calculate metrics.
            for j in range(current_batch_size):
                single_im_metrics = calculate_metrics(hr=real_hr[j],
                                                      sr=fake_hr[j],
                                                      metrics_list=metrics)
                for metric in metrics:
                    summed_metrics[metric] += single_im_metrics[metric]

            # Calculate number of correct discriminator predictions.
            HR_num_correct += torch.sum(real_pred > 0).item()
            SR_num_correct += torch.sum(fake_pred < 0).item()

            # Visualize and save the first num_visuals images in dataset, and the corresponding predictions.
            os.makedirs(config.eval_im_dir, exist_ok=True)
            for j in range(current_batch_size):
                if it >= num_visuals:
                    break
                #print("it:", it)
                #print("num_visuals:", num_visuals)
                filename = call_tag + '_' + 'im_' + str(it)
                make_and_save_images(lr=real_lr[j],
                                     hr=real_hr[j],
                                     sr=fake_hr[j],
                                     folder_path=config.eval_im_dir,
                                     filename=filename,
                                     comparisons=comparisons)

                with open(config.val_pred_file, "a") as data_file:
                    data_file.write("iteration " + str(train_it) + ", image " +
                                    str(j) + "\n")
                    data_file.write("HR prediction:" +
                                    str(torch.sigmoid(real_pred[j]).item()) +
                                    "\n")
                    data_file.write("SR prediction:" +
                                    str(torch.sigmoid(fake_pred[j]).item()) +
                                    "\n")
                it += 1

        val_data_str = ""

        val_data_str += "," + str(summed_G_losses['total'] / dataset_length)
        for key in config.G_loss_scales.keys():
            val_data_str += "," + str(summed_G_losses[key] / dataset_length)

        val_data_str += "," + str(summed_D_losses['total'] / dataset_length)
        for key in config.D_loss_scales.keys():
            val_data_str += "," + str(summed_D_losses[key] / dataset_length)

        #with open(os.path.join(config.run_dir, "eval_debug.txt"), 'a') as f:
        #    print("Averaged D_loss:", summed_D_losses['total'] / dataset_length, file=f)
        #    print("", file=f)

        for metric in config.val_metrics:
            val_data_str += "," + str(summed_metrics[metric] / dataset_length)

        val_data_str += "," + str(HR_num_correct / dataset_length)
        val_data_str += "," + str(SR_num_correct / dataset_length)

        with open(config.val_loss_metrics_file, "a") as data_file:
            data_file.write(str(train_it) + val_data_str + "\n")
        """
Example #20
from datasets import load_datasets
from collections import OrderedDict
from models.full_model import *
import pdb
from time import time

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
args = args_parser()
best_acc = 0

if __name__ == '__main__':
    # load dataset features
    train_list = args.train_list.replace('dataset', args.dataset)
    val_list = args.val_list.replace('dataset', args.dataset)
    train_loader, val_loader = load_datasets(args.data_dir, train_list,
                                             val_list, args.mode,
                                             args.batch_size, args.img_size,
                                             args.n_workers)

    # build model
    resume_path = args.resume_path.replace('dataset', args.dataset)  \
                                  .replace('arch', args.arch)
    if args.dataset == 'AID':
        n_classes = 30
    elif args.dataset == 'UCM':
        n_classes = 21
    elif args.dataset == 'NWPU-RESISC45':
        n_classes = 45
    elif args.dataset == 'RSSCN7':
        n_classes = 7

    net = FullModel(arch=args.arch,
Example #21
run = get_run_id(out_dir)
out_name = "%04d_" %(run) + out_name
copy_src_files(out_name)
config_init["num_iter_per_epoch"] = 400 * 200 // (config_init["b_source"] + config_init["b_target"])
config_init['nb_class'] = 9 if args.source_dataset in ["CIFAR", "STL"] else 10
config_init['out_path'] = out_dir + out_name + ".txt"
config_init['source_dataset'] = args.source_dataset
config_init['target_dataset'] = args.target_dataset

# log experiment settings:
for key in sorted(config_init):
    conf = key + " -> " + str(config_init[key])
    log(conf, config_init['out_path'])

# load datasets:
trainloader_source, testloader_source, trainloader_target, testloader_target = load_datasets(config_init)

# load network:
net = load_net(config_init)

# load optimizer:
my_optimizer = torch_optimizer(config_init, net, is_encoder=False)

# to keep initial loss weights (lambda):
config = deepcopy(config_init)

for epoch in range(config["num_epochs"]):
    st_time = time.time()

    lr, optimizer = my_optimizer.update_lr(epoch)
    config = lambda_schedule(config, config_init, epoch)
Example #22
save_ops = False
add_bias = True
remove_mean = True
do_pca = False
pca_dims = 2

op_dir = 'output/' + str(experiment_id) + '/'
if save_ops:
    print('saving output to', op_dir)
    if not os.path.isdir(op_dir):
        os.makedirs(op_dir)

# load data
dataset_train, dataset_test = ds.load_datasets(dataset_name, dataset_dir,
                                               do_pca, pca_dims, add_bias,
                                               remove_mean, density_sigma,
                                               interp_sigma)
if len(np.unique(dataset_train['Y'])) > 2:
    one_v_all = True  # multi class
else:
    one_v_all = False  # binary

# generate set of hypotheses
hyps, prior_h = hp.generate_hyps(dataset_train, alpha, num_init_hyps, hyp_type,
                                 one_v_all)
print(len(hyps), hyp_type, 'hypotheses\n')

# remove examples that are inconsistent with best hypothesis
if one_v_all:
    err_hyp = hp.compute_hyps_error_one_vs_all(hyps, dataset_train['X'],
                                               dataset_train['Y'], alpha)
Example #23
def train():
    print("CUDA availability:", torch.cuda.is_available())
    print("Current device:", torch.cuda.current_device(), "- num devices:",
          torch.cuda.device_count(), "- device name:",
          torch.cuda.get_device_name(0))

    torch.backends.cudnn.benchmark = True
    tb_train_writer = tensorboardX.SummaryWriter(
        log_dir=os.path.join(config.tb_run_dir, 'train'))
    tb_eval_writer = tensorboardX.SummaryWriter(
        log_dir=os.path.join(config.tb_run_dir, 'eval'))
    # TODO: Add support for tensorboard.

    # Load datasets for training and validation.
    dataset_train, dataset_val, _ = load_datasets()

    # Create data loader.
    dataloader_train = torch.utils.data.DataLoader(
        dataset=dataset_train,
        batch_size=config.train_batch_size,
        shuffle=True,
        num_workers=config.dataset_train_num_workers,
        pin_memory=True)

    start_epoch = 0
    it = 0
    it_per_epoch = len(dataloader_train)
    num_epochs = config.num_train_it // it_per_epoch

    # Build networks.
    model = ESRGAN()

    with open(os.path.join(config.run_dir, "networks.txt"), "w") as data_file:
        data_file.write(str(model.G))
        data_file.write("\n\n")
        data_file.write(str(model.D))

    # Load from save.
    if config.load_model_from_save:
        print(
            f"Loading model from from saves. G: {config.generator_load_path}, D: {config.discriminator_load_path}"
        )
        _, __ = model.load_model(
            generator_load_path=config.generator_load_path,
            discriminator_load_path=config.discriminator_load_path,
            state_load_path=None)

        if config.resume_training_from_save:
            print(
                f"Resuming training from save. State: {config.state_load_path}"
            )
            loaded_epoch, loaded_it = model.load_model(
                generator_load_path=None,
                discriminator_load_path=None,
                state_load_path=config.state_load_path)
            print(f"Loaded epoch {loaded_epoch}, it {loaded_it}.")
            if loaded_it:
                start_epoch = loaded_epoch
                it = loaded_it

    # Create saving files.
    with open(config.training_data_file, "w") as data_file:
        num_labels = 3 + len(config.G_loss_scales.keys()) + len(
            config.D_loss_scales.keys())
        data_file.write(str(num_labels) + "\n")

        label_str = "it"
        label_str += ",G_total_loss"
        for key in config.G_loss_scales.keys():
            label_str += ",G_" + key + "_loss"
        label_str += ",D_total_loss"
        for key in config.D_loss_scales.keys():
            label_str += ",D_" + key + "_loss"
        data_file.write(label_str + "\n")

    with open(config.val_loss_metrics_file, "w") as data_file:
        num_labels = 3 + len(config.G_loss_scales.keys()) + len(
            config.D_loss_scales.keys()) + len(config.val_metrics) + 2
        data_file.write(str(num_labels) + "\n")

        label_str = "it"
        label_str += ",G_total_loss"
        for key in config.G_loss_scales.keys():
            label_str += ",G_" + key + "_loss"
        label_str += ",D_total_loss"
        for key in config.D_loss_scales.keys():
            label_str += ",D_" + key + "_loss"
        for metric in config.val_metrics:
            label_str += "," + metric
        label_str += ",D_HR_acc"
        label_str += ",D_SR_acc"
        data_file.write(label_str + "\n")

    with open(config.val_pred_file, "w") as data_file:
        data_file.write("")

    #with open(os.path.join(config.run_dir, "eval_debug.txt"), "w") as data_file:
    #    data_file.write("")

    with open(config.val_w_and_grad_file, "w") as data_file:
        data_file.write("26\n")
        data_file.write("it" + ",G_grad_start_mean" +
                        ",G_grad_start_abs_mean" + ",G_grad_start_variance" +
                        ",G_grad_mid_mean" + ",G_grad_mid_abs_mean" +
                        ",G_grad_mid_variance" + ",G_grad_end_mean" +
                        ",G_grad_end_abs_mean" + ",G_grad_end_variance" +
                        ",G_weight_start_abs_mean" +
                        ",G_weight_start_variance" + ",G_weight_mid_abs_mean" +
                        ",G_weight_mid_variance" + ",G_weight_end_abs_mean" +
                        ",G_weight_end_variance" + ",D_grad_start_mean" +
                        ",D_grad_start_abs_mean" + ",D_grad_start_variance" +
                        ",D_grad_end_mean" + ",D_grad_end_abs_mean" +
                        ",D_grad_end_variance" + ",D_weight_start_abs_mean" +
                        ",D_weight_start_variance" + ",D_weight_end_abs_mean" +
                        ",D_weight_end_variance" + "\n")

    # Training loop.
    for epoch in range(start_epoch, num_epochs + 1):
        print("--------------------------------")
        print(f"Beginning epoch number {epoch}.")
        for i, data in enumerate(dataloader_train):
            if it >= config.num_train_it:
                break
            it += 1

            model.G.train()
            model.D.train()

            #---------------------
            # Load training data.
            real_lr = data[0].to(config.device, dtype=torch.float)
            real_hr = data[1].to(config.device, dtype=torch.float)

            # Define labels for real and fake data
            current_batch_size = real_hr.size(0)
            real_labels, fake_labels = get_labels(current_batch_size)
            real_labels = real_labels.to(config.device).squeeze()
            fake_labels = fake_labels.to(config.device).squeeze()

            # Forward pass through generator.
            fake_hr = model.G(real_lr)

            # For storing training data.
            training_data_str = ""

            #---------------------
            # Train generator.
            if it % config.d_g_train_ratio == 0:
                for param in model.D.parameters():
                    param.requires_grad = False  # freeze D while the generator trains
                #for param in model.G.parameters():
                #    param.requires_grad = True
                model.G.zero_grad()

                # Forward pass through discriminator.
                if config.use_inst_noise:
                    real_pred = model.D(
                        real_hr +
                        instance_noise(0.1, real_hr.size(), it,
                                       config.num_train_it).to(config.device)
                    ).squeeze().detach()  # Detach to avoid backprop through D.
                    fake_pred = model.D(fake_hr + instance_noise(
                        0.1, fake_hr.size(), it, config.num_train_it).to(
                            config.device)).squeeze()
                else:
                    real_pred = model.D(real_hr).squeeze().detach(
                    )  # Detach to avoid backprop through D.
                    fake_pred = model.D(fake_hr).squeeze(
                    )  # Squeeze to go from shape [batch_sz, 1] to [batch_sz].

                # Compute generator loss.
                loss_G, loss_dict_G = get_G_loss(real_hr, fake_hr, real_pred,
                                                 fake_pred, real_labels,
                                                 fake_labels)

                # Do backpropagation using generator loss.
                loss_G.backward()

                # Make optimization step for generator.
                model.optimizer_G.step()

                # TODO: Maybe save some loss information and other stuff here.
                if it % config.print_train_loss_period == 0:
                    training_data_str += ("," + str(loss_G.item()))
                    for key in config.G_loss_scales.keys(
                    ):  # Use the keys of this dict to ensure same order of access across iterations.
                        training_data_str += ("," +
                                              str(loss_dict_G[key].item()))

                    #print(f"Generator losses for iteration {it}.")
                    #print("Total generator loss:", loss_G)
                    #print("I am trying to write to tensorboard now.")
                    #loss_dict_G["total"] = loss_G
                    #tb_train_writer.add_scalars('G_losses_train', loss_dict_G, it)
                    #for key in loss_dict_G.keys():
                    #    print("Generator loss ", key, ": ", loss_dict_G[key], sep="")
                    # for hist_name, val in hist_vals.items():
                    #    tb_writer.add_histogram(f"data/hist/{hist_name}", val, it)

            #---------------------
            # Train discriminator.
            for param in model.D.parameters():
                param.requires_grad = True

            #for param in model.G.parameters():
            #    param.requires_grad = False
            # TODO: Why not set requires_grad to False for generator parameters? Because then it cannot be printed during validation.

            model.D.zero_grad()

            # Forward pass through discriminator.
            if config.use_inst_noise:
                real_pred = model.D(
                    real_hr + instance_noise(0.1, real_hr.size(
                    ), it, config.num_train_it).to(config.device)).squeeze(
                    )  # Squeeze to go from shape [batch_sz, 1] to [batch_sz].
                fake_pred = model.D(
                    fake_hr.detach() +
                    instance_noise(0.1, real_hr.size(), it,
                                   config.num_train_it).to(config.device)
                ).squeeze()  # Detach to avoid backprop through G.
            else:
                real_pred = model.D(real_hr).squeeze(
                )  # Squeeze to go from shape [batch_sz, 1] to [batch_sz].
                fake_pred = model.D(fake_hr.detach()).squeeze(
                )  # Detach to avoid backprop through G.

            # Compute discriminator loss.
            loss_D, loss_dict_D = get_D_loss(real_pred, fake_pred, real_labels,
                                             fake_labels)

            # Do backpropagation using discriminator loss.
            loss_D.backward()

            # Make optimization step for discriminator.
            model.optimizer_D.step()

            # TODO: Maybe save some loss information and other stuff here.
            if it % config.print_train_loss_period == 0:
                training_data_str += ("," + str(loss_D.item()))
                for key in config.D_loss_scales.keys(
                ):  # Use the keys of this dict to ensure same order of access across iterations.
                    training_data_str += ("," + str(loss_dict_D[key].item()))
                #print(f"Discriminator losses for iteration {it}.")
                #print("Total discriminator loss:", loss_D)
                #loss_dict_D["total"] = loss_D
                #tb_train_writer.add_scalars('D_losses_train', loss_dict_D, it)
                #for key in loss_dict_D.keys():
                #    print("Discriminator loss ", key, ": ", loss_dict_D[key], sep="")

                #print("Real pred:", real_pred.cpu().detach().numpy())
                #print("Fake pred:", fake_pred.cpu().detach().numpy())
                #print("Real labels:", real_labels.cpu().detach().numpy())
                #print("Fake labels:", fake_labels.cpu().detach().numpy())
                #print("D_loss:", loss_D.item())

            # Update learning rate schedulers.
            if i > 0:
                for s in model.schedulers:
                    s.step()

            #---------------------
            # Save model.
            if it % config.save_model_period == 0:
                print(f"saving model (it {it})")
                model.save_model(epoch, it)

            #---------------------
            # Store training data.
            if it % config.print_train_loss_period == 0:
                with open(config.training_data_file, "a") as data_file:
                    data_file.write(str(it) + training_data_str + "\n")

            #---------------------
            # Validation.
            if it % config.val_period == 0:

                G_grad_start = model.G.network[0].weight.grad.detach()
                G_grad_mid = model.G.network[1].module[
                    7].RDB2.conv1.weight.grad.detach(
                    )  # First index chooses the skip_block, second index chooses 8th RRDB
                G_grad_end = model.G.network[-1].weight.grad.detach()
                G_weight_start = model.G.network[0].weight.detach()
                G_weight_mid = model.G.network[1].module[
                    7].RDB2.conv1.weight.detach()
                G_weight_end = model.G.network[-1].weight.detach()

                G_grad_start_mean = G_grad_start.mean().item()
                G_grad_start_abs_mean = (torch.abs(G_grad_start)).mean().item()
                G_grad_start_variance = G_grad_start.var(unbiased=False).item()

                G_grad_mid_mean = G_grad_mid.mean().item()
                G_grad_mid_abs_mean = (torch.abs(G_grad_mid)).mean().item()
                G_grad_mid_variance = G_grad_mid.var(unbiased=False).item()

                G_grad_end_mean = G_grad_end.mean().item()
                G_grad_end_abs_mean = (torch.abs(G_grad_end)).mean().item()
                G_grad_end_variance = G_grad_end.var(unbiased=False).item()

                G_weight_start_abs_mean = torch.abs(G_weight_start).mean().item()
                G_weight_start_variance = G_weight_start.var(unbiased=False).item()

                G_weight_mid_abs_mean = (torch.abs(G_weight_mid)).mean().item()
                G_weight_mid_variance = G_weight_mid.var(unbiased=False).item()

                G_weight_end_abs_mean = (torch.abs(G_weight_end)).mean().item()
                G_weight_end_variance = G_weight_end.var(unbiased=False).item()

                D_grad_start = model.D.features[0].block[0].weight.grad.detach()
                D_grad_end = model.D.classifier[-1].weight.grad.detach()
                D_weight_start = model.D.features[0].block[0].weight.detach()
                D_weight_end = model.D.classifier[-1].weight.detach()

                D_grad_start_mean = D_grad_start.mean().item()
                D_grad_start_abs_mean = (torch.abs(D_grad_start)).mean().item()
                D_grad_start_variance = D_grad_start.var(unbiased=False).item()

                D_grad_end_mean = D_grad_end.mean().item()
                D_grad_end_abs_mean = (torch.abs(D_grad_end)).mean().item()
                D_grad_end_variance = D_grad_end.var(unbiased=False).item()

                D_weight_start_abs_mean = torch.abs(D_weight_start).mean().item()
                D_weight_start_variance = D_weight_start.var(unbiased=False).item()

                D_weight_end_abs_mean = (torch.abs(D_weight_end)).mean().item()
                D_weight_end_variance = D_weight_end.var(unbiased=False).item()
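                # The repetitive statistics above could be folded into a small
                # helper (hypothetical refactor, not part of the original code):
                #
                #   def tensor_stats(t):
                #       """Mean, mean absolute value and biased variance."""
                #       return (t.mean().item(), t.abs().mean().item(),
                #               t.var(unbiased=False).item())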

                val_data_str = "," + str(G_grad_start_mean) \
                             + "," + str(G_grad_start_abs_mean) \
                             + "," + str(G_grad_start_variance) \
                             + "," + str(G_grad_mid_mean) \
                             + "," + str(G_grad_mid_abs_mean) \
                             + "," + str(G_grad_mid_variance) \
                             + "," + str(G_grad_end_mean) \
                             + "," + str(G_grad_end_abs_mean) \
                             + "," + str(G_grad_end_variance) \
                             + "," + str(G_weight_start_abs_mean) \
                             + "," + str(G_weight_start_variance) \
                             + "," + str(G_weight_mid_abs_mean) \
                             + "," + str(G_weight_mid_variance) \
                             + "," + str(G_weight_end_abs_mean) \
                             + "," + str(G_weight_end_variance) \
                             + "," + str(D_grad_start_mean) \
                             + "," + str(D_grad_start_abs_mean) \
                             + "," + str(D_grad_start_variance) \
                             + "," + str(D_grad_end_mean) \
                             + "," + str(D_grad_end_abs_mean) \
                             + "," + str(D_grad_end_variance) \
                             + "," + str(D_weight_start_abs_mean) \
                             + "," + str(D_weight_start_variance) \
                             + "," + str(D_weight_end_abs_mean) \
                             + "," + str(D_weight_end_variance)

                with open(config.val_w_and_grad_file, "a") as data_file:
                    data_file.write(str(it) + val_data_str + "\n")

                model.G.eval()
                model.D.eval()
                num_visuals = 0
                if it % config.val_visual_period == 0:
                    num_visuals = config.num_val_visualizations
                evaluate.evaluate(model=model,
                                  do_val=True,
                                  do_test=False,
                                  num_visuals=num_visuals,
                                  comparisons=config.val_comparisons,
                                  metrics=config.val_metrics,
                                  call_tag='val_it_' + str(it),
                                  train_it=it,
                                  tb_writer=tb_eval_writer)

    tb_train_writer.close()
    tb_eval_writer.close()


#----------------------------------------------------------------------------
def simulation_test(cfg, model, num):
    model.net.eval()

    # Get target temperature profile of last validation example. This will be IC for test simulation.
    _, dataset_val, dataset_test = load_datasets(cfg, False, True, True)

    # Get stats used for normalization/unnormalization.
    stats = dataset_test[:6][3].detach().numpy()

    unc_mean = stats[0]
    unc_std = stats[3]
    ref_mean = stats[1]
    ref_std = stats[4]
    src_mean = stats[2]
    src_std = stats[5]

    # Use last reference data example of validation set as IC for test simulation.
    IC = util.z_unnormalize(dataset_val[-1][0].detach().numpy(), ref_mean,
                            ref_std)

    L2_errors_unc = np.zeros(cfg.N_test_examples)
    L2_errors_cor = np.zeros(cfg.N_test_examples)

    plot_steps = cfg.profile_save_steps
    plot_data_dict = {
        'x': cfg.nodes_coarse,
        'unc': np.zeros((plot_steps.shape[0], cfg.nodes_coarse.shape[0])),
        'ref': np.zeros((plot_steps.shape[0], cfg.nodes_coarse.shape[0])),
        'cor': np.zeros((plot_steps.shape[0], cfg.nodes_coarse.shape[0])),
        'time': np.zeros(plot_steps.shape[0])  # Filled where profiles are stored below.
    }
    if cfg.model_is_hybrid and cfg.exact_solution_available:
        plot_data_dict['src'] = np.zeros(
            (plot_steps.shape[0], cfg.nodes_coarse.shape[0] - 2))
    plot_num = 0
    old_unc = IC
    old_cor = IC
    t0 = (cfg.train_examples_ratio + cfg.test_examples_ratio) * cfg.t_end
    diffs = []
    for i in range(cfg.N_test_examples):
        old_time = np.around(t0 + cfg.dt_coarse * i, decimals=10)
        new_time = np.around(t0 + cfg.dt_coarse * (i + 1), decimals=10)

        # new_unc  = new uncorrected profile given old uncorrected profile.
        # new_unc_ = new uncorrected profile given old   corrected profile.
        new_unc = physics.simulate(cfg.nodes_coarse, cfg.faces_coarse, old_unc,
                                   cfg.get_T_a, cfg.get_T_b, cfg.get_k_approx,
                                   cfg.get_cV, cfg.rho,
                                   cfg.A, cfg.get_q_hat_approx,
                                   np.zeros(cfg.N_coarse), cfg.dt_coarse,
                                   old_time, new_time, False)
        new_unc_ = torch.from_numpy(
            util.z_normalize(
                physics.simulate(cfg.nodes_coarse, cfg.faces_coarse, old_cor,
                                 cfg.get_T_a, cfg.get_T_b, cfg.get_k_approx,
                                 cfg.get_cV, cfg.rho,
                                 cfg.A, cfg.get_q_hat_approx,
                                 np.zeros(cfg.N_coarse), cfg.dt_coarse,
                                 old_time, new_time, False), unc_mean,
                unc_std))

        new_cor = np.zeros_like(old_cor)
        if cfg.model_is_hybrid:
            new_src = util.z_unnormalize(
                model.net(new_unc_).detach().numpy(), src_mean, src_std)
            new_cor = physics.simulate(cfg.nodes_coarse, cfg.faces_coarse,
                                       old_cor, cfg.get_T_a, cfg.get_T_b,
                                       cfg.get_k_approx, cfg.get_cV, cfg.rho,
                                       cfg.A, cfg.get_q_hat_approx, new_src,
                                       cfg.dt_coarse, old_time, new_time,
                                       False)
        else:
            # Boundary values are set analytically since BCs are not
            # predicted by the NN.
            new_cor[0] = cfg.get_T_a(new_time)
            new_cor[-1] = cfg.get_T_b(new_time)
            new_cor[1:-1] = util.z_unnormalize(
                model.net(new_unc_).detach().numpy(), ref_mean, ref_std)

        #lin_unc = lambda x: util.linearize_between_nodes(x, cfg.nodes_coarse, new_unc)
        #lin_cor = lambda x: util.linearize_between_nodes(x, cfg.nodes_coarse, new_cor)

        if cfg.exact_solution_available:
            #ref_func = lambda x: cfg.get_T_exact(x, new_time)
            new_ref = cfg.get_T_exact(cfg.nodes_coarse, new_time)
        else:
            new_ref_tensor = dataset_test[i][1]
            new_ref = util.z_unnormalize(new_ref_tensor.detach().numpy(),
                                         ref_mean, ref_std)
            #ref_func = lambda x: util.linearize_between_nodes(x, cfg.nodes_coarse, new_ref)

        #ref_norm = util.get_L2_norm(cfg.faces_coarse, ref_func)
        #unc_error_norm = util.get_L2_norm(cfg.faces_coarse, lambda x: lin_unc(x) - ref_func(x)) / ref_norm
        #cor_error_norm = util.get_L2_norm(cfg.faces_coarse, lambda x: lin_cor(x) - ref_func(x)) / ref_norm
        ref_norm = util.get_disc_L2_norm(new_ref)
        unc_error_norm = util.get_disc_L2_norm(new_unc - new_ref) / ref_norm
        cor_error_norm = util.get_disc_L2_norm(new_cor - new_ref) / ref_norm
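        # `get_disc_L2_norm` is assumed to be the discrete vector L2 norm,
        # i.e. roughly np.sqrt(np.sum(v ** 2)), so the quantities above are
        # relative L2 errors of the profiles at the coarse nodes.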

        L2_errors_unc[i] = unc_error_norm
        L2_errors_cor[i] = cor_error_norm

        if i in plot_steps:
            plot_data_dict['unc'][plot_num] = new_unc
            plot_data_dict['ref'][plot_num] = new_ref
            plot_data_dict['cor'][plot_num] = new_cor
            if cfg.model_is_hybrid and cfg.exact_solution_available:
                plot_data_dict['src'][plot_num] = new_src
            plot_data_dict['time'][plot_num] = new_time
            plot_num += 1

        if i % 10 == 0 and i <= 50:
            diffs.append(np.asarray(new_cor - new_ref))

        old_unc = new_unc
        old_cor = new_cor

    error_dict = {'unc': L2_errors_unc, 'cor': L2_errors_cor}

    plt.figure()
    for i in range(len(diffs)):
        plt.plot(cfg.nodes_coarse, diffs[i], label=str(10 * i))
    plt.grid()
    plt.legend()
    plt.savefig(os.path.join(cfg.run_dir, "differences.pdf"),
                bbox_inches='tight')
    plt.close()

    return error_dict, plot_data_dict
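
# Typical usage (hypothetical driver; `cfg` and a trained `model` come from the caller):
#
#   errors, plots = simulation_test(cfg, model, num=0)
#   print("mean rel. L2 error, uncorrected:", errors['unc'].mean())
#   print("mean rel. L2 error, corrected:  ", errors['cor'].mean())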
Exemple #25
0
                        help='random crop size',
                        default=128,
                        type=int)

    return parser.parse_args()


if __name__ == '__main__':
    # Trains Noise2Noise.

    # Parse training parameters
    params = parse_args()

    # Train/valid datasets
    train_loader = load_datasets(params.train_dir,
                                 params.train_size,
                                 params.batch_size,
                                 params.noise_param,
                                 params.crop_size,
                                 shuffled=True)
    valid_loader = load_datasets(params.valid_dir,
                                 params.valid_size,
                                 params.batch_size,
                                 params.noise_param,
                                 params.crop_size,
                                 shuffled=False)

    # Initialize model and train
    n2n = TextRemoval(params, trainable=True)
    n2n.train(train_loader, valid_loader)
Exemple #26
0
@Description: 
'''
from datasets import load_datasets
from net import NetWork
from utils import get_num_correct
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torch.utils.data.distributed import DistributedSampler

tb = SummaryWriter()
net = NetWork()
optimizer = optim.Adam(net.parameters(), lr=0.01)
train_loader = load_datasets(1)
# images, labels = next(iter(train_loader))
# grid = torchvision.utils.make_grid(images)
# tb.add_image("image", grid)
# tb.add_graph(net, grid)
torch.distributed.init_process_group(backend='nccl')
# net = torch.nn.parallel.DistributedDataParallel(net)
local_rank = torch.distributed.get_rank()

epoch = 6
for e in range(epoch):
    total_loss = 0
    total_correct = 0
    for batch in train_loader:
        images, labels = batch
        preds = net(images)
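        # The example is truncated here. A minimal completion, consistent with
        # the imports and accumulators above (hypothetical sketch, assuming a
        # cross-entropy classification loss):
        loss = F.cross_entropy(preds, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        total_correct += get_num_correct(preds, labels)

    tb.add_scalar("Loss", total_loss, e)
    tb.add_scalar("Correct", total_correct, e)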
Exemple #27
0
mc = args.mc

#dataset = 'pcmac'
#iteration = '0 1 2 3 4'
#generation = 'first'
#mc = 1000
#datasize = 0.3
#save = True
#verbose = True

if __name__ == '__main__':
    data_path = os.path.join(result_path, dataset)
    if not os.path.isdir(data_path):
        os.makedirs(data_path)

    X, Y = load_datasets(dataset)
    X, Y = X.values, Y.values
    if datasize is not None:
        X, _, Y, _ = train_test_split(X,
                                      Y,
                                      train_size=datasize,
                                      stratify=Y,
                                      random_state=seed)

    scaler = MinMaxScaler()
    X = scaler.fit_transform(X)

    m, n = X.shape

    for id_iter in iterations:
        print("DATASET: {0}, ITER: {1}".format(dataset, str(id_iter)))
Exemple #28
0
def teaching():
    plt.close('all')
    dataset_root = '../../data/'
    datasets = ['blobs_2_class', '2d_outlier', 'blobs_3_class', '3blobs',
                'iris', 'breast_cancer', 'wine',
                'oct', 'butterflies_crop', 'chinese_chars', 'chinese_chars_crowd',
                'butterflies_crop_monarch', 'chinese_chars_binary', 'woodpecker']
    dataset_name = datasets[11]  # 'butterflies_crop_monarch'

    experiment_id = 4
    num_teaching_itrs = 10
    num_random_test_ims = 20
    num_init_hyps = 200
    density_sigma = 1.0
    interp_sigma = 1.0
    dist_sigma = 0.001  # beta in the AAAI'21 paper
    alpha = 0.5
    phi = 3  # eta in the AAAI'21 paper
    beta = 1  # gamma in the AAAI'21 paper
    image_scale = 2.0
    hyp_type = 'rand'  # rand, cluster, cluster_rand, sparse
    dataset_dir = dataset_root + dataset_name + '/'
    url_root = ''  # set this to the location of the images on the web

    save_ops = False
    add_bias = True
    remove_mean = True
    do_pca = False
    pca_dims = 2


    op_dir = 'output/' + str(experiment_id) + '/'
    if save_ops:
        print('saving output to', op_dir)
        if not os.path.isdir(op_dir):
            os.makedirs(op_dir)

    # load data
    dataset_train, dataset_test = ds.load_datasets(dataset_name, dataset_dir, do_pca, pca_dims, add_bias, remove_mean, density_sigma, interp_sigma, dist_sigma)
    # Multi-class problems are handled one-vs-all; two classes means binary.
    one_v_all = len(np.unique(dataset_train['Y'])) > 2


    # generate set of hypotheses
    hyps, prior_h = hp.generate_hyps(dataset_train, alpha, num_init_hyps, hyp_type, one_v_all)
    print(len(hyps), hyp_type, 'hypotheses\n')

    # remove examples that are inconsistent with best hypothesis
    if one_v_all:
        err_hyp = hp.compute_hyps_error_one_vs_all(hyps, dataset_train['X'], dataset_train['Y'], alpha)
    else:
        err_hyp = hp.compute_hyps_error(hyps, dataset_train['X'], dataset_train['Y'], alpha)
    dataset_train = ds.remove_exs(dataset_train, hyps, err_hyp, alpha, 'train', one_v_all)

    # Re-compute hypothesis errors after removing inconsistent examples.
    if one_v_all:
        err_hyp = hp.compute_hyps_error_one_vs_all(hyps, dataset_train['X'], dataset_train['Y'], alpha)
        err_hyp_test = hp.compute_hyps_error_one_vs_all(hyps, dataset_test['X'], dataset_test['Y'], alpha)
    else:
        err_hyp = hp.compute_hyps_error(hyps, dataset_train['X'], dataset_train['Y'], alpha)
        err_hyp_test = hp.compute_hyps_error(hyps, dataset_test['X'], dataset_test['Y'], alpha)

    # compute the likelihood for each datapoint according to each hypothesis
    if one_v_all:
        likelihood = ut.compute_likelihood_one_vs_all(hyps, dataset_train['X'], dataset_train['Y'], alpha)
    else:
        likelihood = ut.compute_likelihood(hyps, dataset_train['X'], dataset_train['Y'], alpha)

    # teachers
    teachers = {}
    if one_v_all:
        teachers['rand_1vall'] = teach.RandomImageTeacherOneVsAll(dataset_train, alpha, prior_h)
        teachers['strict_1vall'] = teach.StrictTeacherOneVsAll(dataset_train, alpha, prior_h)
        teachers['explain_1vall'] = teach.ExplainTeacherOneVsAll(dataset_train, alpha, prior_h)
    else:
        teachers['RANDOM'] = teach.RandomImageTeacher(dataset_train, alpha, prior_h)
        teachers['STRICT'] = teach.StrictTeacher(dataset_train, alpha, prior_h)
        teachers['EXPLAIN'] = teach.ExplainTeacher(dataset_train, alpha, prior_h)
        teachers['ALTA'] = teach.ALTATeacher(dataset_train, alpha, prior_h, phi, beta)

    # run teaching
    for alg_name in list(teachers.keys()):
        print(alg_name)
        teachers[alg_name].run_teaching(num_teaching_itrs, dataset_train, likelihood, hyps, err_hyp, err_hyp_test)

    return teachers
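
# Typical usage of teaching() (hypothetical):
#
#   teachers = teaching()
#   for name, teacher in teachers.items():
#       print(name, type(teacher).__name__)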


#----------------------------------------------------------------------------
def single_step_test(cfg, model, num):
    if cfg.model_name.startswith("Ensemble"):
        for m in range(len(model.nets)):
            model.nets[m].net.eval()
    else:
        model.net.eval()

    _, _, dataset_test = load_datasets(cfg, False, False, True)

    unc_tensor = dataset_test[:][0].detach()
    ref_tensor = dataset_test[:][1].detach()
    stats = dataset_test[:6][3].detach().numpy()
    ICs = dataset_test[:][4].detach().numpy()
    times = dataset_test[:][5].detach().numpy()

    unc_mean = stats[0]
    unc_std = stats[3]
    ref_mean = stats[1]
    ref_std = stats[4]
    src_mean = stats[2]
    src_std = stats[5]

    unc = util.z_unnormalize(unc_tensor.numpy(), unc_mean, unc_std)
    ref = util.z_unnormalize(ref_tensor.numpy(), ref_mean, ref_std)

    L2_errors_unc = np.zeros(cfg.N_test_examples)
    L2_errors_cor = np.zeros(cfg.N_test_examples)

    num_profile_plots = cfg.profile_save_steps.shape[0]
    plot_data_dict = {
        'x': cfg.nodes_coarse,
        'unc': np.zeros((num_profile_plots, cfg.nodes_coarse.shape[0])),
        'ref': np.zeros((num_profile_plots, cfg.nodes_coarse.shape[0])),
        'cor': np.zeros((num_profile_plots, cfg.nodes_coarse.shape[0])),
        'time': np.zeros(num_profile_plots)
    }
    if cfg.model_is_hybrid and cfg.exact_solution_available:
        plot_data_dict['src'] = np.zeros(
            (num_profile_plots, cfg.nodes_coarse.shape[0] - 2))
    for i in range(cfg.N_test_examples):
        old_time = np.around(times[i] - cfg.dt_coarse, decimals=10)
        new_time = np.around(times[i], decimals=10)
        #print("New time:", new_time)

        new_unc = unc[i]
        new_unc_tensor = torch.unsqueeze(unc_tensor[i], 0)
        # ICs[i] is the profile at old_time that was used to generate new_unc,
        # which is a profile at new_time.
        IC = ICs[i]

        new_cor = np.zeros_like(new_unc)
        if cfg.model_name.startswith("Ensemble"):
            if cfg.model_is_hybrid:
                new_src = np.zeros(new_unc.shape[0] - 2)
                for m in range(len(model.nets)):
                    new_src[m] = util.z_unnormalize(
                        torch.squeeze(
                            model.nets[m].net(new_unc_tensor[:, m:m + 3]),
                            0).detach().numpy(), src_mean, src_std)
                new_cor = physics.simulate(
                    cfg.nodes_coarse, cfg.faces_coarse, IC, cfg.get_T_a,
                    cfg.get_T_b, cfg.get_k_approx, cfg.get_cV, cfg.rho, cfg.A,
                    cfg.get_q_hat_approx, new_src, cfg.dt_coarse, old_time,
                    new_time, False)
            else:
                # Boundary values are set analytically since BCs are not
                # predicted by the NN.
                new_cor[0] = cfg.get_T_a(new_time)
                new_cor[-1] = cfg.get_T_b(new_time)
                for m in range(len(model.nets)):
                    new_cor[m + 1] = util.z_unnormalize(
                        torch.squeeze(
                            model.nets[m].net(new_unc_tensor[:, m:m + 3]),
                            0).detach().numpy(), ref_mean, ref_std)
        else:
            if cfg.model_is_hybrid:
                new_src = util.z_unnormalize(
                    torch.squeeze(model.net(new_unc_tensor),
                                  0).detach().numpy(), src_mean, src_std)
                new_cor = physics.simulate(
                    cfg.nodes_coarse, cfg.faces_coarse, IC, cfg.get_T_a,
                    cfg.get_T_b, cfg.get_k_approx, cfg.get_cV, cfg.rho, cfg.A,
                    cfg.get_q_hat_approx, new_src, cfg.dt_coarse, old_time,
                    new_time, False)
            else:
                # Boundary values are set analytically since BCs are not
                # predicted by the NN.
                new_cor[0] = cfg.get_T_a(new_time)
                new_cor[-1] = cfg.get_T_b(new_time)
                new_cor[1:-1] = util.z_unnormalize(
                    torch.squeeze(model.net(new_unc_tensor),
                                  0).detach().numpy(), ref_mean, ref_std)

        #lin_unc = lambda x: util.linearize_between_nodes(x, cfg.nodes_coarse, new_unc)
        #lin_cor = lambda x: util.linearize_between_nodes(x, cfg.nodes_coarse, new_cor)

        if cfg.exact_solution_available:
            #ref_func = lambda x: cfg.get_T_exact(x, new_time)
            new_ref = cfg.get_T_exact(cfg.nodes_coarse, new_time)
        else:
            new_ref = util.z_unnormalize(ref_tensor[i].detach().numpy(),
                                         ref_mean, ref_std)
            #ref_func = lambda x: util.linearize_between_nodes(x, cfg.nodes_coarse, new_ref)

        #ref_norm = util.get_L2_norm(cfg.faces_coarse, ref_func)
        #unc_error_norm = util.get_L2_norm(cfg.faces_coarse, lambda x: lin_unc(x) - ref_func(x)) / ref_norm
        #cor_error_norm = util.get_L2_norm(cfg.faces_coarse, lambda x: lin_cor(x) - ref_func(x)) / ref_norm
        ref_norm = util.get_disc_L2_norm(new_ref)
        unc_error_norm = util.get_disc_L2_norm(new_unc - new_ref) / ref_norm
        cor_error_norm = util.get_disc_L2_norm(new_cor - new_ref) / ref_norm

        L2_errors_unc[i] = unc_error_norm
        L2_errors_cor[i] = cor_error_norm

        if i < num_profile_plots:
            plot_data_dict['unc'][i] = new_unc
            plot_data_dict['ref'][i] = new_ref
            plot_data_dict['cor'][i] = new_cor
            if cfg.model_is_hybrid and cfg.exact_solution_available:
                plot_data_dict['src'][i] = new_src
            plot_data_dict['time'][i] = new_time

    error_dict = {
        'unc': L2_errors_unc,
        'cor': L2_errors_cor,
        'unc_mean': np.mean(L2_errors_unc),
        'unc_std': np.std(L2_errors_unc),
        'cor_mean': np.mean(L2_errors_cor),
        'cor_std': np.std(L2_errors_cor)
    }

    return error_dict, plot_data_dict
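
# Typical usage (hypothetical):
#
#   errors, plots = single_step_test(cfg, model, num=0)
#   print(f"unc: {errors['unc_mean']:.3e} +/- {errors['unc_std']:.3e}")
#   print(f"cor: {errors['cor_mean']:.3e} +/- {errors['cor_std']:.3e}")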