Example #1
def postprocess(configs,
                dataset,
                model,
                criterion,
                logger,
                node=None,
                waveform_transforms=None,
                save_dir=None,
                name="train"):
    results = {}
    with torch.no_grad():
        model.eval()
        inputs, targets = dataset[:]
        indices = torch.argsort(targets[:, 0], dim=0)
        inputs, targets = inputs[indices], targets[indices]
        if waveform_transforms is not None:
            inputs, targets = waveform_transforms([inputs, targets])
        if inputs.device != TorchUtils.get_accelerator_type():
            inputs = inputs.to(device=TorchUtils.get_accelerator_type())
        if targets.device != TorchUtils.get_accelerator_type():
            targets = targets.to(device=TorchUtils.get_accelerator_type())
        predictions = model(inputs)
        results["performance"] = criterion(predictions, targets)

    # results['gap'] = dataset.gap
    results["inputs"] = inputs
    results["targets"] = targets
    results["best_output"] = predictions
    results["accuracy"] = get_accuracy(
        predictions, targets, configs, node=node
    )  # accuracy(predictions.squeeze(), targets.squeeze(), plot=None, return_node=True)
    results["correlation"] = pearsons_correlation(predictions, targets)
    # results['accuracy_fig'] = plot_perceptron(results['accuracy'], save_dir, name=name)

    return results
Example #2
    def is_equal_to_numpy(self):
        a = np.random.rand(25)
        b = np.random.rand(25)

        # Same data as torch.Tensors:
        at = torch.from_numpy(a)
        bt = torch.from_numpy(b)

        coef1 = np.corrcoef(a, b)
        coef2 = pearsons_correlation(at, bt)
        eq = np.allclose(coef1[0, 1], coef2.cpu().numpy())
        print("Numpy & Torch complex covariance results equal? > {}".format(eq))
        return eq
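
For reference, a minimal plain-PyTorch sketch of the quantity this example checks against np.corrcoef (the pearson_corr helper below is an illustration, not the library's actual pearsons_correlation implementation):

import torch

def pearson_corr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Pearson coefficient: covariance of x and y divided by the product
    # of their standard deviations.
    vx = x - x.mean()
    vy = y - y.mean()
    return (vx * vy).sum() / (torch.sqrt((vx**2).sum()) * torch.sqrt((vy**2).sum()))
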
Example #3
def postprocess(results,
                model,
                node_configs,
                logger=None,
                node=None,
                save_dir=None):
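    # Post-process GA results: abort on NaN/Inf predictions, compute accuracy
    # and correlation, and compare the accuracy against the threshold to
    # decide the verdict for this gate.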
    if (torch.isnan(results["predictions"]).any()
            or torch.isinf(results["predictions"]).any()):
        print(
            "NaN or Inf values detected in the predictions. It is likely that the gradients of the model exploded. Skipping..."
        )
        results["veredict"] = False
        return results
    results["accuracy"] = get_accuracy(
        results["predictions"], results["targets"], node_configs, node
    )  # accuracy(predictions.squeeze(), targets.squeeze(), plot=None, return_node=True)
    results["correlation"] = pearsons_correlation(results["predictions"],
                                                  results["targets"])

    if (results["accuracy"]["accuracy_value"] / 100) >= results["threshold"]:
        results["veredict"] = True
    else:
        results["veredict"] = False
    results["summary"] = ("VC Dimension: " + str(len(results["targets"])) +
                          " Gate: " + results["gate"] + " Veredict: " +
                          str(results["veredict"]) +
                          "\n Accuracy (Simulation): " +
                          str(results["accuracy"]["accuracy_value"]) + "/" +
                          str(results["threshold"]))

    # results["results_fig"] =
    plot_results(results, save_dir)
    # results["performance_fig"] =
    plot_performance(results, save_dir=save_dir)
    # results["accuracy_fig"] =
    plot_perceptron(results["accuracy"], save_dir)
    print(results["summary"])
    # if logger is not None:
    #     logger.log.add_figure(
    #         "Results/VCDim" + str(len(results["targets"])) + "/" + results["gate"],
    #         results["results_fig"],
    #     )
    #     logger.log.add_figure(
    #         "Accuracy/VCDim" + str(len(results["targets"])) + "/" + results["gate"],
    #         results["accuracy_fig"],
    #     )
    return results
Example #4
def _validate(model, results, criterion, hw_processor_configs):
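    # Re-run the trained model in hardware-evaluation mode and recompute the
    # performance, accuracy and correlation obtained during simulation.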
    with torch.no_grad():
        model.hw_eval(hw_processor_configs)
        predictions = model(results["inputs"])
        results["performance"] = criterion(predictions, results["targets"])

    # results['gap'] = dataset.gap
    results["best_output"] = predictions
    print(
        f"Simulation accuracy: {results['accuracy']['accuracy_value'].item()}"
    )
    results['accuracy'] = get_accuracy(
        predictions,
        results["targets"],
        configs=results['accuracy']['configs'],
        node=results['accuracy']['node']
    )  # accuracy(predictions.squeeze(), targets.squeeze(), plot=None, return_node=True)
    print(
        f"Hardware accuracy: {results['accuracy']['accuracy_value'].item()}"
    )

    results["correlation"] = pearsons_correlation(predictions,
                                                  results["targets"])
    return results
Example #5
def train(
    model,
    dataloaders,
    criterion,
    optimizer,
    configs,
    logger=None,
    save_dir=None,
    waveform_transforms=None,
    return_best_model=True,
):
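    # Genetic-algorithm search over control voltages: each generation evaluates
    # the optimizer's pool of genomes, keeps the best fitness/correlation seen
    # so far, and stops early once the correlation threshold is reached.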

    # Evolution loop
    looper = trange(configs["epochs"], desc="Initialising", leave=False)
    pool = optimizer.pool
    best_fitness = -np.inf
    best_correlation = -np.inf
    best_result_index = -1
    genome_history = []
    performance_history = []
    correlation_history = []
    clipping_value = model.get_clipping_value()
    with torch.no_grad():
        model.eval()
        for epoch in looper:
            inputs, targets = dataloaders[0].dataset[:]
            inputs, targets = process_data(waveform_transforms, inputs, targets)
            outputs, criterion_pool = evaluate_population(
                inputs, targets, pool, model, criterion, clipvalue=clipping_value
            )

            # log results
            current_best_index = torch.argmax(
                criterion_pool
            )  # Best output index ignoring nan values

            best_current_output = outputs[current_best_index]
            performance_history.append(
                criterion_pool[current_best_index].detach().cpu()
            )

            genome_history.append(pool[current_best_index].detach().cpu())
            correlation_history.append(
                pearsons_correlation(best_current_output, targets).detach().cpu()
            )
            looper.set_description(
                "  Gen: "
                + str(epoch + 1)
                + ". Max fitness: "
                + str(performance_history[-1].item())
                + ". Corr: "
                + str(correlation_history[-1].item())
            )
            if performance_history[-1] > best_fitness:
                best_fitness = performance_history[-1]
                best_result_index = epoch
                best_correlation = correlation_history[-1].detach().cpu()
                best_output = best_current_output.detach().cpu()
                model.set_control_voltages(genome_history[best_result_index])
                if save_dir is not None:
                    if model.is_hardware():
                        torch.save(model.state_dict(), os.path.join(save_dir, "model.pt"))
                    else:
                        torch.save(model, os.path.join(save_dir, "model.pt"))

            # Check if the best correlation has reached the desired threshold
            if best_correlation >= configs["stop_threshold"]:
                looper.set_description(
                    f"  STOPPED: Correlation {best_correlation} > {configs['stop_threshold']} stopping threshold. "
                )
                looper.close()
                # Close the model adequately if it is on hardware
                if model.is_hardware() and "close" in dir(model):
                    model.close()
                break

            pool = optimizer.step(criterion_pool)

        if return_best_model:  # Return the best model
            if model.is_hardware():
                model.load_state_dict(torch.load(os.path.join(save_dir, "model.pt")))
            else:
                model = torch.load(os.path.join(save_dir, "model.pt"))

        print("Best fitness: " + str(best_fitness.item()))
        return model, {
            "best_result_index": best_result_index,
            "genome_history": genome_history,
            "performance_history": [TorchUtils.get_tensor_from_list(performance_history), TorchUtils.get_tensor_from_list([])],
            "correlation_history": correlation_history,
            "best_output": best_output,
        }
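
From the call sites above, train() only assumes that the optimizer exposes a pool tensor of candidate genomes and a step(criterion_pool) method that returns the next pool. A minimal stand-in satisfying that interface (RandomSearchOptimizer is a hypothetical name for illustration, not a class from the library):

import torch

class RandomSearchOptimizer:
    def __init__(self, pool_size, genome_size, low=-1.0, high=1.0):
        self.low, self.high = low, high
        # Pool of candidate control-voltage genomes, sampled uniformly.
        self.pool = torch.empty(pool_size, genome_size).uniform_(low, high)

    def step(self, criterion_pool):
        # Keep the genomes with the highest fitness and resample the rest
        # at random, preserving the pool size.
        k = max(1, self.pool.shape[0] // 2)
        best = torch.topk(criterion_pool.flatten(), k).indices
        survivors = self.pool[best]
        newcomers = torch.empty(
            self.pool.shape[0] - k, self.pool.shape[1]
        ).uniform_(self.low, self.high)
        self.pool = torch.cat([survivors, newcomers], dim=0)
        return self.pool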