Example #1
def default_validate_gate(gate_base_dir, validation_processor_configs):
    model = torch.load(
        os.path.join(gate_base_dir, 'reproducibility', 'model.pt'),
        map_location=torch.device(TorchUtils.get_accelerator_type()))
    results = torch.load(
        os.path.join(gate_base_dir, 'reproducibility', 'results.pickle'),
        map_location=torch.device(TorchUtils.get_accelerator_type()))
    experiment_configs = load_configs(
        os.path.join(gate_base_dir, 'reproducibility', 'configs.yaml'))

    results_dir = init_dirs(gate_base_dir, is_main=True)

    criterion = manager.get_criterion(experiment_configs["algorithm"])

    waveform_transforms = transforms.Compose([
        PlateausToPoints(
            experiment_configs['processor']["data"]['waveform']
        ),  # Required to remove plateaus from training because the perceptron cannot accept fewer than 10 values for each gate
        PointsToPlateaus(validation_processor_configs["data"]["waveform"])
    ])

    validate_gate(model,
                  results,
                  validation_processor_configs,
                  criterion,
                  results_dir=results_dir,
                  transforms=waveform_transforms)
Example #2
def data_loader(data_directory):
    config_path = os.path.join(data_directory, 'sampler_configs.json')
    configs = load_configs(config_path)
    data_path = os.path.join(data_directory, 'IO.dat')
    data = np.loadtxt(data_path)
    inputs = data[:, :configs["input_data"]["input_electrodes"]]
    outputs = data[:, -configs["input_data"]["output_electrodes"]:]
    return inputs, outputs, configs
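A minimal usage sketch for the loader above (not part of the original example); the directory path is a placeholder and is assumed to contain the sampler_configs.json and IO.dat files that data_loader expects:

# Hypothetical call to the data_loader defined above; 'tmp/data/sample_run' is a placeholder path.
inputs, outputs, configs = data_loader('tmp/data/sample_run')
# Columns are split into inputs and outputs according to the electrode counts stored in the configs.
print(inputs.shape, outputs.shape)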
Example #3
def consistency_check(configs_path):

    configs = load_configs(configs_path)
    sampler = ConsistencyChecker(configs)

    outputs, deviations, correlation, deviation_chargeup = sampler.get_data()

    mean_output = np.mean(outputs, axis=0)
    std_output = np.std(outputs, axis=0)

    plt.figure()
    plt.plot(mean_output, 'k', label='mean over repetitions')
    plt.plot(mean_output + std_output, ':k')
    plt.plot(mean_output - std_output, ':k', label='stdev over repetitions')
    plt.plot(sampler.reference_outputs, 'r', label='reference signal')
    plt.title(
        f'Consistency over {sampler.configs_checker["repetitions"]} trials with same input'
    )
    plt.legend()
    plt.savefig(
        os.path.join(configs["path_to_reference_data"], 'consistency_check'))

    plt.figure()
    plt.plot(mean_output - sampler.reference_outputs,
             "b",
             label="mean - reference")
    plt.plot(std_output, ":k", label="stdev over repetitions")
    plt.plot(-std_output, ":k")
    plt.title("Difference Mean Signal and Reference Signal")
    plt.legend()
    plt.savefig(
        os.path.join(configs["path_to_reference_data"], 'diff_mean-ref'))

    plt.figure()
    plt.hist(deviations)
    plt.title("Deviations of Reference Signal")
    plt.savefig(
        os.path.join(configs["path_to_reference_data"], 'hist_deviations'))

    plt.figure()
    plt.plot(deviation_chargeup)
    plt.title("DEVIATIONS WHILE CHARGING UP")
    plt.savefig(
        os.path.join(configs["path_to_reference_data"],
                     'deviations_while_charging_up'))

    plt.show()
    print("DONE!")
Example #4
def validate_vcdim(vcdim_base_dir, validation_processor_configs, is_main=True):
    base_dir = init_dirs(vcdim_base_dir, is_main=is_main)
    dirs = [
        os.path.join(vcdim_base_dir, o) for o in os.listdir(vcdim_base_dir)
        if os.path.isdir(os.path.join(vcdim_base_dir, o))
    ]

    for d in dirs:
        if os.path.split(d)[1] != "validation":
            gate_dir = create_directory(
                os.path.join(base_dir,
                             d.split(os.path.sep)[-1]))
            model = torch.load(os.path.join(d, 'reproducibility', 'model.pt'),
                               map_location=torch.device(
                                   TorchUtils.get_accelerator_type()))
            results = torch.load(
                os.path.join(d, 'reproducibility', "results.pickle"),
                map_location=torch.device(TorchUtils.get_accelerator_type()))
            experiment_configs = load_configs(
                os.path.join(d, 'reproducibility', "configs.yaml"))
            #results_dir = init_dirs(d, is_main=is_main)

            criterion = manager.get_criterion(experiment_configs["algorithm"])

            waveform_transforms = transforms.Compose([
                PlateausToPoints(
                    experiment_configs['processor']["data"]['waveform']
                ),  # Required to remove plateaus from training because the perceptron cannot accept fewer than 10 values for each gate
                PointsToPlateaus(
                    validation_processor_configs["data"]["waveform"])
            ])

            # validate_gate(os.path.join(d, "reproducibility"), base_dir, is_main=False)
            validate_gate(model,
                          results,
                          validation_processor_configs,
                          criterion,
                          results_dir=gate_dir,
                          transforms=waveform_transforms,
                          is_main=False)
Example #5
    def __init__(self, configs):
        super().__init__(load_configs(configs['path_to_sampler_configs']))
        _, batch_size, _ = self.init_configs()
        self.configs_checker = configs
        path_to_file = os.path.join(
            self.configs_checker['path_to_reference_data'],
            self.configs_checker['reference_batch_name'])
        with np.load(path_to_file) as data:
            self.reference_outputs = data['outputs']
            self.reference_inputs = data['inputs'].T
        path_to_file = os.path.join(
            self.configs_checker['path_to_reference_data'],
            self.configs_checker['data_name'])
        with np.load(path_to_file) as data:
            self.chargingup_outputs = data['outputs']
            self.chargingup_inputs = data['inputs'].T
        self.nr_samples = len(self.reference_outputs)
        assert self.nr_samples % batch_size == 0, f"Reference signal length {self.nr_samples} is not a multiple of the batch size {batch_size}; possible data mismatch!"
        self.configs_checker['batch_size'] = batch_size

        self.results_filename = os.path.join(
            self.configs_checker['path_to_reference_data'],
            'consistency_results.npz')
Example #6
    from torchvision import transforms

    from brainspy.utils import manager
    from bspytasks.boolean.logger import Logger
    from brainspy.utils.io import load_configs
    from brainspy.utils.transforms import (
        DataToTensor,
        DataPointsToPlateau,
        DataToVoltageRange,
    )
    from brainspy.processors.dnpu import DNPU

    V_MIN = [-1.2, -1.2]
    V_MAX = [0.6, 0.6]

    configs = load_configs("configs/boolean.yaml")
    data_transforms = transforms.Compose(
        [DataToVoltageRange(V_MIN, V_MAX, -1, 1), DataToTensor('cpu')]
    )

    waveform_transforms = transforms.Compose(
        [DataPointsToPlateau(configs["processor"]["data"]["waveform"])]
    )

    criterion = manager.get_criterion(configs["algorithm"])
    algorithm = manager.get_algorithm(configs["algorithm"])

    configs["current_dimension"] = 4
    results = vc_dimension_test(
        configs,
        DNPU,
Example #7
        PointsToPlateaus(validation_processor_configs["data"]["waveform"])
    ])

    validate_gate(model,
                  results,
                  validation_processor_configs,
                  criterion,
                  results_dir=results_dir,
                  transforms=waveform_transforms)


if __name__ == "__main__":
    from torchvision import transforms

    from brainspy.utils.io import load_configs
    from brainspy.utils.transforms import PointsToPlateaus, PlateausToPoints
    from brainspy.utils import manager
    from brainspy.utils.pytorch import TorchUtils

    validation_processor_configs = load_configs(
        "configs/defaults/processors/hw.yaml")

    capacity_base_dir = "tmp/TEST/output/boolean/capacity_test_2020_09_21_155613"
    vcdim_base_dir = '/home/unai/Documents/3-programming/brainspy-tasks/tmp/TEST/output/boolean/vc_dimension_4_2020_09_24_190737'
    gate_base_dir = '/home/unai/Documents/3-programming/brainspy-tasks/tmp/TEST/output/boolean/[0, 0, 0, 1]_2020_09_24_181148'

    #default_validate_gate(gate_base_dir, validation_processor_configs)
    validate_vcdim(vcdim_base_dir, validation_processor_configs)

    # validate_capacity(capacity_base_dir, validation_processor_configs)
Example #8
        base_dir = os.path.join(base_dir, gate)
        create_directory(base_dir)
    return base_dir


if __name__ == "__main__":
    from torchvision import transforms

    from brainspy.utils.io import load_configs
    from brainspy.utils.transforms import PointsToPlateaus
    from brainspy.algorithms.modules.signal import fisher

    base_dir = "tmp/TEST/output/ring/ring_classification_gap_0.00625_2020_09_23_140014"
    model, results = load_reproducibility_results(base_dir)

    configs = load_configs("configs/ring.yaml")
    hw_processor_configs = load_configs("configs/defaults/processors/hw.yaml")
    waveform_transforms = transforms.Compose(
        [PointsToPlateaus(hw_processor_configs["data"]["waveform"])])

    results_dir = init_dirs(os.path.join(base_dir, "validation"))

    validate(
        model,
        results,
        hw_processor_configs,
        fisher,
        results_dir,
        transforms=waveform_transforms,
    )
Example #9
import matplotlib.pyplot as plt
from brainspy.utils.io import load_configs
from bspysmg.measurement.data.output.sampler_mgr import Sampler
from bspysmg.measurement.data.processing.postprocessing import post_process

CONFIGS = load_configs(
    'configs/sampling/sampling_configs_template_cdaq_to_cdaq.yaml')
sampler = Sampler(CONFIGS)
path_to_data = sampler.get_data()

INPUTS, OUTPUTS, INFO_DICT = post_process(path_to_data,
                                          clipping_value=[-110, 110])

print(f"max out {OUTPUTS.max()} max min {OUTPUTS.min()} shape {OUTPUTS.shape}")
plt.show()
Example #10
if __name__ == "__main__":
    from brainspy.utils.io import load_configs
    from bspysmg.model.data.outputs.train_model import generate_surrogate_model

    configs = load_configs("configs/training/smg_configs_template.yaml")

    generate_surrogate_model(configs)
Example #11
                        axs[i, j].set_ylabel('input (V)')
                        axs[i, j].set_xlabel('points', labelpad=1)
                        axs[i, j].set_title("Input Waveform")
                        axs[i, j].xaxis.grid(True)
                        axs[i, j].yaxis.grid(True)
        plt.show()


if __name__ == '__main__':

    from brainspy.utils.io import load_configs
    configs = {}
    configs['results_base_dir'] = 'tmp/tests/iv'
    configs['show_plots'] = True
    configs['devices'] = ['A', 'B', 'C', 'D', 'E']
    configs['shape'] = 500  # length of the experiment
    configs['waveform'] = {}
    configs['waveform']['V_high'] = 0.75
    configs['waveform']['V_low'] = -0.75
    configs['waveform']['input_type'] = 'sine'
    configs['waveform']['time'] = 5
    configs['waveform']['direction'] = 'up'

    configs['processor'] = load_configs(
        'C:/Users/braml/Documents/Github/ring-example/processor_iv_curves.yaml'
    )

    suite = unittest.TestSuite()
    suite.addTest(IVtest(configs))
    unittest.TextTestRunner().run(suite)
Example #12
            'c_dnpu_output':
            self.dnpu_output.clone().detach(),
            'd_clamped_dnpu_output':
            self.clamped_dnpu_output.clone().detach(),
            'e_batch_norm_output':
            self.batch_norm_output.clone().detach()
        })


if __name__ == "__main__":
    from brainspy.utils.io import load_configs
    import matplotlib.pyplot as plt
    import time

    NODE_CONFIGS = load_configs(
        "/home/hruiz/Documents/PROJECTS/DARWIN/Code/packages/brainspy/brainspy-processors/configs/configs_nn_model.json"
    )
    node = DNPU(NODE_CONFIGS)
    # linear_layer = nn.Linear(20, 3).to(device=TorchUtils.get_accelerator_type())
    # dnpu_layer = DNPU_Channels([[0, 3, 4]] * 1000, node)
    linear_layer = nn.Linear(20,
                             300).to(device=TorchUtils.get_accelerator_type())
    dnpu_layer = DNPU_Layer([[0, 3, 4]] * 100, node)

    model = nn.Sequential(linear_layer, dnpu_layer)

    data = torch.rand((200, 20)).to(device=TorchUtils.get_accelerator_type())
    start = time.time()
    output = model(data)
    end = time.time()
Example #13
                        axs[i, j].set_ylabel('input (V)')
                        axs[i, j].set_xlabel('points', labelpad=1)
                        axs[i, j].set_title("Input Waveform")
                        axs[i, j].xaxis.grid(True)
                        axs[i, j].yaxis.grid(True)
        plt.show()


if __name__ == '__main__':

    from brainspy.utils.io import load_configs
    configs = {}
    configs['results_base_dir'] = 'tmp/tests/iv'
    configs['show_plots'] = True
    configs['devices'] = ['A']  #, 'B', 'C', "D", 'E']
    configs['shape'] = 500  # length of the experiment
    configs['waveform'] = {}
    configs['waveform']['V_high'] = 1.25
    configs['waveform']['V_low'] = -1.25
    configs['waveform']['input_type'] = 'sine'
    configs['waveform']['time'] = 5
    configs['waveform']['direction'] = 'up'

    configs['processor'] = load_configs(
        'C:/Users/bram/Documents/Github/examples-multiple-devices/processor_iv_curves.yaml'
    )

    suite = unittest.TestSuite()
    suite.addTest(IVtest(configs))
    unittest.TextTestRunner().run(suite)
Example #14
import torch
from brainspy.processors.simulation.model import NeuralNetworkModel
from brainspy.utils.io import load_configs
from brainspy.utils.pytorch import TorchUtils

from bspysmg.model.data.outputs.train_model import train_surrogate_model

#TorchUtils.force_cpu = True

CONFIGS = load_configs('configs/training/smg_configs_template.yaml')

MODEL = NeuralNetworkModel(CONFIGS['processor'])
OPTIMIZER = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    MODEL.parameters()),
                             lr=CONFIGS['hyperparameters']['learning_rate'])
CRITERION = torch.nn.MSELoss()
train_surrogate_model(CONFIGS, MODEL, CRITERION, OPTIMIZER)
Example #15
    plt.xlabel("Accuracy values")
    plt.ylabel("Counts")
    plt.savefig(os.path.join(save_dir, "accuracy_histogram_" + label + "." + extension))

    if show_plots:
        plt.show()


if __name__ == "__main__":

    from torchvision import transforms

    from brainspy.utils import manager
    from brainspy.utils.io import load_configs
    from brainspy.utils.transforms import DataToTensor, DataToVoltageRange
    from brainspy.processors.dnpu import DNPU

    V_MIN = [-1.2, -1.2]
    V_MAX = [0.6, 0.6]

    data_transforms = transforms.Compose(
        [DataToVoltageRange(V_MIN, V_MAX, -1, 1), DataToTensor(torch.device('cpu'))]
    )

    configs = load_configs("configs/ring.yaml")

    criterion = manager.get_criterion(configs["algorithm"])
    algorithm = manager.get_algorithm(configs["algorithm"])

    search_solution(configs, DNPU, criterion, algorithm, transforms=data_transforms)
Example #16
        print('Repeating the experiment...')
        count = 0
        while nr_samples > count * batch:
            indices = list(range(batch))
            if None in indices:
                indices = [index for index in indices if index is not None]
            yield indices
            count += 1


if __name__ == '__main__':

    from brainspy.utils.io import load_configs
    import matplotlib.pyplot as plt

    CONFIGS = load_configs('configs/sampling/sampling_configs_template.json')
    sampler = Sampler(CONFIGS)
    # CONFIGS = load_configs('configs/sampling/toy_sampling_configs_template.json')
    # sampler = Sampler(CONFIGS)
    # CONFIGS = load_configs('configs/sampling/toy_sampling_configs_template.json')
    # sampler = Repeater(CONFIGS)

    path_to_data = sampler.get_data()

    INPUTS, OUTPUTS, INFO_DICT = sampler.load_data(path_to_data)

    # OUTPUTS = OUTPUTS.reshape((-1,INFO_DICT['input_data']["batch_points"])).T
    plt.figure()
    plt.hist(OUTPUTS, 500)
    plt.show()
Example #17
import torch
from brainspy.processors.simulation.model import NeuralNetworkModel
from brainspy.utils.io import load_configs
from brainspy.utils.pytorch import TorchUtils

from bspysmg.model.data.outputs.train_model import train_surrogate_model

#TorchUtils.force_cpu = True

CONFIGS = load_configs(
    'configs/training/smg_configs_template_multiple_outputs.yaml')

MODEL = NeuralNetworkModel(CONFIGS['processor'])
OPTIMIZER = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    MODEL.parameters()),
                             lr=CONFIGS['hyperparameters']['learning_rate'])
CRITERION = torch.nn.MSELoss()
train_surrogate_model(CONFIGS, MODEL, CRITERION, OPTIMIZER)
Example #18
    for i in range(len(configs['readout_channels'])):
        readout_channel_list.append(configs['readout_instrument'] + "/ai" +
                                    str(configs['readout_channels'][i]))

    return readout_channel_list


def get_mask(configs):
    if 'activation_channel_mask' in configs:
        return np.array(configs['activation_channel_mask'])
    else:
        return None


def add_uniquely(original_list, value):
    if value not in original_list:
        original_list.append(value)
    return original_list


if __name__ == "__main__":
    from brainspy.utils.io import load_configs

    configs = load_configs(
        '/home/unai/Documents/3-programming/brainspy-tasks/configs/defaults/processors/hw.yaml'
    )

    a, r, ins, vr = init_channel_data(configs['driver'])
    print(a)
    print(r)
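A brief sketch illustrating the get_mask and add_uniquely helpers above with hypothetical values (not part of the original example):

# get_mask returns None when the configs contain no 'activation_channel_mask' key.
assert get_mask({'other_key': 1}) is None
# add_uniquely appends a value only if it is not already present in the list.
assert add_uniquely([1, 2], 2) == [1, 2]
assert add_uniquely([1, 2], 3) == [1, 2, 3]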