Example #1
def test_resample_r_off_r_on(debug_networks):
    networks = debug_networks
    for network in networks:
        with pytest.raises(Exception):
            patched_network = patch_model(
                copy.deepcopy(network),
                memristor_model=memtorch.bh.memristor.LinearIonDrift,
                memristor_model_params={
                    'r_off': memtorch.bh.StochasticParameter(loc=1, scale=0),
                    'r_on': memtorch.bh.StochasticParameter(loc=1, scale=0)
                },
                module_parameters_to_patch=[type(network.layer)],
                mapping_routine=naive_map,
                transistor=True,
                programming_routine=None,
                scheme=memtorch.bh.Scheme.SingleColumn)
        with pytest.raises(Exception):
            patched_network = patch_model(
                copy.deepcopy(network),
                memristor_model=memtorch.bh.memristor.LinearIonDrift,
                memristor_model_params={
                    'r_off': 1,
                    'r_on': 1
                },
                module_parameters_to_patch=[type(network.layer)],
                mapping_routine=naive_map,
                transistor=True,
                programming_routine=None,
                scheme=memtorch.bh.Scheme.SingleColumn)
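These tests rely on a debug_networks pytest fixture that yields small single-layer models whose layer attribute is patched. A minimal sketch of such a fixture, written as an assumption (the real fixture lives elsewhere in the MemTorch test suite):

import pytest
import torch


class DebugNetwork(torch.nn.Module):
    """Minimal single-layer model; `layer` is the module the tests patch."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(2, 1)

    def forward(self, x):
        return self.layer(x)


@pytest.fixture
def debug_networks():
    # A list, because the tests iterate over several debug networks.
    return [DebugNetwork()]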
Example #2
    def debug_patched_networks_(tile_shape, quant_method):
        if quant_method is not None:
            ADC_resolution = 8
        else:
            ADC_resolution = None

        networks = debug_networks
        device = torch.device("cpu" if "cpu" in
                              memtorch.__version__ else "cuda")
        patched_networks = []
        for network in networks:
            patched_networks.append(
                patch_model(
                    network,
                    memristor_model=memtorch.bh.memristor.LinearIonDrift,
                    memristor_model_params={"time_series_resolution": 0.1},
                    module_parameters_to_patch=[type(network.layer)],
                    mapping_routine=naive_map,
                    transistor=True,
                    programming_routine=None,
                    scheme=memtorch.bh.Scheme.SingleColumn,
                    tile_shape=tile_shape,
                    max_input_voltage=1.0,
                    ADC_resolution=ADC_resolution,
                    quant_method=quant_method,
                ))

        return patched_networks
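In the original test suite this factory is presumably returned by an enclosing fixture so that individual tests can pick tile_shape and quant_method themselves. A hedged sketch of that wrapper, written as an assumption (the enclosing fixture is not shown in this excerpt):

import pytest


@pytest.fixture
def debug_patched_networks(debug_networks):
    def debug_patched_networks_(tile_shape, quant_method):
        ...  # body as in the example above
    return debug_patched_networks_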
Example #3
def test_networks(debug_networks):
    networks = debug_networks
    for network in networks:
        patched_network = patch_model(
            copy.deepcopy(network),
            memristor_model=memtorch.bh.memristor.LinearIonDrift,
            memristor_model_params={},
            module_parameters_to_patch=[type(network.layer)],
            mapping_routine=naive_map,
            transistor=True,
            programming_routine=None,
            scheme=memtorch.bh.Scheme.SingleColumn)
        patched_network.tune_()
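Here patched_network.tune_() adjusts each patched layer so that its crossbar-based output tracks the output of the original (ideal) layer, which is why the call directly follows patch_model.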
Example #4
def test_schemes(debug_networks):
    networks = debug_networks
    for scheme in memtorch.bh.Scheme:
        for network in networks:
            patched_network = patch_model(copy.deepcopy(network),
                                          memristor_model=memtorch.bh.memristor.LinearIonDrift,
                                          memristor_model_params={},
                                          module_parameters_to_patch=[type(network.layer)],
                                          mapping_routine=naive_map,
                                          transistor=True,
                                          programming_routine=None,
                                          scheme=scheme)
            assert patched_network.layer.crossbars is not None
Example #5
def debug_patched_networks(debug_networks):
    networks = debug_networks
    device = torch.device('cpu' if 'cpu' in memtorch.__version__ else 'cuda')
    patched_networks = []
    for network in networks:
        patched_networks.append(
            patch_model(network,
                        memristor_model=memtorch.bh.memristor.LinearIonDrift,
                        memristor_model_params={},
                        module_parameters_to_patch=[type(network.layer)],
                        mapping_routine=naive_map,
                        transistor=True,
                        programming_routine=None,
                        scheme=memtorch.bh.Scheme.SingleColumn))

    return patched_networks
Example #6
def test_networks(debug_networks, tile_shape, quant_method):
    networks = debug_networks
    if quant_method is not None:
        ADC_resolution = 8
    else:
        ADC_resolution = None

    for network in networks:
        patched_network = patch_model(copy.deepcopy(network),
                                      memristor_model=memtorch.bh.memristor.LinearIonDrift,
                                      memristor_model_params={},
                                      module_parameters_to_patch=[type(network.layer)],
                                      mapping_routine=naive_map,
                                      transistor=True,
                                      programming_routine=None,
                                      scheme=memtorch.bh.Scheme.SingleColumn,
                                      tile_shape=tile_shape,
                                      max_input_voltage=1.0,
                                      ADC_resolution=ADC_resolution,
                                      quant_method=quant_method)
        patched_network.tune_()
        patched_network.disable_legacy()
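The tile_shape and quant_method arguments are presumably supplied by pytest parametrization; one plausible form, written as an assumption rather than the suite's actual decorators:

import pytest


@pytest.mark.parametrize("tile_shape", [None, (128, 128)])
@pytest.mark.parametrize("quant_method", [None, "linear"])
def test_networks(debug_networks, tile_shape, quant_method):
    ...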
Example #7
r_off = 300000
# NumPy (np), pandas (pd), torch, memtorch, the MobileNetV2 definition, device,
# r_on, and reference_memristor are imported/defined earlier in the original
# script; only an excerpt is reproduced here.
reference_memristor_params = {
    'time_series_resolution': 1e-10,
    'r_off': r_off,
    'r_on': r_on
}
model = MobileNetV2().to(device)
model.load_state_dict(torch.load('trained_model.pt'), strict=False)
model.eval()
patched_model = patch_model(
    model,
    memristor_model=reference_memristor,
    memristor_model_params=reference_memristor_params,
    module_parameters_to_patch=[torch.nn.Linear, torch.nn.Conv2d],
    mapping_routine=naive_map,
    transistor=True,
    programming_routine=None,
    scheme=memtorch.bh.Scheme.DoubleColumn,
    tile_shape=(128, 128),
    max_input_voltage=0.3,
    ADC_resolution=8,
    ADC_overflow_rate=0.,
    quant_method='linear')
del model
patched_model.tune_()
times_to_reprogram = 10**np.arange(1, 10, dtype=np.float64)
v_stop_values = np.linspace(1.3, 1.9, 10, endpoint=True)
df = pd.DataFrame(columns=['times_reprogramed', 'v_stop', 'test_set_accuracy'])
for time_to_reprogram in times_to_reprogram:
    cycle_count = time_to_reprogram
    for v_stop in v_stop_values:
        print('time_to_reprogram: %f, v_stop: %f' %
              (time_to_reprogram, v_stop))
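The excerpt is cut off inside the inner loop. A hedged sketch of how each iteration might record a result into df; the evaluate helper and test_loader are hypothetical placeholders, not part of the original script:

def record_result(df, patched_model, cycle_count, v_stop, test_loader, evaluate):
    # evaluate is a hypothetical helper that returns test-set accuracy.
    accuracy = evaluate(patched_model, test_loader)
    df.loc[len(df)] = [cycle_count, v_stop, accuracy]
    return df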
Example #8
import copy

import torch
import memtorch
from memtorch.mn.Module import patch_model
from memtorch.map.Parameter import naive_map
from memtorch.bh.crossbar.Program import naive_program

from conv_net import ConvNet
from model import Model

# Create new reference memristor
reference_memristor = memtorch.bh.memristor.VTEAM
reference_memristor_params = {"time_series_resolution": 1e-10}
memristor = reference_memristor(**reference_memristor_params)
memristor.plot_hysteresis_loop()

memristor_model = ConvNet()
memristor_model.load_state_dict(torch.load("model.ckpt"), strict=False)

patched_model = patch_model(copy.deepcopy(memristor_model),
                            memristor_model=reference_memristor,
                            memristor_model_params=reference_memristor_params,
                            module_parameters_to_patch=[torch.nn.Linear],
                            mapping_routine=naive_map,
                            transistor=True,
                            programming_routine=None,
                            tile_shape=(128, 128),
                            max_input_voltage=1.0,
                            ADC_resolution=8,
                            ADC_overflow_rate=0.,
                            quant_method='linear')

print("Hello world")