plt.title('pool3 weights')

if __name__ == '__main__':
    # Read the network description from the YAML configuration.
    netParams = snn.params('network.yaml')

    # CUDA device that runs the simulation.
    device = torch.device('cuda')
    # deviceIds = [2, 3]

    # Network instance moved onto the device.
    net = Network(netParams).to(device)
    # net = torch.nn.DataParallel(Network(netParams).to(device), device_ids=deviceIds)

    # Spiking loss module.
    error = snn.loss(netParams, spikeLayer).to(device)

    # Optimizer.
    # optimizer = torch.optim.Adam(net.parameters(), lr = 0.01, amsgrad = True)
    # NOTE(review): this rebinds the name `optimizer` (presumably the slayerSNN
    # optimizer module) to the Nadam instance — confirm the module is not needed
    # again after this point.
    optimizer = optimizer.Nadam(net.parameters(), lr=0.01, amsgrad=True)

    # Training dataset and its loader.
    trainingSet = IBMGestureDataset(
        datasetPath=netParams['training']['path']['in'],
        sampleFile=netParams['training']['path']['train'],
        samplingTime=netParams['simulation']['Ts'],
        sampleLength=netParams['simulation']['tSample'],
    )
    trainLoader = DataLoader(dataset=trainingSet, batch_size=4, shuffle=True, num_workers=1)
spikeLayer1 = self.slayer.spike(self.slayer.psp(self.fc1(spikeInput))) spikeLayer2 = self.slayer.spike(self.slayer.psp(self.fc2(spikeLayer1))) spikeLayer3 = self.slayer.spike(self.slayer.psp(self.fc3(spikeLayer2))) spikeLayer4 = self.slayer.spike(self.slayer.psp(self.fc4(spikeLayer3))) spikeLayer5 = self.slayer.spike(self.slayer.psp(self.fc5(spikeLayer4))) spikeLayer6 = self.slayer.spike(self.slayer.psp(self.fc6(spikeLayer5))) return spikeLayer6 # Definicia zariadenia, na ktorom sa bude vykonavat trenovania device = torch.device('cuda') # Vytvorenie instancie triedy Network a presunutie na zariadenie net = Network(netParams).to(device) # Vytvorenie instancie chyby error = snn.loss(netParams).to(device) # Definicia optimalizacneho algoritmu a parametrov optimizer = torch.optim.Adam(net.parameters(), lr = 0.001, amsgrad = False) # Vytvorenie instancii dataset, zvlast pre trenovaciu a testovaciu cast trainingSet = nmnistDataset(datasetPath =netParams['training']['path']['in'], sampleFile =netParams['training']['path']['train'], samplingTime=netParams['simulation']['Ts'], sampleLength=netParams['simulation']['tSample']) trainLoader = DataLoader(dataset=trainingSet, batch_size=8, shuffle=True, num_workers=4) testingSet = nmnistDataset(datasetPath =netParams['training']['path']['in'], sampleFile =netParams['training']['path']['test'], samplingTime=netParams['simulation']['Ts'], sampleLength=netParams['simulation']['tSample'])
model_args = { "params": params, "tact_input_size": 156 if args.fingers == "both" else 78, "vis_input_size": (50, 63, 2), "tact_output_size": 50, "vis_output_size": 10, "output_size": output_size, } device = torch.device("cuda") writer = SummaryWriter(".") net = model(**model_args).to(device) if args.loss == "NumSpikes": params["training"]["error"]["type"] = "NumSpikes" error = snn.loss(params).to(device) criteria = error.numSpikes elif args.loss == "WeightedNumSpikes": params["training"]["error"]["type"] = "WeightedNumSpikes" error = snn.loss(params).to(device) criteria = error.weightedNumSpikes optimizer = torch.optim.RMSprop(net.parameters(), lr=args.lr, weight_decay=0.5) train_dataset = ViTacDataset(path=args.data_dir, sample_file=f"train_80_20_{args.sample_file}.txt", output_size=output_size, spiking=True, mode=args.mode, fingers=args.fingers) train_loader = DataLoader(
if __name__ == '__main__': netParams = snn.params('network.yaml') # Define the cuda device to run the code on. device = torch.device('cuda') # deviceIds = [1, 2] # Create network instance. net = Network(netParams, 'emg', 'dvsCropped').to(device) # net = torch.nn.DataParallel(Network(netParams).to(device), device_ids=deviceIds) # Create snn loss instance. error = snn.loss(netParams, snn.loihi).to(device) # Define optimizer module. optimizer = optim.Nadam(net.parameters(), lr=0.01) # Dataset and dataLoader instances. trainingSet = fusionDataset( samples=np.loadtxt('train.txt').astype(int), samplingTime=netParams['simulation']['Ts'], sampleLength=netParams['simulation']['tSample'], # sampleLength=2000, ) testingSet = fusionDataset( samples=np.loadtxt('test.txt').astype(int), samplingTime=netParams['simulation']['Ts'],
model_args = { "params": params, "tact_input_size": 156, "vis_input_size": 50 * 63, "tact_output_size": 50, "vis_output_size": 10, "output_size": output_size, } device = torch.device("cuda") writer = SummaryWriter(".") net = model(**model_args).to(device) if args.loss == "NumSpikes": params["training"]["error"]["type"] = "NumSpikes" error = snn.loss(params, spikeLayer).to(device) criteria = error.numSpikes elif args.loss == "WeightedNumSpikes": params["training"]["error"]["type"] = "WeightedNumSpikes" error = snn.loss(params, spikeLayer).to(device) criteria = error.weightedNumSpikes optimizer = torch.optim.RMSprop(net.parameters(), lr=args.lr) train_dataset = ViTacDataset( path=args.data_dir, sample_file=f"train_80_20_{args.sample_file}.txt", output_size=output_size, spiking=True, mode=args.mode, loihi=True,
# Network description from the YAML configuration; echo the neuron decay
# constants for quick inspection.
netParams = snn.params('network.yaml')
print('vDecay:', netParams['neuron']['vDecay'])
print('iDecay:', netParams['neuron']['iDecay'])

# CUDA device that runs the simulation.
device = torch.device('cuda:2')
# deviceIds = [1, 2]

# Network instance on the device.
net = Network(netParams).to(device)
# net = torch.nn.DataParallel(Network(netParams).to(device), device_ids=deviceIds)

# Training loss.
error = snn.loss(netParams, snn.loihi).to(device)

# Separate loss configuration for testing over the whole 2 seconds of data:
# the target spike region is widened to the full window and the target spike
# counts (per the True/False class keys) are scaled proportionally to the
# longer duration.
testLength = 200
netParamsTest = snn.params('network.yaml')
errCfg = netParamsTest['training']['error']
errCfg['tgtSpikeRegion']['start'] = 0
errCfg['tgtSpikeRegion']['stop'] = testLength
countScale = testLength / netParams['simulation']['tSample']
errCfg['tgtSpikeCount'][False] *= countScale
errCfg['tgtSpikeCount'][True] *= countScale
testError = snn.loss(netParamsTest, snn.loihi).to(device)

# Define optimizer module.
# optimizer = torch.optim.Adam(net.parameters(), lr = 0.01, amsgrad = True)