Example #1
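Example #1 trains a small CNN (Net) on MNIST, fuses its BatchNorm layers, transfers the weights into a spiking CatNet, and evaluates the resulting SNN on a rate-coded copy of the test set. All five examples assume the usual imports (argparse, numpy as np, torch, torch.optim as optim, StepLR from torch.optim.lr_scheduler, and datasets/transforms from torchvision) as well as project-local helpers (Net, CatNet, SpikeDataset, train, test, load_model, fuse_module, transfer_model) defined elsewhere in the repository.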
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=3,
                        metavar='N',
                        help='number of epochs to train (default: 3)')
    parser.add_argument('--lr',
                        type=float,
                        default=1,
                        metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run',
                        action='store_true',
                        default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    parser.add_argument('--T',
                        type=int,
                        default=300,
                        metavar='N',
                        help='SNN time window')
    parser.add_argument('--resume',
                        type=str,
                        default=None,
                        metavar='RESUME',
                        help='Resume model from checkpoint')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'batch_size': args.batch_size}
    if use_cuda:
        kwargs.update({'num_workers': 1, 'pin_memory': True, 'shuffle': True})

    transform = transforms.Compose([
        transforms.ToTensor(),
        #transforms.Normalize((0.1307,), (0.3081,))
    ])
    dataset1 = datasets.MNIST('../data',
                              train=True,
                              download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('../data', train=False, transform=transform)
    snn_dataset = SpikeDataset(dataset2, T=args.T)
    train_loader = torch.utils.data.DataLoader(dataset1, **kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **kwargs)
    snn_loader = torch.utils.data.DataLoader(snn_dataset, **kwargs)

    model = Net().to(device)
    snn_model = CatNet(args.T).to(device)

    if args.resume is not None:
        load_model(torch.load(args.resume), model)
    for param_tensor in snn_model.state_dict():
        print(param_tensor, "\t", snn_model.state_dict()[param_tensor].size())
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
        scheduler.step()

    fuse_module(model)
    transfer_model(model, snn_model)
    test(snn_model, device, snn_loader)
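fuse_module is project code not shown here. As a rough illustration (standard conv-BatchNorm folding, not necessarily the project's exact implementation), fusing one BatchNorm2d into its preceding Conv2d can be written as:

import torch
import torch.nn as nn

def fuse_conv_bn(conv, bn):
    # Fold eval-mode BN statistics into the conv:
    # w' = w * g / sqrt(var + eps),  b' = (b - mean) * g / sqrt(var + eps) + beta
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                      stride=conv.stride, padding=conv.padding, bias=True)
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused.weight.data.copy_(conv.weight.data * scale.reshape(-1, 1, 1, 1))
    base = conv.bias.data if conv.bias is not None else torch.zeros_like(bn.running_mean)
    fused.bias.data.copy_((base - bn.running_mean) * scale + bn.bias.data)
    return fused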
Example #2
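Example #2 applies the same ANN-to-SNN pipeline to an EEG classification task: a preprocessed .npz recording is scaled into [0, 1], oversampled, and trained with Adadelta before fusion and transfer. (The --batch-size flag is unused here; the loaders set explicit batch sizes.)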
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=2,
                        metavar='N',
                        help='input batch size for training (default: 2)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=3,
                        metavar='N',
                        help='number of epochs to train (default: 3)')
    parser.add_argument('--lr',
                        type=float,
                        default=1,
                        metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run',
                        action='store_true',
                        default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    parser.add_argument('--T',
                        type=int,
                        default=160,
                        metavar='N',
                        help='SNN time window')
    parser.add_argument('--resume',
                        type=str,
                        default=None,
                        metavar='RESUME',
                        help='Resume model from checkpoint')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    f = np.load('dataeeg/eeg1201_r57_b_hamming_5s_2_c.npz')
    X_train_ = f['X_train_']
    y_train_ = f['y_train_']
    X_test = f['X_test']
    y_test = f['y_test']
    X_train_ = torch.FloatTensor(X_train_)
    y_train_ = torch.FloatTensor(y_train_)
    X_test = torch.FloatTensor(X_test)
    y_test = torch.FloatTensor(y_test)
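    # Scale the EEG amplitudes into [0, 1]; the small Gaussian noise on the
    # training split acts as augmentation.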
    X_train_ = X_train_ * 3000 + 0.001 * torch.randn_like(X_train_)
    X_train_ = torch.clamp(X_train_, 0, 1)
    X_test = X_test * 3000
    X_test = torch.clamp(X_test, 0, 1)

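    # Doubling four times oversamples the training set 16x.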
    for i in range(4):
        X_train_ = torch.cat([X_train_, X_train_], dim=0)
        y_train_ = torch.cat([y_train_, y_train_], dim=0)

    torch_dataset_train = torch.utils.data.TensorDataset(X_train_, y_train_)
    torch_dataset_test = torch.utils.data.TensorDataset(X_test, y_test)
    snn_dataset = SpikeDataset(torch_dataset_test, T=args.T)
    train_loader = torch.utils.data.DataLoader(torch_dataset_train,
                                               shuffle=True,
                                               batch_size=512)
    test_loader = torch.utils.data.DataLoader(torch_dataset_test,
                                              shuffle=False,
                                              batch_size=64)
    snn_loader = torch.utils.data.DataLoader(snn_dataset,
                                             shuffle=False,
                                             batch_size=16)

    model = Net().to(device)
    snn_model = CatNet(args.T).to(device)

    if args.resume is not None:
        load_model(torch.load(args.resume), model)

    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    Acc = 0
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, train_loader)
        Acc_ = test(model, device, test_loader)
        if Acc_ > Acc:
            Acc = Acc_
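            # NOTE: fusing here mutates the live model mid-training; fusing a
            # copy before saving would be safer.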
            fuse_module(model)
            #torch.save(model.state_dict(), "eeg_1201_3_layers_final_1.pt")

        scheduler.step()
    print('Best test accuracy:', Acc)
    fuse_module(model)
    test(model, device, test_loader)

    transfer_model(model, snn_model)
    test(snn_model, device, snn_loader)
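SpikeDataset is also project-specific. A common way to build such a wrapper is Bernoulli rate coding, where each input value in [0, 1] becomes a per-step firing probability over T time steps; a hypothetical minimal version (names and coding scheme assumed, not taken from the project) might look like:

import torch
from torch.utils.data import Dataset

class RateCodedDataset(Dataset):
    # Hypothetical stand-in for SpikeDataset: Bernoulli rate coding.
    def __init__(self, base, T):
        self.base, self.T = base, T

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        x, y = self.base[idx]
        # Each intensity in [0, 1] is the spike probability at every step.
        probs = x.unsqueeze(0).expand(self.T, *x.shape)
        return torch.bernoulli(probs), y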
Example #3
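Example #3 is an ECG variant: heartbeat windows of length 180 are clamped and coarsely quantized, the best model by test accuracy is checkpointed, and the final model is fused and transferred to the SNN.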
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch ECG Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=14,
                        metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run',
                        action='store_true',
                        default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed',
                        type=int,
                        default=3,
                        metavar='S',
                        help='random seed (default: 3)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    parser.add_argument('--resume',
                        type=str,
                        default=None,
                        metavar='RESUME',
                        help='Resume model from checkpoint')
    parser.add_argument('--T',
                        type=int,
                        default=40,
                        metavar='N',
                        help='SNN time window')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    #torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'batch_size': args.batch_size}
    if use_cuda:
        kwargs.update({
            'num_workers': 1,
            'pin_memory': True,
            'shuffle': True
        }, )

    path = './ecg_data_normalize_smoke_2c.npz'
    #path = './all_0810_paper_smoke_normalize_01.npz'
    f = np.load(path)
    train_x, train_y = f['x_train'], f['y_train']
    test_x, test_y = f['x_test'], f['y_test']
    """
    for i in range(len(train_x)):
        train_x[i] = minmaxscaler (train_x[i])

    for i in range(len(test_x)):
        test_x[i] = minmaxscaler (test_x[i])
    """

    y_test_ = test_y
    X_train_ = torch.FloatTensor(train_x)
    print(X_train_.shape)

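    # Clamp to [0, 1], then ceil-quantize: training inputs to 8 levels,
    # test inputs to 4.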
    X_train_ = X_train_.reshape(-1, 1, 180, 1)
    X_train_ = torch.clamp(X_train_, min=0, max=1)
    X_train_ = torch.div(torch.ceil(torch.mul(X_train_, 8)), 8)
    y_train_ = torch.FloatTensor(train_y)

    X_test = torch.FloatTensor(test_x)
    X_test = X_test.reshape(-1, 1, 180, 1)
    X_test = torch.clamp(X_test, min=0, max=1)
    X_test = torch.div(torch.ceil(torch.mul(X_test, 4)), 4)

    y_test = torch.FloatTensor(test_y)

    torch_dataset_train = torch.utils.data.TensorDataset(X_train_, y_train_)
    torch_dataset_test = torch.utils.data.TensorDataset(X_test, y_test)
    snn_dataset = SpikeDataset(torch_dataset_test, T=args.T)

    train_loader = torch.utils.data.DataLoader(torch_dataset_train,
                                               shuffle=True,
                                               batch_size=256 * 3)
    test_loader = torch.utils.data.DataLoader(torch_dataset_test,
                                              shuffle=False,
                                              batch_size=64)
    snn_loader = torch.utils.data.DataLoader(snn_dataset,
                                             shuffle=False,
                                             batch_size=1)

    model = Net().to(device)
    snn_model = CatNet(args.T).to(device)

    if args.resume is not None:
        load_model(torch.load(args.resume), model)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    Acc = 0
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, train_loader)
        Acc_ = test(model, device, test_loader)
        if Acc_ > Acc:
            Acc = Acc_
            torch.save(model.state_dict(), "ecg2c_1.pt")
        scheduler.step()

    fuse_module(model)
    transfer_model(model, snn_model)
    test(snn_model, device, snn_loader)
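transfer_model copies the trained (fused) CNN weights into the spiking network. A hedged sketch of the likely mechanics, assuming the two models share parameter names and shapes:

import torch

def transfer_weights(src, dst):
    # Copy every parameter/buffer whose name and shape match.
    dst_state = dst.state_dict()
    for name, tensor in src.state_dict().items():
        if name in dst_state and dst_state[name].shape == tensor.shape:
            dst_state[name].copy_(tensor)
    dst.load_state_dict(dst_state)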
Example #4
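Example #4 trains a VGG11 (note the models.vgg_imagenet import and the project-specific data_loader()); the SNN weights come from the --resume checkpoint rather than from transfer_model, and the SNN is evaluated once after training.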
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch VGG Example')
    parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1e-5, metavar='LR',
                        help='learning rate (default: 1e-5)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    parser.add_argument('--resume', type=str, default=None, metavar='RESUME',
                        help='Resume model from checkpoint')
    parser.add_argument('--T', type=int, default=500, metavar='N',
                        help='SNN time window')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)
    #device = torch.device("cpu")
    device = torch.device("cuda" if use_cuda else "cpu")

    train_loader, val_loader, val_dataset = data_loader()

    snn_dataset = SpikeDataset(val_dataset, T=args.T)
    snn_loader = torch.utils.data.DataLoader(snn_dataset, batch_size=1, shuffle=False)

    from models.vgg_imagenet import VGG, CatVGG

    model = VGG('VGG11', bias=True).to(device)
    #model.load_state_dict(torch.load("YOUR MODEL HERE.pt"), strict=False)

    snn_model = CatVGG('VGG11', args.T, bias=True).to(device)
    #for param_tensor in snn_model.state_dict():
    #    print(param_tensor, "\t", snn_model.state_dict()[param_tensor].size())

    if args.resume is not None:
        load_model(torch.load(args.resume), snn_model)
        model.load_state_dict(torch.load(args.resume), strict=False)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, train_loader)
        test(model, device, val_loader)
        scheduler.step()

    test(snn_model, device, snn_loader)
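These scripts are driven from the command line; a typical invocation (the file name main.py is assumed) would be:

    python main.py --epochs 14 --lr 1e-5 --T 500 --resume checkpoint.pt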
Example #5
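Example #5 is the full CIFAR-10 pipeline: a VGG19 trained with Adam on a heavily augmented training set, then transferred to a spiking CatVGG whose weights are normalized and 8-bit quantized before evaluation.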
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 VGG Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1e-5, metavar='LR',
                        help='learning rate (default: 1e-5)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    parser.add_argument('--resume', type=str, default=None, metavar='RESUME',
                        help='Resume model from checkpoint')
    parser.add_argument('--T', type=int, default=60, metavar='N',
                        help='SNN time window')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'batch_size': args.batch_size}
    if use_cuda:
        kwargs.update({'num_workers': 1, 'pin_memory': True, 'shuffle': True})
    mean = [0.4913997551666284, 0.48215855929893703, 0.4465309133731618]
    std = [0.24703225141799082, 0.24348516474564, 0.26158783926049628]
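    # CIFAR-10 per-channel mean/std; unused below because Normalize is
    # commented out (kwargs above is likewise unused in this example).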

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=6),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        AddGaussianNoise(std=0.01)
        ])
    im_aug = transforms.Compose([
        #transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
        transforms.RandomRotation(10),
        transforms.RandomCrop(32, padding=6),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        AddGaussianNoise(std=0.01)
        ])

    transform_test = transforms.Compose([
        transforms.ToTensor()
        #transforms.Normalize(mean, std)
        ])

    trainset = datasets.CIFAR10(
        root='./data', train=True, download=True, transform=transform_train)
    
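    # Each iteration wraps trainset in a ConcatDataset with one more
    # augmented CIFAR-10 copy; every copy holds the full data array in
    # memory, so 100 iterations is extremely RAM-hungry.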
    for i in range(100):
        trainset = trainset + datasets.CIFAR10(root='./data', train=True, download=True, transform=im_aug)
        
    train_loader = torch.utils.data.DataLoader(
        trainset, batch_size=128, shuffle=True)

    testset = datasets.CIFAR10(
        root='./data', train=False, download=True, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(
        testset, batch_size=100, shuffle=False)

    snn_dataset = SpikeDataset(testset, T=args.T)
    snn_loader = torch.utils.data.DataLoader(snn_dataset, batch_size=10, shuffle=False)

    from models.vgg import VGG, CatVGG

    model = VGG('VGG19', clamp_max=1, quantize_bit=32).to(device)
    snn_model = CatVGG('VGG19', args.T).to(device)
    if args.resume is not None:
        model.load_state_dict(torch.load(args.resume), strict=False)
        load_model(torch.load(args.resume), model)
        load_model(torch.load(args.resume), snn_model)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        #test(model, device, train_loader)
        test(model, device, test_loader)

        #transfer_model(model, snn_model)
        #test(snn_model, device, snn_loader)
        if args.save_model:
            torch.save(model.state_dict(), "cifar_cnn_19.pt")
        
        scheduler.step()
    #test(model, device, train_loader)
    test(model, device, test_loader)
    transfer_model(model, snn_model)
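    # normalize_weight is project code; judging by its arguments it rescales
    # (and 8-bit quantizes) the transferred feature weights, a standard step
    # in ANN-to-SNN conversion to keep firing rates in range.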
    with torch.no_grad():
        normalize_weight(snn_model.features, quantize_bit=8)
    test(snn_model, device, snn_loader)
    if args.save_model:
        torch.save(model.state_dict(), "cifar_cnn_19.pt")