Example #1
def main():

    args = arguments()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data', train=True, download=True, transform=transform),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    valid_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data', train=False, transform=transform),
                                               batch_size=args.test_batch_size,
                                               shuffle=True,
                                               **kwargs)

    model = ConvNet().to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        valid(args, model, device, valid_loader)
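
# train() and valid() are defined elsewhere in the original file; a minimal
# sketch of what they might look like for this MNIST setup (the NLL loss and
# the accuracy printout are assumptions, not part of the source):
import torch.nn.functional as F

def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)  # assumes the model ends in log_softmax
        loss.backward()
        optimizer.step()

def valid(args, model, device, valid_loader):
    model.eval()
    correct = 0
    with torch.no_grad():
        for data, target in valid_loader:
            data, target = data.to(device), target.to(device)
            correct += (model(data).argmax(dim=1) == target).sum().item()
    print('Validation accuracy: {:.2f}%'.format(
        100. * correct / len(valid_loader.dataset)))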
Example #2
           'ship', 'truck')

# Check whether CUDA is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Define net
net = ConvNet(widen_factor=32)
if device == 'cuda':
    net = net.cuda()
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
# optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
optimizer = torch.optim.SGD(net.parameters(), lr=0.025, momentum=0.9)


# Training
def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # Variable is deprecated since PyTorch 0.4; plain tensors carry gradients
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
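        # the example is truncated here; a plausible rest of the training step
        # (a sketch, not the source code):
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()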
Example #3
def main():
    parser = argparse.ArgumentParser(description='PyTorch FGSM')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=5,
                        metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=640,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--epsilon',
                        type=float,
                        default=0.25,
                        help='epsilon(perturbation) of adversarial attack')
    parser.add_argument('--dataset-normalize',
                        action='store_true',
                        default=False,
                        help='normalize the dataset (default: False)')
    parser.add_argument(
        '--network',
        type=str,
        default='fc',
        help='network type (one of: fc, conv, drop, googlenet; default: fc)')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    parser.add_argument('--dataset',
                        type=str,
                        default='mnist',
                        help='choose dataset: mnist or cifar (default: mnist)')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    transformation = transforms.ToTensor()
    # Dataset normalize
    if args.dataset_normalize:
        transformation = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])

    # dataset

    if args.dataset == 'cifar':  # cifar10
        train_dataset = datasets.CIFAR10('../data',
                                         train=True,
                                         download=True,
                                         transform=transformation)
        test_dataset = datasets.CIFAR10('../data',
                                        train=False,
                                        download=True,
                                        transform=transformation)
    else:  # mnist(default)
        train_dataset = datasets.MNIST('../data',
                                       train=True,
                                       download=True,
                                       transform=transformation)
        test_dataset = datasets.MNIST('../data',
                                      train=False,
                                      download=True,
                                      transform=transformation)

    # Data loaders
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.test_batch_size,
                                              shuffle=True)

    # Network Type
    if args.network == 'conv':
        model = ConvNet().to(device)
    elif args.network == 'drop':
        model = DropNet().to(device)
    elif args.network == 'googlenet' or args.dataset == 'cifar':
        model = GoogleNet().to(device)
    else:  # 'fc' (default); also catches unrecognized names so model is always defined
        model = FcNet().to(device)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    train(args, model, device, train_loader, optimizer)

    test(args, model, device, test_loader)
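
# The FGSM perturbation itself is not shown in this excerpt; the standard
# formulation of the attack (a sketch, not necessarily the author's exact
# code) shifts each input along the sign of its gradient:
def fgsm_attack(image, epsilon, data_grad):
    # x_adv = x + epsilon * sign(dL/dx), clamped back to the valid pixel range
    perturbed = image + epsilon * data_grad.sign()
    return torch.clamp(perturbed, 0, 1)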
Example #4
def main(args):
    if args.name:
        args.name += '_'
    logname = f'{args.name}{args.t_tuple[0]}_{args.t_tuple[1]}_soft{args.soft}' \
              f'c{args.channels}b{args.blocks}_p{args.patience}_' \
              f'bs{args.batch_size}lr{args.lr}d{args.decay}_s{args.seed}'
    print(logname)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using {}'.format(device))
    torch.backends.cudnn.benchmark = True

    train_set = OneHotConvGameDataset(args.path, args.t_tuple[0], args.t_tuple[1], device, soft=args.soft)
    train_dat = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)

    m = ConvNet(channels=args.channels, blocks=args.blocks)
    if args.pretrained:
        m.load_state_dict(torch.load('models/{}.pt'.format(args.pretrained), map_location=device))
        print('Loaded ' + args.pretrained)
        logname = 'pre_'+logname
    m.to(device)
    loss_fn = nn.KLDivLoss(reduction='batchmean')
    optimizer = torch.optim.Adam(m.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.decay)
    t_loss = []
    min_move = []
    best = 0.0
    timer = 0
    if args.patience == 0:
        stop = args.epochs
    else:
        stop = args.patience

    data_len = len(train_dat)
    for epoch in range(args.epochs):
        print('-' * 10)
        print('Epoch: {}'.format(epoch))
        timer += 1

        m.train()
        running_loss = 0
        for x, y in tqdm(train_dat):
            optimizer.zero_grad()
            pred = m(x)
            loss = loss_fn(pred, y)
            running_loss += loss.item()  # .data is deprecated; .item() reads the scalar
            loss.backward()
            optimizer.step()
        running_loss /= data_len
        if epoch == 2 and running_loss > 210/1000:  # give up early if the loss has not dropped below 0.210 by epoch 2
            stop = 0
        print('Train mLoss: {:.3f}'.format(1e3 * running_loss))
        t_loss.append(running_loss)
        
        m.eval()
        time1 = time()
        ave_min_move = eval_nn_min(m, number=10, repeats=40, device=device)
        time_str = ', took {:.0f} seconds'.format(time()-time1)
        min_move.append(ave_min_move)
        if ave_min_move >= best:
            tqdm.write(str(ave_min_move) + ' ** Best' + time_str)
            best = ave_min_move
            timer = 0
            torch.save(m.state_dict(), 'models/' + logname + '_best.pt')
        else:
            tqdm.write(str(ave_min_move) + time_str)

        if timer >= stop:
            print('Ran out of patience')
            print(f'Best score: {best}')
            # torch.save(m.state_dict(), 'models/'+logname+f'_e{epoch}.pt')
            break
        else:
            print(f'{stop - timer} epochs remaining')

    np.savez('logs/'+logname,
             t_loss=t_loss,
             min_move=min_move,
             params=args)
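
# Note: nn.KLDivLoss(reduction='batchmean') expects log-probabilities as input
# and probabilities as target, so ConvNet here presumably ends in log_softmax,
# e.g. (an assumption about the model, which is not shown in the source):
#     return F.log_softmax(x, dim=1)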
Example #5
def run_experiments():

    ###################################EXPERIMENT_1##############################################################
    '''
    DESCRIPTION
    Training and testing set both contain all the recordings. 80-20 split random state =  42
    '''
    '''
    ID: 5924295
    '''

    list_IDs = []
    train_list_IDs = []
    test_list_IDs = []
    y = []
    IDs = [1, 2, 3, 4]
    list_IDs, y = separate_data_by_mic_id_train(IDs)
    train_list_IDs, test_list_IDs, y_train, y_test = train_test_split(
        list_IDs, y, test_size=0.2, random_state=42)  # 100

    ######HYPERPARAMETERS#############################################
    num_epochs = 10
    num_classes = 2
    learning_rate = 1e-3
    batch_size = 1
    #################################################################

    training_set = TrainDataset(train_list_IDs, y_train)
    train_loader = torch.utils.data.DataLoader(dataset=training_set,
                                               batch_size=batch_size,
                                               shuffle=True)

    test_set = TestDataset(test_list_IDs, y_test)  # test_list_Ids
    test_loader = torch.utils.data.DataLoader(dataset=test_set,
                                              batch_size=batch_size,
                                              shuffle=True)
    if use_cuda:
        model = ConvNet(num_classes).cuda()
    else:
        model = ConvNet(num_classes)

    # Loss and optimizer

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # training
    train_mode = True
    print('starting training')
    fit(train_loader, test_loader, model, criterion, optimizer, num_epochs,
        use_cuda, train_mode)

    PATH = '/home/shasvatmukes/project/audio_classification/weights/simple_CNN_weights_log1.pth'  # unique names
    torch.save(model.state_dict(), PATH)

    model.load_state_dict(torch.load(PATH))
    # Test
    train_mode = False
    fit(train_loader, test_loader, model, criterion, optimizer, num_epochs,
        use_cuda, train_mode)
    '''
Example #6
NTEST = NCLASSES * NFILES - NTRAIN
# Create random indices
indicesTrain = list(np.random.choice(NCLASSES * NFILES, NTRAIN, replace=False))
indicesTest  = [i for i in range(0, NCLASSES * NFILES) if i not in indicesTrain]
# Train sets, move to device
dataTrain   = torch.FloatTensor([data[i]     for i in indicesTrain]).to(device)
labelsTrain = torch.FloatTensor([iClasses[i] for i in indicesTrain]).long().to(device)
# Test sets, keep on cpu
dataTest   = torch.FloatTensor([data[i]     for i in indicesTest])
labelsTest = torch.FloatTensor([iClasses[i] for i in indicesTest]).long()
## .long() fixes: RuntimeError: Expected object of scalar type Long but got scalar type Float for argument #2 'target'

### Create model ###
model = ConvNet(NCLASSES, imageSize=IMAGE_SIZE, nConvLayers=NLAYERS, nchannels=NCHANNELS).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
batchSize = math.ceil(NTRAIN / NBATCHES)

### Create directory to store model weights ###
nowStr = datetime.now().strftime("%y%m%d-%H%M%S")
modelDir = "model_%s_%d_%d_%d_%d_%d_%d" % (nowStr, NCLASSES, NFILES, NBATCHES, NLAYERS, NCHANNELS, IMAGE_SIZE)
os.mkdir(modelDir)
copyfile("./network.py", modelDir + "/network.py")
f = open(modelDir + "/classes.txt", "w")
f.write(" ".join(classes))
f.close()

f = open(modelDir + "/indices.txt", "w")
f.write("train " + " ".join(map(str, indicesTrain)) + "\n")
f.write("test " + " ".join(map(str, indicesTest)))
f.close()
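
# The training loop is not shown in this excerpt; a minimal sketch of manual
# mini-batching with the batchSize computed above (NEPOCHS and the loop body
# are assumptions, not part of the source):
NEPOCHS = 100  # assumed epoch count
for epoch in range(NEPOCHS):
    permutation = torch.randperm(NTRAIN, device=device)
    for b in range(NBATCHES):
        idx = permutation[b * batchSize:(b + 1) * batchSize]
        optimizer.zero_grad()
        loss = criterion(model(dataTrain[idx]), labelsTrain[idx])
        loss.backward()
        optimizer.step()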
Example #7
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--step_size', type=int, default=5)
    parser.add_argument('--gamma', type=float, default=0.5)    
    args = parser.parse_args()
    
    trainset = MyDataset('train')
    train_loader = DataLoader(dataset=trainset, num_workers=4, batch_size=args.batch_size, shuffle=True, drop_last=True, pin_memory=True)

    valset = MyDataset('val')
    val_loader = DataLoader(dataset=valset, num_workers=4, batch_size=args.batch_size, pin_memory=True)

    testset = MyDataset('test')
    test_loader = DataLoader(dataset=testset, num_workers=4, batch_size=args.batch_size, pin_memory=True)    
    
    model = ConvNet()   
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)   
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)        
    model = model.cuda()

    best_acc = 0.0
    for epoch in range(args.max_epoch):
        model.train()
        for i, batch in enumerate(train_loader):
            imgs, labels = batch[0].cuda(), batch[1].cuda()
            optimizer.zero_grad()
            logits = model(imgs)
            loss = F.cross_entropy(logits, labels)
            loss.backward()
            optimizer.step()
            acc = count_acc(logits, labels)
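        # the excerpt is truncated here; a plausible end of the epoch (the
        # validation pass is a sketch, not the source code):
        model.eval()
        correct = total = 0
        with torch.no_grad():
            for imgs, labels in val_loader:
                imgs, labels = imgs.cuda(), labels.cuda()
                correct += (model(imgs).argmax(dim=1) == labels).sum().item()
                total += labels.size(0)
        if correct / total > best_acc:
            best_acc = correct / total
        lr_scheduler.step()  # per PyTorch >= 1.1, step after the optimizer updates for the epoch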
Example #8
# The dataset
train_dataset = NumbersDataSet('../new_data/', transform=transformOpt)


# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)


# Initialize our network
model = ConvNet(num_classes).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)

plot_training = []

for epoch in range(num_epochs):
    loss_training = 0.0
    epoch_loss = 0.0
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
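        # the example is truncated here; a plausible rest of the training step
        # (a sketch, not the source code):
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_training += loss.item()
    plot_training.append(loss_training / total_step)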