Example #1
def main(args):
    if args.name:
        args.name += '_'
    logname = f'{args.name}{args.t_tuple[0]}_{args.t_tuple[1]}_soft{args.soft}' \
              f'c{args.channels}b{args.blocks}_p{args.patience}_' \
              f'bs{args.batch_size}lr{args.lr}d{args.decay}_s{args.seed}'
    print(logname)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using {}'.format(device))
    torch.backends.cudnn.benchmark = True
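    # benchmark mode lets cuDNN autotune convolution algorithms; a win when input shapes are fixed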

    train_set = OneHotConvGameDataset(args.path, args.t_tuple[0], args.t_tuple[1], device, soft=args.soft)
    train_dat = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)

    m = ConvNet(channels=args.channels, blocks=args.blocks)
    if args.pretrained:
        m.load_state_dict(torch.load('models/{}.pt'.format(args.pretrained), map_location=device))
        print('Loaded ' + args.pretrained)
        logname = 'pre_'+logname
    m.to(device)
    loss_fn = nn.KLDivLoss(reduction='batchmean')
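    # nn.KLDivLoss expects log-probabilities as input and probabilities as targets,
    # so ConvNet's forward pass is assumed to end in log_softmax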
    optimizer = torch.optim.Adam(m.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.decay)
    t_loss = []
    min_move = []
    best = 0.0
    timer = 0
    # args.patience == 0 disables early stopping: train for the full args.epochs
    if args.patience == 0:
        stop = args.epochs
    else:
        stop = args.patience

    data_len = len(train_dat)
    for epoch in range(args.epochs):
        print('-' * 10)
        print('Epoch: {}'.format(epoch))
        timer += 1

        m.train()
        running_loss = 0
        for x, y in tqdm(train_dat):
            optimizer.zero_grad()
            pred = m(x)
            loss = loss_fn(pred, y)
            running_loss += loss.item()
            loss.backward()
            optimizer.step()
        running_loss /= data_len
        if epoch == 2 and running_loss > 210 / 1000:
            # heuristic early abort: still above 210 mLoss by epoch 2, so give up on this run
            stop = 0
        print('Train mLoss: {:.3f}'.format(1e3 * running_loss))
        t_loss.append(running_loss)
        
        m.eval()
        time1 = time()
        ave_min_move = eval_nn_min(m, number=10, repeats=40, device=device)
        time_str = ', took {:.0f} seconds'.format(time()-time1)
        min_move.append(ave_min_move)
        if ave_min_move >= best:
            tqdm.write(str(ave_min_move) + ' ** Best' + time_str)
            best = ave_min_move
            timer = 0
            torch.save(m.state_dict(), 'models/' + logname + '_best.pt')
        else:
            tqdm.write(str(ave_min_move) + time_str)

        if timer >= stop:
            print('Ran out of patience')
            print(f'Best score: {best}')
            # torch.save(m.state_dict(), 'models/'+logname+f'_e{epoch}.pt')
            break
        else:
            print(f'{stop - timer} epochs remaining')

    np.savez('logs/'+logname,
             t_loss=t_loss,
             min_move=min_move,
             params=args)  # the argparse Namespace is pickled into the .npz; np.load needs allow_pickle=True
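
For reference, here is a minimal argparse sketch of the args object this main() expects. Every flag name is inferred from the attributes the function reads, and all defaults are illustrative assumptions:

import argparse

# Hypothetical CLI wrapper for main(); flag names mirror the attributes used above.
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='training data for OneHotConvGameDataset')
parser.add_argument('--name', default='')
parser.add_argument('--t_tuple', type=int, nargs=2, default=(0, 2048))
parser.add_argument('--soft', type=float, default=0.0)
parser.add_argument('--channels', type=int, default=64)
parser.add_argument('--blocks', type=int, default=5)
parser.add_argument('--patience', type=int, default=0, help='0 disables early stopping')
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--decay', type=float, default=0.0, help='Adam weight decay')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--pretrained', default=None, help='checkpoint name under models/ to warm-start from')

if __name__ == '__main__':
    main(parser.parse_args())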
Example #2
def run_experiments():

    ################################### EXPERIMENT 1 ##############################################################
    '''
    DESCRIPTION
    Training and testing sets both contain all the recordings; 80-20 split with random_state=42.
    ID: 5924295
    '''

    IDs = [1, 2, 3, 4]  # microphone IDs to include
    list_IDs, y = separate_data_by_mic_id_train(IDs)
    train_list_IDs, test_list_IDs, y_train, y_test = train_test_split(
        list_IDs, y, test_size=0.2, random_state=42)

    ######HYPERPARAMETERS#############################################
    num_epochs = 10
    num_classes = 2
    learning_rate = 1e-3
    batch_size = 1
    #################################################################

    training_set = TrainDataset(train_list_IDs, y_train)
    train_loader = torch.utils.data.DataLoader(dataset=training_set,
                                               batch_size=batch_size,
                                               shuffle=True)

    test_set = TestDataset(test_list_IDs, y_test)
    test_loader = torch.utils.data.DataLoader(dataset=test_set,
                                              batch_size=batch_size,
                                              shuffle=False)  # no need to shuffle the evaluation set
    if use_cuda:
        model = ConvNet(num_classes).cuda()
    else:
        model = ConvNet(num_classes)

    # Loss and optimizer

    criterion = nn.CrossEntropyLoss()
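    # CrossEntropyLoss applies log_softmax internally, so ConvNet is assumed to output raw logits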
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # training
    train_mode = True
    print('starting training')
    fit(train_loader, test_loader, model, criterion, optimizer, num_epochs,
        use_cuda, train_mode)

    PATH = '/home/shasvatmukes/project/audio_classification/weights/simple_CNN_weights_log1.pth'  # use a unique name per experiment
    torch.save(model.state_dict(), PATH)

    model.load_state_dict(torch.load(PATH))
    # Test
    train_mode = False
    fit(train_loader, test_loader, model, criterion, optimizer, num_epochs,
        use_cuda, train_mode)
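
The helper fit() is not shown in this example. Below is a minimal sketch consistent with the two call sites above: it trains for num_epochs when train_mode is True and otherwise runs a single evaluation pass. The body is an assumption, not the original implementation:

import torch

def fit(train_loader, test_loader, model, criterion, optimizer, num_epochs,
        use_cuda, train_mode):
    # Hypothetical body: iterate the appropriate loader, backprop only in train mode.
    loader = train_loader if train_mode else test_loader
    if train_mode:
        model.train()
    else:
        model.eval()
    for epoch in range(num_epochs if train_mode else 1):
        correct, total = 0, 0
        for x, y in loader:
            if use_cuda:
                x, y = x.cuda(), y.cuda()
            with torch.set_grad_enabled(train_mode):
                outputs = model(x)
                loss = criterion(outputs, y)
            if train_mode:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            correct += (outputs.argmax(dim=1) == y).sum().item()
            total += y.size(0)
        print('epoch {}: acc {:.3f}'.format(epoch, correct / total))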
Example #3
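        # validation pass at the end of a training epoch: average accuracy over val batches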
        tmp_acc = 0.0
        tmp_num = 0.0
        with torch.no_grad():
            for i, batch in enumerate(val_loader):
                imgs, labels = batch[0].cuda(), batch[1].cuda()
                logits = model(imgs)
                acc = count_acc(logits, labels)
                tmp_acc += acc
                tmp_num += 1.0
                print('epoch {}, val {}/{}, acc={:.4f}'
                      .format(epoch, i + 1, len(val_loader), acc))
            tmp_acc /= tmp_num
            print('val acc = {}'.format(tmp_acc))
            if tmp_acc > best_acc:
                print('saving model...')
                best_acc = tmp_acc  # update the running best; otherwise every epoch above the stale value overwrites best.pth
                torch.save(model.state_dict(), "best.pth")
    

    model.load_state_dict(torch.load("best.pth"))
    model.eval()
    tmp_acc = 0.0
    tmp_num = 0.0
    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            imgs, labels = batch[0].cuda(), batch[1].cuda()
            logits = model(imgs)
            acc = count_acc(logits, labels)
            tmp_acc += acc
            tmp_num += 1.0
            print('epoch {}, test {}/{}, acc={:.4f}'
                  .format(epoch, i + 1, len(test_loader), acc))
        print('test acc = {}'.format(tmp_acc / tmp_num))  # mirrors the val summary above
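
count_acc is not defined in this snippet; it is assumed to be the usual top-1 accuracy helper, e.g.:

import torch

def count_acc(logits, labels):
    # fraction of samples whose argmax prediction matches the label
    pred = torch.argmax(logits, dim=1)
    return (pred == labels).float().mean().item()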
Example #4
		### Calculate accuracy
		acc, loss = 0, 0
		for nB in range(0, NTRAIN, batchSize):
			print("\r  Accuracy %d/%d" % (nB+batchSize, NTRAIN), end=" "*20, flush=True)
			# Forward pass (evaluation only; .item() keeps the autograd graph from accumulating)
			y = model(dataTrain[nB:nB+batchSize])
			loss += criterion(y, labelsTrain[nB:nB+batchSize]).item()
			_, predicted = torch.max(y.data, 1)
			correct = (predicted == labelsTrain[nB:nB+batchSize]).sum().item()
			acc += correct / batchSize
		acc = acc / NBATCHES

		print("\r  epoch=%d" % i, "loss=%4.4f" % loss, "accuracy=%0.4f" % acc)
		
		### Store current model
		torch.save(model.state_dict(), modelDir + "/%d_%0.2f.model" % (i, acc))

		### Break on good performance
		if loss < 0.001 and 0.999 < acc:
			break
except KeyboardInterrupt:
	print("\n\nTraining stopped prematurely")

### Calculate accuracy
print("\nTesting CNN on cpu..")
model = model.to("cpu")
model.eval()
correct = 0
for nB in range(0, NTEST, batchSize):
	y = model(dataTest[nB:nB+batchSize])
	_, predicted = torch.max(y.data, 1)
	# labelsTest is a hypothetical name assumed to mirror labelsTrain for the held-out set
	correct += (predicted == labelsTest[nB:nB+batchSize]).sum().item()
print("Test accuracy: %0.4f" % (correct / NTEST))
Example #5
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_item = loss.item()
        loss_training += loss_item
        epoch_loss += loss_item

        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss_training / 100))
            loss_training = 0.0

    plot_training.append(epoch_loss / (i + 1))  # enumerate is 0-based: i + 1 batches were seen this epoch

# Save the model checkpoint
torch.save(model.state_dict(), '../model_final.ckpt')
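# To restore the checkpoint later (sketch): rebuild the model with the same constructor
# arguments, then call model.load_state_dict(torch.load('../model_final.ckpt'))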

plt.figure(figsize=(8, 5))
plt.plot(plot_training)
plt.legend(['Train Set'], loc='upper right')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()