Example #1
import numpy as np
import torch
import cv2
import imutils
from network import ConvNet


def copynet(net):
    # Build a fresh ConvNet with the same architecture and copy the weights over
    newnet = ConvNet(channels=64, blocks=3)
    newnet.load_state_dict(net.state_dict())
    return newnet

# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Initialize the network
model = ConvNet().to(device)

# Loading the weights
model.load_state_dict(torch.load('../model_final.ckpt', map_location=device))

# Eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
model.eval()

image = cv2.imread("../test.png")

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# gray = cv2.copyMakeBorder(gray, 8, 8, 8, 8, cv2.BORDER_REPLICATE)

thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

# find the contours (continuous blobs of pixels) in the image
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Hack for compatibility with different OpenCV versions
contours = contours[1] if imutils.is_cv3() else contours[0]
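
The index hack above is needed because cv2.findContours returns a 3-tuple in OpenCV 3 but a 2-tuple in OpenCV 2 and 4. A hedged alternative, assuming imutils >= 0.5.2 is installed, is the grab_contours helper, which hides that difference:

# Equivalent, version-agnostic variant of the two lines above
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)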
Example #3
                # One-hot encode tile values 0..15 into 16 channels
                b = [b == i for i in range(16)]
                b = torch.stack(b, dim=1).float()
                # Reshape to the (N, 16, 4, 4) network input and rank moves from most to least preferred
                preds = model(b.view(-1, 16, 4, 4))
                preds = torch.argsort(preds.cpu(), dim=1, descending=True)
                dead_s = games.move_batch(preds)
                if dead_s:
                    result.append(count)
                    break
                count += 1
    return np.mean(result)


if __name__ == '__main__':
    from time import time
    from network import ConvNet
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f'Using {device}')

    # Checkpoint names (without the .pt extension) to evaluate
    names = [
    ]
    for name in names:
        print(name)
        m = ConvNet(channels=64, blocks=3)
        m.load_state_dict(torch.load('models/{}.pt'.format(name), map_location=device))
        m.to(device)
        t = time()
        print(eval_nn(m, name, number=5000, device=device, verbose=True))
        t = time() - t
        print('{0:.3f} seconds'.format(t))
        print('-'*10)
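
The b = [b == i for i in range(16)] trick above builds a 16-channel one-hot encoding of the board before it is fed to the network. A standalone sketch of just that step, assuming the batch is an (N, 16) tensor of flattened 4x4 boards holding tile values 0..15 (the shape is inferred from the view(-1, 16, 4, 4) call):

import torch

b = torch.randint(0, 16, (2, 16))                                  # two flattened 4x4 boards
one_hot = torch.stack([b == i for i in range(16)], dim=1).float()  # (2, 16, 16)
x = one_hot.view(-1, 16, 4, 4)                                     # network input
print(x.shape)                                                     # torch.Size([2, 16, 4, 4])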
Example #4
def main(args):
    if args.name:
        args.name += '_'
    logname = f'{args.name}{args.t_tuple[0]}_{args.t_tuple[1]}_soft{args.soft}' \
              f'c{args.channels}b{args.blocks}_p{args.patience}_' \
              f'bs{args.batch_size}lr{args.lr}d{args.decay}_s{args.seed}'
    print(logname)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using {}'.format(device))
    torch.backends.cudnn.benchmark = True

    train_set = OneHotConvGameDataset(args.path, args.t_tuple[0], args.t_tuple[1], device, soft=args.soft)
    train_dat = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)

    m = ConvNet(channels=args.channels, blocks=args.blocks)
    if args.pretrained:
        m.load_state_dict(torch.load('models/{}.pt'.format(args.pretrained), map_location=device))
        print('Loaded ' + args.pretrained)
        logname = 'pre_'+logname
    m.to(device)
    # KLDivLoss expects log-probabilities as input and probabilities as targets
    loss_fn = nn.KLDivLoss(reduction='batchmean')
    optimizer = torch.optim.Adam(m.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.decay)
    t_loss = []    # per-epoch training loss
    min_move = []  # per-epoch evaluation score
    best = 0.0
    timer = 0      # epochs since the last improvement
    if args.patience == 0:
        stop = args.epochs
    else:
        stop = args.patience

    data_len = len(train_dat)
    for epoch in range(args.epochs):
        print('-' * 10)
        print('Epoch: {}'.format(epoch))
        timer += 1

        m.train()
        running_loss = 0
        for x, y in tqdm(train_dat):
            optimizer.zero_grad()
            pred = m(x)
            loss = loss_fn(pred, y)
            running_loss += loss.item()
            loss.backward()
            optimizer.step()
        running_loss /= data_len
        # Abort the run early if the loss is still above 0.210 after three epochs
        if epoch == 2 and running_loss > 210/1000:
            stop = 0
        print('Train mLoss: {:.3f}'.format(1e3 * running_loss))
        t_loss.append(running_loss)
        
        m.eval()
        time1 = time()
        ave_min_move = eval_nn_min(m, number=10, repeats=40, device=device)
        time_str = ', took {:.0f} seconds'.format(time()-time1)
        min_move.append(ave_min_move)
        if ave_min_move >= best:
            tqdm.write(str(ave_min_move) + ' ** Best' + time_str)
            best = ave_min_move
            timer = 0
            torch.save(m.state_dict(), 'models/' + logname + '_best.pt')
        else:
            tqdm.write(str(ave_min_move) + time_str)

        if timer >= stop:
            print('Ran out of patience')
            print(f'Best score: {best}')
            # torch.save(m.state_dict(), 'models/'+logname+f'_e{epoch}.pt')
            break
        else:
            print(f'{stop - timer} epochs remaining')

    np.savez('logs/'+logname,
             t_loss=t_loss,
             min_move=min_move,
             params=args)
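
One detail worth making explicit about the training loop above: nn.KLDivLoss expects log-probabilities as its input and probabilities as its target, so this example implicitly assumes the ConvNet forward pass ends in log_softmax. A minimal sketch of the expected input format (shapes are illustrative only):

import torch
import torch.nn as nn
import torch.nn.functional as F

loss_fn = nn.KLDivLoss(reduction='batchmean')
log_probs = F.log_softmax(torch.randn(8, 4), dim=1)  # what the model should output
targets = F.softmax(torch.randn(8, 4), dim=1)        # soft target distributions
loss = loss_fn(log_probs, targets)
print(loss.item())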
Example #5
def run_experiments():

    ###################################EXPERIMENT_1##############################################################
    '''
    DESCRIPTION
    Training and testing set both contain all the recordings. 80-20 split random state =  42
    '''
    '''
    ID: 5924295
    '''

    IDs = [1, 2, 3, 4]
    list_IDs, y = separate_data_by_mic_id_train(IDs)
    train_list_IDs, test_list_IDs, y_train, y_test = train_test_split(
        list_IDs, y, test_size=0.2, random_state=42)

    ######HYPERPARAMETERS#############################################
    num_epochs = 10
    num_classes = 2
    learning_rate = 1e-3
    batch_size = 1
    #################################################################

    training_set = TrainDataset(train_list_IDs, y_train)
    train_loader = torch.utils.data.DataLoader(dataset=training_set,
                                               batch_size=batch_size,
                                               shuffle=True)

    test_set = TestDataset(test_list_IDs, y_test)
    test_loader = torch.utils.data.DataLoader(dataset=test_set,
                                              batch_size=batch_size,
                                              shuffle=True)
    if use_cuda:
        model = ConvNet(num_classes).cuda()
    else:
        model = ConvNet(num_classes)

    # Loss and optimizer

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # training
    train_mode = True
    print('starting training')
    fit(train_loader, test_loader, model, criterion, optimizer, num_epochs,
        use_cuda, train_mode)

    PATH = '/home/shasvatmukes/project/audio_classification/weights/simple_CNN_weights_log1.pth'  # unique names
    torch.save(model.state_dict(), PATH)

    model.load_state_dict(torch.load(PATH))
    # Test
    train_mode = False
    fit(train_loader, test_loader, model, criterion, optimizer, num_epochs,
        use_cuda, train_mode)
    '''
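
A short sketch of how the checkpoint saved above could later be reloaded for inference only; ConvNet, num_classes and PATH are taken from the example, while map_location is added here so the load also works on a CPU-only machine:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ConvNet(num_classes).to(device)
model.load_state_dict(torch.load(PATH, map_location=device))
model.eval()  # inference mode: dropout off, batch norm uses running statistics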
Example #6
        with torch.no_grad():
            for i, batch in enumerate(val_loader):
                imgs, labels = batch[0].cuda(), batch[1].cuda()
                logits = model(imgs)
                acc = count_acc(logits, labels)
                tmp_acc += acc
                tmp_num += 1.0
                print('epoch {}, val {}/{}, acc={:.4f}'
                      .format(epoch, i, len(val_loader), acc))    
            tmp_acc /= tmp_num
            print('val acc = {}'.format(tmp_acc))
            if tmp_acc > best_acc:
                print('saving model...')
                best_acc = tmp_acc
                torch.save(model.state_dict(), "best.pth")
    

    model.load_state_dict(torch.load("best.pth"))
    model.eval()
    tmp_acc = 0.0
    tmp_num = 0.0
    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            imgs, labels = batch[0].cuda(), batch[1].cuda()
            logits = model(imgs)
            acc = count_acc(logits, labels)
            tmp_acc += acc
            tmp_num += 1.0
            print('epoch {}, test {}/{}, acc={:.4f}'
                  .format(epoch, i, len(test_loader), acc))    
        tmp_acc /= tmp_num
        print('test acc = {}'.format(tmp_acc))
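
count_acc is not defined in this fragment; a plausible minimal implementation, shown here only as an assumption about what it computes, is the fraction of argmax predictions that match the labels:

import torch

def count_acc(logits, labels):
    # Fraction of samples whose highest-scoring class equals the label
    return (torch.argmax(logits, dim=1) == labels).float().mean().item()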
Example #7
        raise ValueError('Seed is {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    name = str(seed).zfill(5)

    start_time = time()
    # Set to True to run self-play with the traced (jit) network instead of the fixed policy
    use_network = False
    if not use_network:
        selfplay_fixed(name, number=10, times=4, verbose=args.verbose)
    else:
        torch.backends.cudnn.benchmark = True
        m_name = 'models/20200207/best_s7_jit.pth'
        if not os.path.exists(m_name):
            full_name = '20200207/0_1000_soft3.0c64b3_p10_bs2048lr0.1d0.0_s7_best'
            print('Tracing: {}'.format(full_name))
            model = ConvNet(channels=64, blocks=3)
            model.load_state_dict(torch.load('models/{}.pt'.format(full_name)))
            model.to('cuda')
            model.eval()
            # Trace with an example batch so the model can later be reloaded without the ConvNet class
            model = torch.jit.trace(model, torch.randn(10, 16, 4, 4, dtype=torch.float32, device='cuda'))
            torch.jit.save(model, m_name)
            print('Jit model saved')
        else:
            print(m_name)
            model = torch.jit.load(m_name)
            selfplay(name, model, number=10, times=4, verbose=args.verbose)

    total_time = time() - start_time
    print(f'Took {total_time/60:.1f} minutes')
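
For completeness, a sketch of how the traced model saved above could be reused elsewhere; the (N, 16, 4, 4) input shape mirrors the example tensor passed to torch.jit.trace, and the CUDA device is assumed because the model was traced on it:

import torch

model = torch.jit.load('models/20200207/best_s7_jit.pth')
model.eval()
with torch.no_grad():
    boards = torch.randn(10, 16, 4, 4, device='cuda')  # dummy input with the traced shape
    preds = model(boards)
print(preds.shape)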