Example No. 1
def vgg_eval_model(dataset_root_dir, restore_model: str):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("DEVICE WILL BE USED: ", device)

    net = VggNet(torchvision.models.vgg16_bn(pretrained=True))
    net = net.to(device)

    classes = ('not hotdog', 'hotdog')

    if restore_model is not None and len(restore_model) > 0:
        # original saved file with DataParallel
        state_dict = torch.load(restore_model, map_location={'cuda:0': 'cpu'})
        net.load_state_dict(state_dict)
        print("Model {} restored".format(restore_model))
    else:
        print("ERROR: no restore model file found!")
        return

    #from torch.autograd import Variable
    #dummy_input = Variable(torch.randn(1, 3, 224, 224), requires_grad=True)
    # input_names = ["actual_input_1"] + ["learned_%d" % i for i in range(12)]
    # output_names = ["output1"]
    #torch.onnx.export(net, dummy_input, "vgg_hot_dog.onnx", export_params=True, verbose=True)
    #print("SUCCESS")

    hot_dog_dataset_test = HotDogsDatasetEval(
        root_dir=dataset_root_dir,
        transform=transforms.Compose([
            Rescale((224, 224)),  #normalize,
            ToTensor(),
        ]))
    test_dataloader = DataLoader(hot_dog_dataset_test,
                                 batch_size=4,
                                 shuffle=True,
                                 num_workers=4)

    for dl, split in zip([test_dataloader], ['test']):
        total = 0
        with torch.no_grad():
            for data in dl:
                images, names = data['image'].float(), data['name']
                images = images.to(device)

                outputs = net(images)
                total += len(names)
                for id, prediction in enumerate(outputs.data):
                    res = torch.nn.functional.softmax(prediction, dim=0)
                    _, rid = torch.max(res, 0)
                    print('{} is {}'.format(names[id], classes[rid]))
                    imshow(torchvision.utils.make_grid(images[id]))
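The "saved with DataParallel" comment above is worth heeding: a state dict saved from an nn.DataParallel wrapper carries a "module." prefix on every key, so loading it into an unwrapped model raises missing-key errors. A minimal sketch of stripping the prefix first (the path is illustrative, not from the original code):

from collections import OrderedDict

import torch

def strip_data_parallel_prefix(state_dict):
    # Remove the 'module.' prefix that nn.DataParallel adds to every key.
    cleaned = OrderedDict()
    for key, value in state_dict.items():
        new_key = key[len('module.'):] if key.startswith('module.') else key
        cleaned[new_key] = value
    return cleaned

# usage: state_dict = torch.load('model.pth', map_location='cpu')
#        net.load_state_dict(strip_data_parallel_prefix(state_dict))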
Example No. 2
model_path = './ocr-model/crnn_address.pth'
img_path = './ocr_address.jpg'
# alphabet = '0123456789X'
alphabet = ''.join(json.load(open('./cn-alphabet.json', 'rb')))

model = CRNN(IMAGE_HEIGHT, 1, len(alphabet) + 1, 256)
if torch.cuda.is_available():
    model = model.cuda()
print('loading pretrained model from %s' % model_path)
model.load_state_dict(torch.load(model_path))

converter = LabelConverter(alphabet)

image_transform = transforms.Compose(
    [Rescale(IMAGE_HEIGHT),
     transforms.ToTensor(),
     Normalize()])
image = cv2.imread(img_path, 0)
image = image_transform(image)
if torch.cuda.is_available():
    image = image.cuda()
image = image.view(1, *image.size())
image = Variable(image)

model.eval()
preds = model(image)

_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
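The snippet stops after the per-timestep argmax; repeated symbols and CTC blanks still have to be collapsed to produce text. Assuming this LabelConverter mirrors the common crnn.pytorch strLabelConverter API (an assumption, since its definition is not shown here), decoding would look like:

preds_size = torch.IntTensor([preds.size(0)])
raw_pred = converter.decode(preds, preds_size, raw=True)   # still contains blanks/repeats
sim_pred = converter.decode(preds, preds_size, raw=False)  # collapsed to the final text
print('%-20s => %-20s' % (raw_pred, sim_pred))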
Example No. 3
def train(field):
    alphabet = ''.join(json.load(open('./cn-alphabet.json', 'rb')))
    nclass = len(alphabet) + 1  # +1 for the CTC blank symbol (rendered as '-')
    batch_size = BATCH_SIZE
    if field == 'address' or field == 'psb':
        batch_size = 1  # image length varies

    converter = LabelConverter(alphabet)
    criterion = CTCLoss(zero_infinity=True)

    crnn = CRNN(IMAGE_HEIGHT, nc, nclass, number_hidden)
    crnn.apply(weights_init)

    image_transform = transforms.Compose([
        Rescale(IMAGE_HEIGHT),
        transforms.ToTensor(),
        Normalize()
    ])

    dataset = LmdbDataset(db_path, field, image_transform)
    dataloader = DataLoader(dataset, batch_size=batch_size,
                            shuffle=True, num_workers=4)

    image = torch.FloatTensor(batch_size, 3, IMAGE_HEIGHT, IMAGE_HEIGHT)
    text = torch.IntTensor(batch_size * 5)
    length = torch.IntTensor(batch_size)

    image = Variable(image)
    text = Variable(text)
    length = Variable(length)

    loss_avg = utils.averager()
    optimizer = optim.RMSprop(crnn.parameters(), lr=lr)

    if torch.cuda.is_available():
        crnn.cuda()
        crnn = nn.DataParallel(crnn)
        image = image.cuda()
        criterion = criterion.cuda()

    def train_batch(net, iteration):
        data = next(iteration)  # Python 3: next(it), not it.next()
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.load_data(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.load_data(text, t)
        utils.load_data(length, l)

        preds = net(image)
        preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        net.zero_grad()
        cost.backward()
        optimizer.step()
        return cost

    nepoch = 25
    for epoch in range(nepoch):
        train_iter = iter(dataloader)
        i = 0
        while i < len(dataloader):
            for p in crnn.parameters():
                p.requires_grad = True
            crnn.train()

            cost = train_batch(crnn, train_iter)
            loss_avg.add(cost)
            i += 1

            if i % 500 == 0:
                print('%s [%d/%d][%d/%d] Loss: %f' %
                      (datetime.datetime.now(), epoch, nepoch, i, len(dataloader), loss_avg.val()))
                loss_avg.reset()

                # checkpoint at the same cadence as the log
                torch.save(
                    crnn.state_dict(), f'{model_path}crnn_{field}_{epoch}_{i}.pth')
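CTCLoss is strict about shapes: log-probabilities of shape (T, N, C), a flat target tensor, and per-sample input and target lengths, which is exactly what criterion(preds, text, preds_size, length) passes above. A self-contained shape check (the dimensions are arbitrary, chosen only for illustration):

import torch
import torch.nn as nn

T, N, C = 26, 4, 11                                        # timesteps, batch, classes (blank = index 0)
log_probs = torch.randn(T, N, C).log_softmax(2)            # CRNN output after log_softmax
targets = torch.randint(1, C, (N * 5,), dtype=torch.int)   # flat targets, no blanks
input_lengths = torch.full((N,), T, dtype=torch.int)
target_lengths = torch.full((N,), 5, dtype=torch.int)

criterion = nn.CTCLoss(zero_infinity=True)
loss = criterion(log_probs, targets, input_lengths, target_lengths)
print(loss.item())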
Example No. 4
            model_name = 'model_' + str(289) + '_0.0001_' + str(epoch) + '.pth'
            torch.save(net, model_name)
            eval(valloader, model_name)

    print('Finished Training')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('path')
    args = parser.parse_args()

    traindataset = PhoneDataset(label_path='labels_train.txt',
                                root_dir=args.path,
                                transform=transforms.Compose(
                                    [Rescale((224, 224)),
                                     ToTensor()]))
    trainloader = torch.utils.data.DataLoader(
        traindataset,
        batch_size=4,
        shuffle=False,
    )

    valdataset = PhoneDataset(label_path='labels_val.txt',
                              root_dir=args.path,
                              transform=transforms.Compose(
                                  [Rescale((224, 224)),
                                   ToTensor()]))
    valloader = torch.utils.data.DataLoader(
        valdataset,
        batch_size=1,
    )
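Note that torch.save(net, model_name) in the epoch loop above pickles the whole module, which ties the checkpoint to the exact class definition and module layout at save time. Saving the state_dict is the more portable pattern; a small self-contained sketch (nn.Linear stands in for the real model):

import torch
import torch.nn as nn

net = nn.Linear(4, 2)                              # stand-in for the real network
torch.save(net.state_dict(), 'model.pth')          # parameters only, no pickled class

restored = nn.Linear(4, 2)                         # rebuild the same architecture first
restored.load_state_dict(torch.load('model.pth'))  # then load the weights
restored.eval()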
Example No. 5
print(f"We are training on {device}")

load_path = "vgg16-conv.pth"
if load_path:
    checkpoint = torch.load(load_path)
    # TODO: enable when loading a checkpoint that was saved by this trainer
    # model_dict = checkpoint['model_state_dict']
    model = vgg16(checkpoint)
    print("We loaded a checkpoint")
else:
    model = vgg16()

model.to(device)

train_dataset = roboCupDatasets(transform=torchvision.transforms.Compose(
    [Rescale(), ToTensor(), Normalize()]))
train_dl = DataLoader(train_dataset, batch_size=2, shuffle=True)

epochs = 35

opt = torch.optim.Adam(model.parameters(), lr=0.0001)

with open(f"./{save_folder}/stats.csv", "w+") as f:
    f.write("train_loss\n")

for epoch in range(epochs):
    model.train()
    train_loss = []
    for i, sample in enumerate(train_dl):
        with torch.set_grad_enabled(True):
            opt.zero_grad()
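The excerpt is cut off right after opt.zero_grad(). A hedged sketch of how the rest of a step typically looks under this setup; the cross-entropy objective and the argument names are assumptions, since neither the dataset keys nor the loss are shown:

import torch.nn.functional as F

def train_step(model, opt, inputs, targets):
    # one optimization step: forward, loss, backward, update
    opt.zero_grad()
    outputs = model(inputs)
    loss = F.cross_entropy(outputs, targets)  # assumed objective
    loss.backward()
    opt.step()
    return loss.item()  # e.g. append to train_loss and log to stats.csv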
Example No. 6
def simple_net(dataset_root_dir: str, restore_model: str):
    """Train a small CNN hot dog classifier from scratch."""
    """
        LOAD TRAIN & TEST DATASETS
    """
    hot_dog_dataset_train = HotDogsDataset(
        train=True,
        root_dir=dataset_root_dir,
        transform=transforms.Compose([Rescale((256, 256)),
                                      ToTensor()]))
    train_dataloader = DataLoader(hot_dog_dataset_train,
                                  batch_size=4,
                                  shuffle=True,
                                  num_workers=1)

    hot_dog_dataset_test = HotDogsDataset(
        train=False,
        root_dir=dataset_root_dir,
        transform=transforms.Compose([Rescale((256, 256)),
                                      ToTensor()]))
    test_dataloader = DataLoader(hot_dog_dataset_test,
                                 batch_size=4,
                                 shuffle=True,
                                 num_workers=4)
    """
        SETUP CNN MODEL
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("DEVICE WILL BE USED: ", device)

    classes = ('not hotdog', 'hotdog')

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(3, 16, 3)
            self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
            self.conv3 = nn.Conv2d(32, 64, 3)

            self.pool = nn.MaxPool2d(2, 2)

            self.fc1 = nn.Linear(64 * 61 * 61, 100)
            self.fc2 = nn.Linear(100, 100)
            self.fc3 = nn.Linear(100, 2)

        def forward(self, x):
            x = self.pool(F.relu(self.conv1(x)))
            x = self.pool(F.relu(self.conv2(x)))
            x = F.relu(self.conv3(x))
            x = x.view(-1, 64 * 61 * 61)
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x

    net = Net()
    net = net.to(device)

    if len(restore_model) > 0:
        net.load_state_dict(torch.load(restore_model))
        print("Model {} restored".format(restore_model))
    """
        SETUP LOSS FUNCTION
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    """
        START TRAINING
    """
    last_accuracy = None
    epoch = 0
    #for epoch in range(1):  # alternative: loop a fixed number of epochs
    while True:  # train indefinitely; a checkpoint is saved after every epoch
        running_loss = 0.0
        for i, data in enumerate(train_dataloader, 0):
            # get the inputs
            inputs, labels = data['image'].float(), data['label']
            inputs, labels = inputs.to(device), labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels.long().view(-1))  # -1: the last batch may be smaller than 4
            loss.backward()
            optimizer.step()

            # print statistics
            print('.', end='')
            running_loss += loss.item()
            if i % 100 == 99:  # print every 100 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
        epoch += 1

        for dl, split in zip([test_dataloader, train_dataloader],
                             ['test', 'train']):
            correct = 0
            total = 0
            with torch.no_grad():
                for data in dl:
                    images, labels = data['image'].float(), data['label'].long().view(-1)
                    images, labels = images.to(device), labels.to(device)

                    outputs = net(images)
                    total += labels.size(0)
                    for id, prediction in enumerate(outputs.data):
                        res = torch.nn.functional.softmax(prediction, dim=0)
                        _, rid = torch.max(res, 0)
                        if rid == labels[id]:
                            correct += 1

                    #correct += (predicted == labels).sum().item()
            cur_accuracy = (100. * correct / float(total))
            print('Accuracy of the network on the ' + split + ' set with ' +
                  str(total) + ' images: %f %%' % cur_accuracy)

        torch.save(net.state_dict(), './model-frozen-{}'.format(epoch))

    print('Finished Training')
    """
Example No. 7
    if download:
        download_data([
            "cat", "envelope", "eyeglasses", "mushroom", "star",
            "baseball bat", "t-shirt", "car", "fish", "snail"
        ])
        combine_data(100000)

    # We can use an image folder dataset the way we have it setup.
    # Create the dataset
    dataset = QuickdrawDataset(
        datapath="data/X.npy",
        targetpath="data/y.npy",
        transform=transforms.Compose([
            transforms.Resize((28, 28)),
            transforms.ToTensor(),
            Rescale(),
            # transforms.Normalize((0.1575,), (0.3113,)),  # mean and std of the dataset
            # transforms.Normalize((-0.6849,), (0.6227,))
        ]))

    # Create the dataloader
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=workers)

    netG = Generator().to(device)
    netG.apply(weights_init)

    netD = Discriminator().to(device)
    netD.apply(weights_init)
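weights_init is applied to both networks but not defined in this excerpt. Assuming the code follows the standard DCGAN recipe (which the Generator/Discriminator pairing suggests, but is not confirmed here), it draws conv weights from N(0, 0.02) and batch-norm weights from N(1, 0.02):

import torch.nn as nn

def weights_init(m):
    # DCGAN-style init: N(0, 0.02) for convs, N(1, 0.02) for batch norms
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)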
Example No. 8
def vgg_train(dataset_root_dir: str, restore_model: str, dump_to_onnx: str):
    """Fine-tune a pretrained VGG16-BN as a hot dog classifier."""
    """
        LOAD TRAIN & TEST DATASETS
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    hot_dog_dataset_train = HotDogsDataset(
        train=True,
        root_dir=dataset_root_dir,
        transform=transforms.Compose([
            Rescale((224, 224)),  #normalize,
            ToTensor(),
        ]))
    train_dataloader = DataLoader(hot_dog_dataset_train,
                                  batch_size=4,
                                  shuffle=True,
                                  num_workers=1)

    hot_dog_dataset_test = HotDogsDataset(
        train=False,
        root_dir=dataset_root_dir,
        transform=transforms.Compose([
            Rescale((224, 224)),  #normalize,
            ToTensor(),
        ]))
    test_dataloader = DataLoader(hot_dog_dataset_test,
                                 batch_size=4,
                                 shuffle=True,
                                 num_workers=4)
    """
        SETUP CNN MODEL
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("DEVICE WILL BE USED: ", device)

    classes = ('not hotdog', 'hotdog')

    net = VggNet(torchvision.models.vgg16_bn(pretrained=True))
    # freeze everything except the last two parameter tensors
    # (typically the final classifier weight and bias)
    for param in list(net.parameters())[:-2]:
        param.requires_grad = False

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        net = nn.DataParallel(net)

    net.to(device)

    # if restore_model is not None:
    #     net.load_state_dict(torch.load(restore_model, map_location={'cuda:0': 'cpu'}))
    #     print("Model {} restored".format(restore_model))

    print(net)

    if len(dump_to_onnx) > 0:
        from torch.autograd import Variable
        dummy_input = Variable(torch.randn(1, 3, 224, 224), requires_grad=True)
        torch.onnx.export(net,
                          dummy_input,
                          "{}.onnx".format(dump_to_onnx),
                          export_params=True,
                          verbose=True)
        print("Saved ONNX model as {}.onnx".format(dump_to_onnx))
        return
    """
        SETUP LOSS FUNCTION
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    """
        START TRAINING
    """
    last_accuracy = None
    epoch = 0
    # for epoch in range(1):  # alternative: loop a fixed number of epochs
    while True:  # train indefinitely; a checkpoint is saved after every epoch
        running_loss = 0.0
        for i, data in enumerate(train_dataloader, 0):
            # get the inputs
            inputs, labels = data['image'].float(), data['label']
            inputs, labels = inputs.to(device), labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)

            print("Outside: input size", inputs.size(), "output_size",
                  outputs.size())

            loss = criterion(outputs, labels.long().view(-1))  # -1: the last batch may be smaller than 4
            loss.backward()
            optimizer.step()

            # print statistics
            print('.', end='')
            running_loss += loss.item()
            if i % 100 == 99:  # print every 100 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
        epoch += 1

        for dl, split in zip([test_dataloader, train_dataloader],
                             ['test', 'train']):
            correct = 0
            total = 0
            with torch.no_grad():
                for data in dl:
                    images, labels = data['image'].float(), data['label'].long().view(-1)
                    images, labels = images.to(device), labels.to(device)

                    outputs = net(images)
                    total += labels.size(0)
                    for id, prediction in enumerate(outputs.data):
                        res = torch.nn.functional.softmax(prediction, dim=0)
                        _, rid = torch.max(res, 0)
                        if rid == labels[id]:
                            correct += 1

            cur_accuracy = (100. * correct / float(total))
            print('Accuracy of the network on the ' + split + ' set with ' +
                  str(total) + ' images: %f %%' % cur_accuracy)

        # note: if net is wrapped in nn.DataParallel, the saved keys carry a "module." prefix
        torch.save(net.state_dict(), './model-frozen-{}'.format(epoch))

    print('Finished Training')
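Since all but the last two parameter tensors are frozen, the optimizer can also be restricted to the parameters that still train, which avoids allocating momentum buffers for frozen weights. A sketch, equivalent in effect to the optim.SGD call above:

import torch.optim as optim

trainable = [p for p in net.parameters() if p.requires_grad]
optimizer = optim.SGD(trainable, lr=0.001, momentum=0.9)
print('training {} of {} parameter tensors'.format(
    len(trainable), len(list(net.parameters()))))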