Example #1
def create_data(set_name):

    if set_name == "training" or set_name == "test":
        # Load preprocessing parameters and the base dataframe
        params = dicto.load_("params.yml")

        df = dataset.get_base_df("data/base")

        # Split the dataframe into training and test partitions
        split = int(len(df) * params.preprocessing.split)
        training_df = df.iloc[:split]
        test_df = df.iloc[split:]

        if set_name == "training":
            df = training_df
            # Grow the training dataframe (project-specific preprocessing step)
            df = pr.grow_dataset(df, params=params)

            # df = df.iloc[:100]  # uncomment to debug on a small subset

            # Batch, load images, augment, and serialize the training split
            generator = utils.dataframe_batch_generator(
                df, params.preprocessing.batch_size)
            generator = pr.read_images(generator)
            generator = pr.data_augmentation(generator, params=params)

            pr.create_tfrecords(generator, "training_set.record", params)
        else:
            df = test_df

            # Batch, load images, and serialize the test split (no augmentation)
            generator = utils.dataframe_batch_generator(
                df, params.preprocessing.batch_size)
            generator = pr.read_images(generator)

            pr.create_tfrecords(generator, "test_set.record", params)
    else:
        raise ValueError(
            "Please choose 'training' or 'test', got {set_name}".format(
                set_name=set_name))
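For reference, a minimal driver for create_data might look like the sketch below; the modules used above (dicto, dataset, pr, utils) and params.yml are project-specific and assumed to be importable as-is, and the entry point itself is hypothetical:

if __name__ == "__main__":
    # Hypothetical entry point: build both record files in one run
    create_data("training")   # writes training_set.record
    create_data("test")       # writes test_set.record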
Example #2
def main():
    trainx, trainy = read_images('../input/omniglot/images_background/')
    testx, testy = read_images('../input/omniglot/images_evaluation/')
    use_gpu = torch.cuda.is_available()
    trainx = torch.from_numpy(trainx).float()
    testx = torch.from_numpy(testx).float()
    if use_gpu:
        trainx = trainx.cuda()
        testx = testx.cuda()
    print(trainx.size(), testx.size())
    num_episode = 16000
    frame_size = 1000
    trainx = trainx.permute(0, 3, 1, 2)
    testx = testx.permute(0, 3, 1, 2)
    frame_loss = 0
    frame_acc = 0
    for i in range(num_episode):
        loss, acc = train_step(trainx, trainy, 5, 60, 5)
        frame_loss += loss.data
        frame_acc += acc.data
        if (i + 1) % frame_size == 0:
            print("Frame Number:", ((i+1) // frame_size), 'Frame Loss: ', frame_loss.data.cpu().numpy().tolist() /
                  frame_size, 'Frame Accuracy:', (frame_acc.data.cpu().numpy().tolist() * 100) / frame_size)
            frame_loss = 0
            frame_acc = 0
    num_test_episode = 2000
    avg_loss = 0
    avg_acc = 0
    for _ in range(num_test_episode):
        loss, acc = test_step(testx, testy, 5, 60, 15)
        avg_loss += loss.data
        avg_acc += acc.data
    print('Avg Loss: ', avg_loss.data.cpu().numpy().tolist() / num_test_episode,
          'Avg Accuracy:', (avg_acc.data.cpu().numpy().tolist() * 100) / num_test_episode)
Example #3
def main():
    # Reading the data
    trainx, trainy = read_images('../input/omniglot/images_background/')
    testx, testy = read_images('../input/omniglot/images_evaluation/')
    # Checking if GPU is available
    use_gpu = torch.cuda.is_available()
    # Converting input to pytorch Tensor
    trainx = torch.from_numpy(trainx).float()
    testx = torch.from_numpy(testx).float()
    if use_gpu:
        trainx = trainx.cuda()
        testx = testx.cuda()
    # Printing the data
    print(trainx.size(), testx.size())
    # Set training iterations and display period
    num_episode = 16000
    frame_size = 1000
    trainx = trainx.permute(0, 3, 1, 2)
    testx = testx.permute(0, 3, 1, 2)

    # Initializing prototypical net
    protonet = PrototypicalNet(use_gpu)
    # Training loop
    frame_loss = 0
    frame_acc = 0
    for i in range(num_episode):
        loss, acc = train_step(protonet, trainx, trainy, 5, 60, 5)
        frame_loss += loss.data
        frame_acc += acc.data
        if (i + 1) % frame_size == 0:
            print("Frame Number:", ((i + 1) // frame_size), 'Frame Loss: ',
                  frame_loss.data.cpu().numpy().tolist() / frame_size,
                  'Frame Accuracy:',
                  (frame_acc.data.cpu().numpy().tolist() * 100) / frame_size)
            frame_loss = 0
            frame_acc = 0

    # Test loop
    num_test_episode = 2000
    avg_loss = 0
    avg_acc = 0
    for _ in range(num_test_episode):
        loss, acc = test_step(protonet, testx, testy, 5, 60, 15)
        avg_loss += loss.data
        avg_acc += acc.data
    print('Avg Loss: ',
          avg_loss.data.cpu().numpy().tolist() / num_test_episode,
          'Avg Accuracy:',
          (avg_acc.data.cpu().numpy().tolist() * 100) / num_test_episode)

    # Using Pretrained Model
    protonet = load_weights('./protonet.pt', protonet, use_gpu)
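The PrototypicalNet, train_step, and test_step helpers above come from the repository and are used here as black boxes. Purely as an illustration of the computation they wrap, here is a minimal sketch of one prototypical episode in plain PyTorch; the function name and the assumption that support/query embeddings have already been computed are mine, not the repository's:

import torch
import torch.nn.functional as F


def prototypical_episode_loss(embed_support, support_labels,
                              embed_query, query_labels, num_classes):
    # Prototype of each class = mean embedding of its support examples
    prototypes = torch.stack([
        embed_support[support_labels == c].mean(dim=0)
        for c in range(num_classes)
    ])                                                  # (num_classes, D)
    # Squared Euclidean distance from every query embedding to every prototype
    dists = torch.cdist(embed_query, prototypes) ** 2   # (num_query, num_classes)
    logits = -dists
    loss = F.cross_entropy(logits, query_labels)
    acc = (logits.argmax(dim=1) == query_labels).float().mean()
    return loss, acc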
Example #4
def main():
    tb_writer = SummaryWriter()

    # Reading the data
    print("Reading background images")
    trainx, trainy = read_images(
        r'D:\_hackerreborn\Prototypical-Networks\input\omniglot\images_background'
    )
    print(trainx.shape)
    print(trainy.shape)

    print("Reading background images")
    testx, testy = read_images(
        r'D:\_hackerreborn\Prototypical-Networks\input\omniglot\images_evaluation'
    )
    print(testx.shape)
    print(testy.shape)

    # Checking if GPU is available
    use_gpu = torch.cuda.is_available()
    # Converting input to pytorch Tensor
    trainx = torch.from_numpy(trainx).float()
    testx = torch.from_numpy(testx).float()
    if use_gpu:
        trainx = trainx.cuda()
        testx = testx.cuda()
    # Printing the data
    print(trainx.size(), testx.size())
    # Set training iterations and display period
    num_episode = 16000
    frame_size = 1000
    trainx = trainx.permute(0, 3, 1, 2)
    testx = testx.permute(0, 3, 1, 2)

    # Initializing prototypical net
    protonet = PrototypicalNet(use_gpu)
    optimizer = optim.SGD(protonet.parameters(), lr=0.01, momentum=0.99)

    # Training loop
    frame_loss = 0
    frame_acc = 0

    for i in range(num_episode):
        print("Train Episode Number : {0}".format(i))
        # if i > 10:
        #     break

        loss, acc = train_step(protonet, trainx, trainy, 5, 60, 5, optimizer)
        frame_loss += loss.data
        frame_acc += acc.data
        if (i + 1) % frame_size == 0:
            print("Frame Number:", ((i + 1) // frame_size), 'Frame Loss: ',
                  frame_loss.data.cpu().numpy().tolist() / frame_size,
                  'Frame Accuracy:',
                  (frame_acc.data.cpu().numpy().tolist() * 100) / frame_size)

            tb_writer.add_scalar(
                'frame_loss',
                frame_loss.data.cpu().numpy().tolist() / frame_size,
                ((i + 1) // frame_size))
            tb_writer.add_scalar(
                'frame_accuracy',
                (frame_acc.data.cpu().numpy().tolist() * 100) / frame_size,
                ((i + 1) // frame_size))

            frame_loss = 0
            frame_acc = 0
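One small gap in Example #4: the SummaryWriter is never flushed or closed, so scalars buffered at the end of training may not reach disk. A possible addition for the end of main() (placement assumed, since the snippet ends inside the training loop):

    # Flush and close the TensorBoard writer so the remaining frame_loss /
    # frame_accuracy scalars are written out; inspect the run with
    # `tensorboard --logdir runs` (the SummaryWriter default log directory).
    tb_writer.flush()
    tb_writer.close()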
Example #5
import torch
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
import cv2
from tqdm import tqdm
import multiprocessing as mp
from preprocessing import read_images
from prototypicalNet import PrototypicalNet, train_step, test_step, load_weights
tqdm.pandas(desc="my bar!")


# def main():  # main() intentionally left commented out; the code below runs at module level
# Reading the data
trainx, trainy = read_images('./omniglot/images_background')
testx, testy = read_images('./omniglot/images_evaluation')
print('finished reading images')
# Checking if GPU is available
use_gpu = torch.cuda.is_available()
use_gpu = False  # force CPU even if CUDA is available
# Converting input to pytorch Tensor
trainx = torch.from_numpy(trainx).float()
testx = torch.from_numpy(testx).float()
if use_gpu:
    trainx = trainx.cuda()
    testx = testx.cuda()
# Printing the data
print(trainx.size(), testx.size())
# Set training iterations and display period
num_episode = 16000