# Tail of a feature-extraction script; its imports and the first lines of
# this function are truncated in the source. new_forward mirrors the standard
# torchvision ResNet forward pass but skips the final fc layer, so the model
# returns flattened 2048-d pooled features. The conv1/bn1 stem below is
# reconstructed on that assumption.
def new_forward(self, x):
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)

    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)

    x = self.avgpool(x)
    x = x.view(x.size(0), -1)
    # x = self.fc(x)  # deliberately skipped: we want features, not class scores
    return x


data_set = INaturalistDataset(
    base_folder + "data_preprocessed_" + str(size) + "/",
    base_folder + "annotations/train2017_vis.json", transform, False)
data_loader = torch.utils.data.DataLoader(data_set,
                                          batch_size=batch_size,
                                          shuffle=True)

import types  # local import: only needed to bind new_forward below

model = torch.load(open(model_file, "rb"))
model.cuda = False  # NOTE: this shadows nn.Module.cuda(); apparently used as a flag
# a bare function assigned to the attribute would not receive `self`,
# so bind it to the instance explicitly
model.forward = types.MethodType(new_forward, model)

# upper bound on the number of samples (the last batch may be smaller)
N = len(data_loader) * batch_size

X = np.zeros((N, 2048))  # one ResNet feature vector per image
y = np.zeros((N,))       # one class label per image
for i, (data, targets) in enumerate(data_loader):
    # the loop body is truncated in the source; this completion just fills X
    # and y, assuming `targets` is a 1-d tensor of class labels and that
    # `np` and `Variable` come from the imports cut off above
    features = model(Variable(data, volatile=True))
    X[i * batch_size:i * batch_size + data.size(0)] = features.data.numpy()
    y[i * batch_size:i * batch_size + data.size(0)] = targets.numpy()
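
# A possible continuation (not in the source): persist the features so a
# separate classifier can be trained on them later. The file names here are
# illustrative assumptions.
np.save("features_X.npy", X)
np.save("features_y.npy", y)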
Example #2
from preprocessing.inaturalist_dataset import INaturalistDataset
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from torch.autograd import Variable

data_dir = './data_min2/'
annotations_dir = './annotations/modular_network/Mammalia/'
train_annotations = '{}train2017_min.json'.format(annotations_dir)
val_annotations = '{}val2017_min.json'.format(annotations_dir)

inaturalist = INaturalistDataset(data_dir,
                                 train_annotations,
                                 transform=transforms.ToTensor())
all_ids = inaturalist.all_ids
# images, targets = inaturalist.get_images(all_ids)

batch_size = 10
train_loader = torch.utils.data.DataLoader(inaturalist,
                                           batch_size=batch_size,
                                           shuffle=True)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 5, kernel_size=10)
        self.conv2 = nn.Conv2d(5, 10, kernel_size=10)
        # (the rest of the class definition is truncated in the source)
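
# A minimal sketch (not from the source) of how the truncated Net might be
# completed: ReLU and max-pooling after each conv, global average pooling,
# and a linear head sized to the three output categories used below. Every
# name past the two conv layers is an assumption.
class NetSketch(nn.Module):
    def __init__(self, num_classes=3):
        super(NetSketch, self).__init__()
        self.conv1 = nn.Conv2d(3, 5, kernel_size=10)
        self.conv2 = nn.Conv2d(5, 10, kernel_size=10)
        self.fc = nn.Linear(10, num_classes)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # collapse the spatial dimensions so any input size works
        x = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
        return self.fc(x)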
epochs = 1
log_interval = 10
loss = nn.CrossEntropyLoss()
output_categories = 3
optimizer = optim.Adam  # the optimizer class; instantiated with the model's parameters at training time

# set directories
data_dir = './data_min_preprocessed_224/'
annotations_dir = './annotations/modular_network/Mammalia/'
train_annotations = '{}train2017_min.json'.format(annotations_dir)
val_annotations = '{}val2017_min.json'.format(annotations_dir)

# create data sets
applied_transformation = transforms.Compose([transforms.ToTensor()])
inaturalist_train = INaturalistDataset(data_dir,
                                       train_annotations,
                                       transform=applied_transformation,
                                       modular_network_remap=False)
inaturalist_val = INaturalistDataset(data_dir,
                                     val_annotations,
                                     transform=applied_transformation,
                                     modular_network_remap=False)

# create loaders for the data sets
train_loader = torch.utils.data.DataLoader(inaturalist_train,
                                           batch_size=batch_size,
                                           shuffle=True)
val_loader = torch.utils.data.DataLoader(inaturalist_val,
                                         batch_size=batch_size,
                                         shuffle=True)
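
# The training loop itself is truncated further down; below is a minimal
# sketch in the same pre-0.4, Variable-based PyTorch style, using the
# hypothetical NetSketch from above. Everything here is illustrative, not the
# source's exact loop, and it assumes the loader yields (image, label) batches.
model = NetSketch(num_classes=output_categories)
opt = optimizer(model.parameters(), lr=1e-3)
for epoch in range(1, epochs + 1):
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = Variable(data), Variable(target)
        opt.zero_grad()
        output = model(data)
        batch_loss = loss(output, target)
        batch_loss.backward()
        opt.step()
        if batch_idx % log_interval == 0:
            print('Epoch {} batch {}: loss {:.4f}'.format(
                epoch, batch_idx, batch_loss.data[0]))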

# get pre-trained model, change FC layer
# (the model setup and the head of the evaluation routine are truncated in
#  the source; the fragment below is the tail of a per-batch progress print)
            print('Progress: {:.0f}%'.format(
                      100. * batch_idx * len(data) / len(dataset_loader.dataset)),
                  end='\r')

    loss_value /= len(dataset_loader.dataset)  # average the accumulated loss per sample

    # final log
    print('\nEvaluation results:\nAverage loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        loss_value, correct, len(dataset_loader.dataset),
        100. * correct / len(dataset_loader.dataset)))


if __name__ == '__main__':

    # training
    print("\n\nLoading training set...")
    inaturalist_train = INaturalistDataset(data_dir, train_annotations, transform=None,
                                           modular_network_remap=False)
    labels = numpy.zeros(len(inaturalist_train))
    # 268203 = 299 * 299 * 3, i.e. one flattened 299x299 RGB image per row
    stacked = numpy.zeros((len(inaturalist_train), 268203))
    for i, photo_data in enumerate(inaturalist_train):
        # photo_data is (image, target); flatten each image into one row
        stacked[i] = numpy.array(photo_data[0]).flatten()
        labels[i] = photo_data[1][1]  # second component of the target tuple
    print(numpy.shape(stacked))
    print(stacked[0])
    print("Labels: ", labels)
    """
    train_loader = torch.utils.data.DataLoader(inaturalist_train, batch_size=batch_size, shuffle=True)
    print("Starting training (%d epochs)" % epochs)
    for epoch_count in range(1, epochs + 1):
Example #5
# Tail of load_model(filename): the head of the function (truncated in the
# source) parses hyper-parameters such as the loss name out of the file name.
            model_parameters[parameter_name] = parameter_value

    return torch.load(open(models_base_folder + filename, "rb")), model_parameters


if __name__ == '__main__':

    print("Loading models...")
    models = []
    for path in pth_filenames:
        model, parameters = load_model(path)
        models.append(model)
    print("done.")

    # print(type(model))

    # produce test loader
    input_size = 224
    inaturalist_test = INaturalistDataset(test_dir(input_size), test_annotations,
                                          transform=applied_transformations,
                                          modular_network_remap=False)
    test_loader = torch.utils.data.DataLoader(inaturalist_test, batch_size=test_batch_size)

    # find the loss named in the parsed parameters
    # (note: `parameters` holds the settings of the *last* model loaded above)
    if parameters['loss'] == 'CrossEntropyLoss':
        loss = torch.nn.CrossEntropyLoss()
    else:
        raise ValueError("Invalid loss '%s'" % parameters['loss'])

    evaluate(models, loss, test_loader)
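
# evaluate() itself is not shown in the source. A minimal sketch of what an
# ensemble evaluation over several models could look like: average the class
# scores of all models per batch and count correct argmax predictions. All
# names here are assumptions, written in the same pre-0.4 Variable style and
# assuming `Variable` is among the truncated imports.
def evaluate_sketch(models, loss_fn, loader):
    correct, total_loss = 0, 0.0
    for data, target in loader:
        data, target = Variable(data, volatile=True), Variable(target)
        # mean of the models' outputs is the ensemble prediction
        output = sum(m(data) for m in models) / len(models)
        total_loss += loss_fn(output, target).data[0]
        pred = output.data.max(1)[1]  # index of the highest score per sample
        correct += pred.eq(target.data).sum()
    print('Average loss: {:.4f}, Accuracy: {}/{}'.format(
        total_loss / len(loader.dataset), correct, len(loader.dataset)))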