        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = self.netvlad(x)
        x = self.fc(x)
        return x

PATH = "D:\\Coursework\\Sem 6\\CS6910\\Assignment 3\\"
classes = ["030.Fish_Crow", "041.Scissor_tailed_Flycatcher", "049.Boat_tailed_Grackle", "082.Ringed_Kingfisher", 
           "103.Sayornis", "114.Black_throated_Sparrow", "168.Kentucky_Warbler"]

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 10
EPOCH = 5
LR = 1e-3

full_dataset = dataset(filename=os.path.join(PATH, "train.h5"))
train_size = int(len(full_dataset)*0.8)
valid_size = len(full_dataset) - train_size
train_dataset, valid_dataset = torch.utils.data.random_split(full_dataset, [train_size, valid_size])
print("Train Data Size:", len(train_dataset))
print("Valid Data Size:", len(valid_dataset))
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=0, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=1, num_workers=0, shuffle=False)

net = CNN()

criterion = nn.CrossEntropyLoss()
mse = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=LR)

net.to(DEVICE)
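
The example ends before any training is run. The loop below is a minimal sketch, not part of the original snippet, of how the objects defined above would typically be wired together; it assumes the h5 dataset yields (inputs, labels) batches and leaves the unused mse loss aside.

for epoch in range(EPOCH):
    net.train()
    running_loss = 0.0
    for inputs, labels in train_loader:          # assumes (inputs, labels) batches
        inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
        optimizer.zero_grad()                    # reset gradients from the previous step
        outputs = net(inputs)                    # forward pass: conv -> NetVLAD -> fc
        loss = criterion(outputs, labels)        # cross-entropy over the 7 classes
        loss.backward()                          # backpropagate
        optimizer.step()                         # Adam update
        running_loss += loss.item()
    print("Epoch", epoch + 1, "loss:", running_loss / len(train_loader))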
Example n. 2
#!/usr/bin/env python

from sklearn import model_selection
import load_dataset as load

# split-out validation dataset
array = load.dataset().values

# Random seed for reproducible splits
SEED = 7
# 80% to train and 20% to validate
validation_size = 0.20
# feature values (first four columns)
X = array[:, 0:4]
# class names (last column)
Y = array[:, 4]

X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(
    X, Y, test_size=validation_size, random_state=SEED)

if __name__ == '__main__':
    # validation feature rows
    print(X_validation)
    # validation class names
    print(Y_validation)
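
As a hedged variation on the split above (not in the original example), train_test_split also accepts a stratify argument, which keeps the class proportions of Y identical in both splits:

X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(
    X, Y, test_size=validation_size, random_state=SEED, stratify=Y)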
Example n. 3
    x = goog.avgpool(x)
    # N x 1024 x 1 x 1
    x = torch.flatten(x, 1)
    # N x 1024
    return x

PATH = ""
classes = ["030.Fish_Crow", "041.Scissor_tailed_Flycatcher", "049.Boat_tailed_Grackle", "082.Ringed_Kingfisher", 
           "103.Sayornis", "114.Black_throated_Sparrow", "168.Kentucky_Warbler"]

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 10
EPOCH = 5
LR = 1e-3

full_dataset = dataset("traindata.h5")
train_size = int(len(full_dataset)*0.8)
valid_size = len(full_dataset) - train_size
train_dataset, valid_dataset = torch.utils.data.random_split(full_dataset, [train_size, valid_size])
print("Train Data Size:", len(train_dataset))
print("Valid Data Size:", len(valid_dataset))
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=0, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=1, num_workers=0, shuffle=False)

vgg16 = models.vgg16(pretrained=True)
vgg16.eval()
googlenet = models.googlenet(pretrained=True)
googlenet.eval()

model = "vgg"
Example n. 4
#!/usr/bin/env python

from matplotlib import pyplot
from pandas.plotting import scatter_matrix
import load_dataset as load

dataset = load.dataset()

# box and whisker plots
dataset.plot(kind='box', subplots=True, layout=(2, 2), sharex=False, sharey=False)
# histograms
dataset.hist()
# scatter plot matrix
scatter_matrix(dataset)

if __name__ == '__main__':
    # open ui with graphs
    pyplot.show()