Example #1
def train(model_name="InceptionV3"):
    csv_map = {"train": "trainingCSV.csv", "val": "validationCSV.csv"}

    data_transforms = {
        'train': transforms.Compose([
            transforms.ToTensor()
        ]),
        'val': transforms.Compose([
            transforms.ToTensor()
        ]),
    }

    image_datasets = {x: CircuitBoardImageDataset(
        annotations_file=csv_map[x],
        img_dir=home + "\\data\\" + x,
        transform=data_transforms[x])
                      for x in ['train', 'val']}

    dataloaders = {x: DataLoader(image_datasets[x], batch_size=4, shuffle=True)
                   for x in ['train', 'val']}

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    if model_name == "InceptionV3" :
        model = models.inception_v3(pretrained=True)
        model.aux_logits=False
    elif model_name == "resnet34" :
        model = models.resnet34(pretrained=True)
    elif model_name == "vgg16" :
        model = models.vgg16(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False
        model.classifier[6] = nn.Linear(4096, 6)
    else :
        model = models.resnet18(pretrained=True)
    
    if model_name != "vgg16" :
        for param in model.parameters():
            param.requires_grad = False
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, 6)

    model = model.to(device)
    criterion = nn.CrossEntropyLoss()

    optimizer_conv = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=0.001, momentum=0.9)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)


    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    training_model = nn_model(image_datasets, dataloaders, dataset_sizes, LABEL_MAP)
    model_conv = training_model.train_model(model, criterion, optimizer_conv,
                                            exp_lr_scheduler, num_epochs=10)

    # Display Predictions for trained model
    # training_model.visualize_model(model_conv)
    # plt.show()

    save_model(model_conv, model_name)
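
# Note: save_model is not defined in this snippet. A minimal sketch of what it
# might look like, assuming it simply serializes the fine-tuned weights with
# torch.save; the filename scheme here is hypothetical, not the project's:
def save_model(model, model_name):
    # Saving the state_dict (rather than the whole module) is the
    # recommended PyTorch serialization approach.
    torch.save(model.state_dict(), model_name + "_finetuned.pth")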
Example #2
output_size = 1
path = 'projekt1/classification/'
path1 = 'projekt1-oddanie/clasification/'

#train_f_name=path+class_train_files[5]
#test_f_name=path+class_test_files[5]
# files used for the final hand-in:
train_f_name = path1 + pres_class_train[5]
test_f_name = path1 + pres_class_test[5]
np.random.seed(323)
train_data = csv_data_read(train_f_name)
test_data = csv_data_read(test_f_name)

m1 = nn_model([2, 100, 100, 100, 4],
              with_bias=True,
              act_f=ReLU0,
              act_fprim=ReLU0prim,
              learn_ratio=0.0005,
              noise_level=0.1,
              classifier=True,
              change_m_ratio=0.90)

epochs = 50
vis = Visualization(m1)
learning_error = m1.fit(train_data, epochs=epochs, vis=vis)
plt.ioff()
vis.add_drawing(m1, learning_error, train_data, test_data)

print('Mean error over test data set: ' + str(m1.score(test_data)))
plt.show()
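
# Note: csv_data_read is defined elsewhere in the project. A minimal sketch,
# assuming each CSV row holds the input features followed by the target in
# the last column; the delimiter and header handling are guesses:
import numpy as np

def csv_data_read(f_name):
    # Hypothetical reader: one sample per row, inputs first, target last.
    return np.loadtxt(f_name, delimiter=',')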
Example #3
from data_loader import data_container
from nn_model import *
from matplotlib import pyplot as plt

#debug flag
debug = False
#create a data container, with 70% (same as that used while training) as training data
#batch size doesn't matter here, since we will only look at test data
data = data_container(70, 1, debug)
#set the mode of the data container to 'test'
data.set_mode('test')
#name of the model being used
model_name = 'nn_model'
#load the saved network
checkpoint_file = model_name + '_best.pth.tar'
net = nn_model()
#verify that we are using the correct model
if type(net).__name__ != model_name:
    print("The intended neural net model is not being used")
    exit()

#load checkpoint
checkpoint = t.load(checkpoint_file)
net.load_state_dict(checkpoint['model'])
#set the mode of the model to eval
net.eval()

#padding required for the input images given to the network
image_padding = t.nn.ZeroPad2d((1, 2, 1, 1))
#transform to Normalize the input image
normalize = tv.transforms.Compose([tv.transforms.Normalize((5.9, ), (1.0, ))])
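
# Note: the padding and normalization above are defined but not yet applied.
# A hypothetical sketch of the evaluation loop that would follow, assuming
# data_container is iterable and yields (image_batch, label_batch) tensors;
# the real API of data_container may differ:
correct, total = 0, 0
with t.no_grad():
    for images, labels in data:
        images = normalize(image_padding(images))
        outputs = net(images)
        predictions = outputs.argmax(dim=1)
        correct += (predictions == labels).sum().item()
        total += labels.size(0)
print('Test accuracy: ' + str(correct / max(total, 1)))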
Example #4
# Append the target as the last column so each row is [features..., label].
XY = np.zeros((len(X), len(X[0]) + 1), dtype=np.float64)
XYt = np.zeros((len(Xt), len(Xt[0]) + 1), dtype=np.float64)
for i in range(len(X)):
    XY[i][0:len(X[i])] = X[i]
    XY[i][-1] = y[i]

for i in range(len(Xt)):
    XYt[i][0:len(Xt[i])] = Xt[i]
    XYt[i][-1] = yt[i]

m1 = nn_model([len(X[0]), 800, 400, 200, 10],
              with_bias=True,
              act_f=Sigm2,
              act_fprim=Sigmprim2,
              learn_ratio=0.0005,
              with_noise=False,
              classifier=True,
              change_m_ratio=0.99)

epochs = 30

vis = SimpleVis(m1)

learning_error = m1.fit(XY, epochs=epochs, vis=vis)

plt.plot(learning_error)

sc = m1.score(XYt)
print('Mean error on the test set: ' + str(sc))
print('Accuracy on the test set: ' + str(1 - sc))
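
# Note: the activation pairs ReLU0/ReLU0prim and Sigm2/Sigmprim2 passed as
# act_f/act_fprim are not shown in these snippets. A plausible minimal
# implementation, assuming plain ReLU and the logistic sigmoid with their
# derivatives; the project's actual definitions may differ:
import numpy as np

def ReLU0(x):
    # ReLU: pass positive inputs through, clamp the rest to zero.
    return np.maximum(0.0, x)

def ReLU0prim(x):
    # ReLU derivative: 1 where the input was positive, 0 elsewhere.
    return (x > 0).astype(np.float64)

def Sigm2(x):
    # Logistic sigmoid.
    return 1.0 / (1.0 + np.exp(-x))

def Sigmprim2(x):
    # Sigmoid derivative, expressed through the sigmoid itself.
    s = Sigm2(x)
    return s * (1.0 - s)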
Example #5
path = 'projekt1/regression/'  # assumed: not defined in the original snippet; mirrors Example #2's layout
path1 = 'projekt1-oddanie/regression/'
train_f_name = path + regr_train_files[5]
test_f_name = path + regr_test_files[5]
#train_f_name=path1+pres_regr_train[5]
#test_f_name=path1+pres_regr_test[5]
np.random.seed(343)
train_data = csv_data_read(train_f_name)
test_data = csv_data_read(test_f_name)

#m1=nn_model([1,3,1],with_bias=True,act_f=Sigm2,act_fprim=Sigmprim2,
#            learn_ratio=0.6)
#m1=nn_model([1,3,3,1],with_bias=True,act_f=Sigm2,act_fprim=Sigmprim2,
#            learn_ratio=0.05)
m1 = nn_model([1, 3, 3, 3, 1],
              with_bias=True,
              act_f=ReLU0,
              act_fprim=ReLU0prim,
              learn_ratio=0.002,
              noise_level=0.2)
# nice for the cube data set:
#m1=nn_model([1,2,3,2,1],with_bias=True,act_f=Sigm2,act_fprim=Sigmprim2,
#            learn_ratio=0.6)
# also good for cube:
#m1=nn_model([1,4,4,4,1],with_bias=True,
#            learn_ratio=0.2,bias=0.5)
#m1=nn_model([1,4,4,1],with_bias=True,
#            learn_ratio=0.1,bias=0.5)

#epochs=max(1,int(60000/len(train_data)))
epochs = 50
vis = Visualization(m1)
learning_error = m1.fit(train_data, epochs=epochs, vis=vis)
plt.ioff()
vis.add_drawing(m1, learning_error, train_data, test_data)
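
# Note: this example stops after drawing; by analogy with Example #2 it would
# presumably finish by reporting the test score and showing the figures:
print('Mean error on the test set: ' + str(m1.score(test_data)))
plt.show()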