Example #1
def evalSet(filepath, beta, network_path=None):
    #get data
    loader, v_loader = getDatasets(filepath,
                                   8,
                                   validation_split=0,
                                   reduction=1,
                                   raw=False,
                                   augment=False)
    #get model
    if network_path:
        model = torch.load(network_path)
    else:
        model = facenetVAE()
    #calculate loss
    total_loss = 0
    total_mse = 0
    total_kld = 0
    model.eval()
    with torch.no_grad():  # no gradients are needed for evaluation
        for inputs in loader:
            inputs = inputs.float().cuda()
            outputs, mu, logvar = model(inputs)
            lost, mselost, kldlost = loss_fn(outputs, inputs, mu, logvar, beta)
            total_loss += lost.item()
            total_mse += mselost.item()
            total_kld += kldlost.item()
    print("Total loss for testset is: " + str(total_loss) + " MSE loss: " +
          str(total_mse) + " KLD loss: " + str(total_kld))
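Both evalSet variants call a loss_fn that is not shown in these examples. A minimal sketch consistent with how it is called here, assuming the standard beta-VAE objective (summed MSE reconstruction plus the analytic KL term; the reduction choice is an assumption):

import torch
import torch.nn.functional as F


def loss_fn(outputs, inputs, mu, logvar, beta):
    # Reconstruction term (summed over all elements; reduction is an assumption).
    mse = F.mse_loss(outputs, inputs, reduction='sum')
    # KL divergence of N(mu, sigma^2) from N(0, I), summed over the batch.
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return mse + beta * kld, mse, kld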
Example #2
def evalSet(filepath, network_path=None):
    #get data
    loader, v_loader = getDatasets(filepath,
                                   8,
                                   validation_split=0,
                                   reduction=1,
                                   raw=False,
                                   augment=False)
    #get model
    if network_path:
        model = torch.load(network_path)
    else:
        model = facenetAE()
    #calculate loss
    total_loss = 0
    model.eval()
    with torch.no_grad():  # no gradients are needed for evaluation
        for inputs in loader:
            inputs = inputs.float().cuda()
            outputs = model(inputs)
            lost = loss_fn(outputs, inputs)
            total_loss += lost.item()
    print("Total loss for testset is: " + str(total_loss))
Example #3
def objective(trial):
    epochs = 1000
    data_path = "../data/trainingset_final_v2.csv"
    #FINDING VALUES TO TEST IN THIS TRIAL
    learning_rate = 0.0001  #trial.suggest_loguniform("learning_rate", 1e-5, 1e-3)
    batch_size = 8  #trial.suggest_int("batch_size",8,32,8)
    layer_amount = trial.suggest_int("layer_amount", 4, 6, 1)
    layer_size = trial.suggest_categorical("layer_size",
                                           [32, 64, 128])  #,256])
    extradense = False  #trial.suggest_categorical("extradense",[True,False])
    beta = trial.suggest_int("beta", 1, 20, 1)
    print("Learning Rate: " + str(learning_rate))
    print("BATCH SIZE: " + str(batch_size))
    print("Layer amount: " + str(layer_amount))
    print("Layer size: " + str(layer_size))
    print("extradense: " + str(extradense))
    print("beta: " + str(beta))
    train_loader, val_loader = getDatasets(data_path,
                                           batch_size,
                                           reduction=0.25)
    model = facenetVAE(layer_amount=layer_amount,
                       layer_size=layer_size,
                       extradense=extradense).cuda()
    es = EarlyStopper(10, 0.1, "VAE_earlystopsave_4_simple_v3.pth", False)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    #TRAINING
    training_losses = []
    validation_losses = []
    val_mse = []
    for epoch in range(epochs):
        total_train_loss = 0
        model.train()
        for inputs in train_loader:
            optimizer.zero_grad()
            inputs = inputs.float().cuda()
            outputs, mu, logvar = model(inputs)
            lost, mselost, kldlost = loss_fn(outputs, inputs, mu, logvar, beta)
            lost.backward()
            optimizer.step()
            total_train_loss += lost.detach()  # detach so each batch graph is freed
        #print("train loss " + str(total_train_loss.detach().cpu().item()))
        training_losses.append(total_train_loss.detach().cpu().item())
        stop_epoch = epoch
        #VALIDATION
        if epoch % 10 == 0:
            model.eval()
            with torch.no_grad():
                total_val_loss = 0
                total_mse_loss = 0
                for vinputs in val_loader:
                    vinputs = vinputs.float().cuda()
                    voutputs, vmu, vlogvar = model(vinputs)
                    vlost, vmselost, vkldlost = loss_fn(
                        voutputs, vinputs, vmu, vlogvar, beta)
                    total_val_loss += vlost
                    total_mse_loss += vmselost
            validation_losses.append(total_val_loss.detach().cpu().item())
            val_mse.append(total_mse_loss.detach().cpu().item())
            print("Epoch " + str(epoch) + " with val loss " +
                  str(validation_losses[-1]))
            stop = es.earlyStopping(total_mse_loss, model)
            #EARLY STOPPING
            if stop:
                print("TRAINING FINISHED AFTER " + str(epoch) +
                      " EPOCHS. K BYE.")
                break
            trial.report(total_val_loss.item(), epoch)  # report a float, not a tensor
    if (int(stop_epoch / 10) - 10) > 0 and (int(stop_epoch / 10) -
                                            10) < len(validation_losses):
        final_loss = validation_losses[int(stop_epoch / 10) -
                                       10]  #10 coz of patience = 10
        final_mse_loss = val_mse[int(stop_epoch / 10) - 10]
    else:
        final_loss = validation_losses[-1]
        final_mse_loss = val_mse[-1]
    #WRITE OPTIM
    filename = "vae_optim_v2.txt"
    with open(filename, 'a') as file:
        # Separators between fields were missing in the original, so the
        # values ran together in the output file.
        file.write("layer_amount:" + str(layer_amount) +
                   " layer_size:" + str(layer_size) +
                   " extradense:" + str(extradense) +
                   " beta:" + str(beta) +
                   " final_loss:" + str(final_loss) +
                   " final_mse_loss:" + str(final_mse_loss))
        file.write('\n')
    return final_loss
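The objective above follows Optuna's trial interface. A minimal driver using the standard Optuna API (the study direction and trial count are illustrative choices, not from the original):

import optuna

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=50)  # trial count is illustrative
print("Best params: " + str(study.best_params))
print("Best value: " + str(study.best_value))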
Example #4
def trainNet(epochs,
             learning_rate,
             batch_size,
             data_path,
             layers,
             layer_size,
             beta=1,
             save=True):
    train_loader, val_loader = getDatasets(data_path, batch_size, reduction=1)
    model = facenetVAE(layer_amount=layers, layer_size=layer_size).cuda()
    es = EarlyStopper(10, 0.1, "VAE_earlystopsave_4_simple_v3_1.pth", save)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    #TRAINING
    training_losses = []
    validation_losses = []
    for epoch in range(epochs):
        total_train_loss = 0
        model.train()
        for inputs in train_loader:
            optimizer.zero_grad()
            inputs = inputs.float().cuda()
            outputs, mu, logvar = model(inputs)
            lost, mselost, kldlost = loss_fn(outputs, inputs, mu, logvar, beta)
            lost.backward()
            optimizer.step()
            total_train_loss += lost.detach()  # detach so each batch graph is freed
        print("train loss " + str(total_train_loss.detach().cpu().item()))
        training_losses.append(total_train_loss.detach().cpu().item())
        stop_epoch = epoch
        #VALIDATION
        if epoch % 10 == 0:
            model.eval()
            with torch.no_grad():
                total_val_loss = 0
                for vinputs in val_loader:
                    vinputs = vinputs.float().cuda()
                    voutputs, vmu, vlogvar = model(vinputs)
                    vlost, vmselost, vkldlost = loss_fn(
                        voutputs, vinputs, vmu, vlogvar, beta)
                    total_val_loss += vlost
            validation_losses.append(total_val_loss.detach().cpu().item())
            print("Epoch " + str(epoch) + " with val loss " +
                  str(validation_losses[-1]))
            stop = es.earlyStopping(total_val_loss, model)
            #EARLY STOPPING
            if stop:
                print("TRAINING FINISHED AFTER " + str(epoch) +
                      " EPOCHS. K BYE.")
                break
    #SAVE LOSSES TO FILE
    if save:
        filename = ("VAE_losses_" + str(layers) + "_" + str(layer_size) +
                    "_simple_v3_1.txt")
        with open(filename, 'w') as file:
            file.write("trained with learning rate " + str(learning_rate) +
                       ", batch size " + str(batch_size) + ", planned epochs " +
                       str(epochs) + " but only took " + str(stop_epoch) +
                       " epochs.")
            file.write('\n')  # separate the header from the loss lists
            file.write("training_losses")
            file.write('\n')
            for element in training_losses:
                file.write(str(element))
                file.write('\n')
            file.write("validation_losses")
            file.write('\n')
            for element in validation_losses:
                file.write(str(element))
                file.write('\n')
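The EarlyStopper class is not shown in these examples; from its usage it is constructed as EarlyStopper(patience, min_delta, path, save) and queried via earlyStopping(loss, model), which returns True when training should stop. A sketch consistent with that usage (an assumption, including the whole-model torch.save matching the torch.load calls above):

import torch


class EarlyStopper:

    def __init__(self, patience, min_delta, path, save):
        self.patience = patience
        self.min_delta = min_delta
        self.path = path
        self.save = save
        self.best = float('inf')
        self.counter = 0

    def earlyStopping(self, loss, model):
        loss = float(loss)
        if loss < self.best - self.min_delta:
            self.best = loss
            self.counter = 0
            if self.save:
                torch.save(model, self.path)  # checkpoint the best model so far
        else:
            self.counter += 1
        return self.counter >= self.patience  # True -> stop training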
Example #5
def objective(trial):
    epochs = 300
    data_path = "../data/trainingset_final_v2.csv"
    learning_rate = 0.0001  #trial.suggest_loguniform("learning_rate", 1e-5, 1e-3)
    batch_size = 4  #trial.suggest_int("batch_size",8,32,8)
    layer_amount = trial.suggest_int("layer_amount", 4, 6, 1)
    layer_size = trial.suggest_categorical("layer_size", [32, 64, 128])  #,256])
    extradense = False  #trial.suggest_categorical("extradense",[True,False])
    factor = trial.suggest_int("factor", 1, 30)
    beta = 1  # beta was undefined in the original; 1 matches the default elsewhere
    print("BATCH SIZE: " + str(batch_size))
    print("learning rate: " + str(learning_rate))
    print("layer amount: " + str(layer_amount))
    print("layer size: " + str(layer_size))
    print("extradense: " + str(extradense))
    print("Factor " + str(factor))
    train_loader, val_loader, train_set, val_set = getDatasets(data_path,
                                                               batch_size,
                                                               reduction=0.25,
                                                               raw=True,
                                                               findDoubles=True)
    # Pass the suggested architecture params; the original ignored them.
    model = facenetVAE(layer_amount=layer_amount,
                       layer_size=layer_size,
                       extradense=extradense).cuda()
    es = EarlyStopper(10, 0.1, "VAE_earlystopsave_siamese_simple_v2.pth", False)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    #TRAINING
    training_losses = []
    siamese_losses = []
    validation_losses = []
    validation_accs = []
    for epoch in range(epochs):
        total_train_loss = 0
        model.train()
        siamese_loss = 0
        for b in range(int(math.ceil(train_set.getDatasetSize() / batch_size))):
            optimizer.zero_grad()
            b1, b2, l = train_set.getDoubleBatch(batch_size, b)
            o = model.forward_siamese(b1, b2)
            loss_s = loss_siam(o, l)
            o1, mu1, log1 = model.forward(b1)
            loss_t, _, _ = loss_fn(o1, b1, mu1, log1, beta)
            loss = loss_s * factor + loss_t
            loss.backward()
            optimizer.step()
            siamese_loss += loss_s.detach()  # detach so each batch graph is freed
            total_train_loss += loss.detach()
        siamese_losses.append(siamese_loss.cpu().item())
        training_losses.append(total_train_loss.cpu().item())
        print("siamese loss " + str(siamese_loss.cpu().item()))
        print("train loss " + str(total_train_loss.cpu().item()))
        stop_epoch = epoch
        #VALIDATION
        if epoch % 10 == 0:
            model.eval()
            vpredictions = []
            vlabels = []
            with torch.no_grad():
                total_val_loss = 0
                for b in range(
                        int(math.ceil(val_set.getDatasetSize() / batch_size))):
                    b1, b2, l = val_set.getDoubleBatch(batch_size, b)
                    o = model.forward_siamese(b1, b2)
                    vlabels.extend(l[:, 0].tolist())
                    vpredictions.extend(o[:, 0].tolist())
                    loss_s = loss_siam(o, l)
                    o1, vmu, vlog = model.forward(b1)
                    loss_t, _, _ = loss_fn(o1, b1, vmu, vlog, beta)
                    loss = loss_s * factor + loss_t
                    total_val_loss += loss
                va = accuracy_score(
                    vlabels, np.where(np.array(vpredictions) < 0.5, 0.0, 1.0))
                validation_accs.append(va)
            validation_losses.append(total_val_loss.cpu().item())
            print("Epoch " + str(epoch) + " with val loss " +
                  str(validation_losses[-1]) + " and val accuracy " +
                  str(validation_accs[-1]))
            trial.report(total_val_loss.item(), epoch)  # report a float
            stop = es.earlyStopping(total_val_loss, model)
            #EARLY STOPPING
            if stop:
                print("TRAINING FINISHED AFTER " + str(epoch) + " EPOCHS. K BYE.")
                break
    if 0 < (int(stop_epoch / 10) - 10) < len(validation_losses):
        final_loss = validation_losses[int(stop_epoch / 10) -
                                       10]  #10 coz of patience = 10
        final_acc = validation_accs[int(stop_epoch / 10) - 10]
    else:
        final_loss = validation_losses[-1]
        final_acc = validation_accs[-1]  # was missing, causing a NameError below
    #WRITE OPTIM
    filename = "vae_optim_v2.txt"
    with open(filename, 'a') as file:
        file.write("factor:" + str(factor))
        file.write(" final_acc:" + str(final_acc))
        file.write(" final_loss:" + str(final_loss))
        file.write('\n')
    return final_loss
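loss_siam compares the siamese head's output against 0/1 same-or-different labels, and predictions are thresholded at 0.5 for the accuracy score. A plausible sketch, assuming the output is already a probability in [0, 1]:

import torch
import torch.nn as nn

# Assumption: forward_siamese ends in a sigmoid, so plain BCE applies.
loss_siam = nn.BCELoss()

# Shape check with dummy data:
o = torch.rand(4, 1)                      # similarity scores
l = torch.randint(0, 2, (4, 1)).float()  # same/different labels
print(loss_siam(o, l).item())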
Example #6
def trainNet(epochs,
             learning_rate,
             batch_size,
             data_path,
             layers,
             layer_size,
             factor=15,
             beta=1,
             save=True,
             trainFrom=False):
    train_loader, val_loader, train_set, val_set = getDatasets(
        data_path, batch_size, raw=True, findDoubles=True)
    savepath = "VAE_earlystopsave_simple_siamese_v3.pth"
    if trainFrom:
        trainFromEpoch = np.load("vae_current_epoch.npy")[0]
        model = torch.load(savepath)
        training_losses = np.load("vae_earlystopsave_training_losses.npy")
        mse_losses = np.load("vae_earlystopsave_mse_losses.npy")
        kld_losses = np.load("vae_earlystopsave_kld_losses.npy")
        siamese_losses = np.load("vae_earlystopsave_siamese_losses.npy")
        validation_losses = np.load("vae_earlystopsave_validation_losses.npy")
        validation_accs = np.load("vae_earlystopsave_validation_accs.npy")
    else:
        model = facenetVAE(layer_amount=layers, layer_size=layer_size).cuda()
        training_losses = np.array([])
        mse_losses = np.array([])
        kld_losses = np.array([])
        siamese_losses = np.array([])
        validation_losses = np.array([])
        validation_accs = np.array([])
        trainFromEpoch = 0
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    es = EarlyStopper(10, 1, savepath, save)
    #Training
    for epoch in range(trainFromEpoch,epochs):
        total_train_loss = 0
        model.train()
        siamese_loss = 0
        total_mse = 0
        total_kld = 0
        if epoch % 2 == 0:  #first half
            datasethalf = range(
                0, int(math.ceil(train_set.getDatasetSize() / batch_size) / 2))
        else:  #second half
            datasethalf = range(
                int(math.ceil(train_set.getDatasetSize() / batch_size) / 2),
                int(math.ceil(train_set.getDatasetSize() / batch_size)))
        for b in datasethalf:
            optimizer.zero_grad()
            b1, b2, l = train_set.getDoubleBatch(batch_size, b)
            #o = model.forward_siamese(b1,b2)
            o1, mu1, log1, d = model.full_forward(b1, b2)
            loss_s = loss_siam(d, l)
            loss_t, mse, kld = loss_fn(o1, b1, mu1, log1, beta)
            total_mse += mse.detach()  # detach running sums so batch graphs are freed
            total_kld += kld.detach()
            loss = loss_s * factor + loss_t
            loss.backward()
            optimizer.step()
            siamese_loss += loss_s.detach()
            total_train_loss += loss.detach()
        siamese_losses = np.append(siamese_losses, siamese_loss.cpu().item())
        training_losses = np.append(training_losses,
                                    total_train_loss.cpu().item())
        mse_losses = np.append(mse_losses, total_mse.cpu().item())
        kld_losses = np.append(kld_losses, total_kld.cpu().item())
        print("siamese loss " + str(siamese_loss.cpu().item()))
        print("train loss " + str(total_train_loss.cpu().item()))
        stop_epoch = epoch
        #VALIDATION
        if epoch % 10 == 0:
            model.eval()
            vpredictions = []
            vlabels = []
            with torch.no_grad():
                total_val_loss = 0
                for b in range(
                        int(math.ceil(val_set.getDatasetSize() / batch_size))):
                    b1, b2, l = val_set.getDoubleBatch(batch_size, b)
                    vlabels.extend(l[:, 0].tolist())
                    o1, vmu, vlog, d = model.full_forward(b1, b2)
                    vpredictions.extend(d[:, 0].tolist())
                    loss_s = loss_siam(d, l)
                    loss_t, _, _ = loss_fn(o1, b1, vmu, vlog, beta)
                    loss = loss_s * factor + loss_t
                    total_val_loss += loss
                va = accuracy_score(
                    vlabels, np.where(np.array(vpredictions) < 0.5, 0.0, 1.0))
                validation_accs = np.append(validation_accs, va)
            validation_losses = np.append(validation_losses,
                                          total_val_loss.cpu().item())
            print("Epoch " + str(epoch) + " with val loss " +
                  str(validation_losses[-1]) + " and val accuracy " +
                  str(validation_accs[-1]))
            stop = es.earlyStopping(total_val_loss, model)
            #EARLY STOPPING
            if stop:
                print("TRAINING FINISHED AFTER " + str(epoch) + " EPOCHS. K BYE.")
                break
            elif save:
                np.save("vae_earlystopsave_training_losses.npy", training_losses)
                np.save("vae_earlystopsave_mse_losses.npy", mse_losses)
                np.save("vae_earlystopsave_kld_losses.npy", kld_losses)
                np.save("vae_earlystopsave_siamese_losses.npy", siamese_losses)
                np.save("vae_earlystopsave_validation_losses.npy",
                        validation_losses)
                np.save("vae_earlystopsave_validation_accs.npy", validation_accs)
                np.save("vae_current_epoch.npy", np.array([epoch]))
    if 0 < (int(stop_epoch / 10) - 10) < len(validation_losses):
        final_loss = validation_losses[int(stop_epoch / 10) -
                                       10]  #10 coz of patience = 10
        final_acc = validation_accs[int(stop_epoch / 10) - 10]
    else:
        final_loss = validation_losses[-1]
        final_acc = validation_accs[-1]  # was missing in the original else branch
    #SAVE LOSSES TO FILE
    if save:  # the original condition (save == False or save == True) was always true
        filename = "VAE_losses_siamese_simple_v3.txt"
        with open(filename, 'w') as file:
            file.write("trained with learning rate " + str(learning_rate) +
                       ", batch size " + str(batch_size) + ", planned epochs " +
                       str(epochs) + " but only took " + str(stop_epoch) +
                       " epochs.")
            file.write('\n')  # separate the header from the loss lists
            file.write("training_losses")
            file.write('\n')
            for element in training_losses:
                file.write(str(element))
                file.write('\n')
            file.write("siamese_losses")
            file.write('\n')
            for element in siamese_losses:
                file.write(str(element))
                file.write('\n')
            file.write("validation_losses")
            file.write('\n')
            for element in validation_losses:
                file.write(str(element))
                file.write('\n')
            file.write("validation_accuracies")
            file.write('\n')  # was missing, so the first accuracy ran into the header
            for element in validation_accs:
                file.write(str(element))
                file.write('\n')
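With trainFrom=True the function resumes from the .npy bookkeeping files written after each validation round. A hedged usage sketch (all hyperparameter values are illustrative):

# Fresh run: writes the checkpoint plus .npy loss histories as it trains.
trainNet(epochs=1000, learning_rate=1e-4, batch_size=8,
         data_path="../data/trainingset_final_v2.csv",
         layers=4, layer_size=64)

# Resume after an interruption, picking up from vae_current_epoch.npy.
trainNet(epochs=1000, learning_rate=1e-4, batch_size=8,
         data_path="../data/trainingset_final_v2.csv",
         layers=4, layer_size=64, trainFrom=True)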
Example #7
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import os
from dataset import getDatasets

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

data = getDatasets()
num_steps = 10000
batch_size = 100
lr_generator = 0.002
lr_discriminator = 0.002

image_dim = 128 * 128
noise_dim = 100

noise_input = tf.placeholder(tf.float32, shape=[None, noise_dim])
real_image_input = tf.placeholder(tf.float32, shape=[None, 128, 128, 3])

is_training = tf.placeholder(tf.bool)


def leakyrelu(x, alpha=0.2):
    # Equivalent to max(x, alpha * x), written without tf.maximum:
    # 0.5*(1+alpha)*x + 0.5*(1-alpha)*|x|.
    return 0.5 * (1 + alpha) * x + 0.5 * (1 - alpha) * abs(x)


def generator(x, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        x = tf.layers.dense(x, units=16 * 16 * 256)
        x = tf.layers.batch_normalization(x, training=is_training)
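        # The scraped example is truncated here. Below is a DCGAN-style
        # completion sketched as an assumption: layer widths and kernel
        # sizes are illustrative, not recovered from the original.
        x = tf.nn.relu(x)
        x = tf.reshape(x, shape=[-1, 16, 16, 256])
        # Upsample 16x16 -> 32x32 -> 64x64 -> 128x128.
        x = tf.layers.conv2d_transpose(x, 128, 5, strides=2, padding='same')
        x = tf.layers.batch_normalization(x, training=is_training)
        x = tf.nn.relu(x)
        x = tf.layers.conv2d_transpose(x, 64, 5, strides=2, padding='same')
        x = tf.layers.batch_normalization(x, training=is_training)
        x = tf.nn.relu(x)
        x = tf.layers.conv2d_transpose(x, 3, 5, strides=2, padding='same')
        # Tanh output matches images scaled to [-1, 1].
        return tf.nn.tanh(x)


def discriminator(x, reuse=False):
    # A matching discriminator sketch using the leakyrelu helper above;
    # also an assumption, since the original stops mid-generator.
    with tf.variable_scope('Discriminator', reuse=reuse):
        x = tf.layers.conv2d(x, 64, 5, strides=2, padding='same')
        x = leakyrelu(x)
        x = tf.layers.conv2d(x, 128, 5, strides=2, padding='same')
        x = tf.layers.batch_normalization(x, training=is_training)
        x = leakyrelu(x)
        x = tf.layers.flatten(x)
        x = tf.layers.dense(x, 1024)
        x = leakyrelu(x)
        return tf.layers.dense(x, 1)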
Example #8
            edgecolor=classes_color[classes_index[box_class]],
            facecolor='none')
        ax.add_patch(rect)
    plt.show()


data_folder = './Data'
root_folder = './'
weights_path = './weights/weights.pt'

# Setting device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)

# Loading Datasets
train_dataset, val_dataset, test_dataset = getDatasets(data_folder)

# Getting Loaders
train_loader, val_loader, test_loader = getLoaders(train_dataset, val_dataset,
                                                   test_dataset)

# Loading model
print('Loading Model...')
model = loadModelWithWeights(weights_path)
model = model.to(device)

model.eval()
with torch.no_grad():
    for img, label in test_loader:
        image = img.to(device)
        pred = model([image])
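        # The original example ends with the raw predictions. The lines below
        # are an illustrative assumption of how a torchvision-style detection
        # output (a list of dicts with 'boxes', 'labels', 'scores') is read.
        output = pred[0]
        keep = output['scores'] > 0.5  # hypothetical confidence threshold
        boxes = output['boxes'][keep].cpu()
        labels = output['labels'][keep].cpu()
        print('kept', len(boxes), 'detections with labels', labels.tolist())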