Code example #1
def load_images_and_evaluate1(model, args, dataFolder, numImages=None):
    # Load the images and labels.
    if numImages is None:
        # Load all images.
        image_files, im = lm.load_im(dataFolder)
        label = lm.load_label(image_files)
    else:
        image_files, im_all = lm.load_im(dataFolder)
        im = im_all[0:numImages, :, :]
        label_all = lm.load_label(image_files)
        label = label_all[0:numImages, :, :]

    print 'Image Shape:' + str(im.shape)
    print 'Label Shape:' + str(label.shape)

    predicted = model.predict(im, batch_size=10)
    print 'Predicted Results Shape:' + str(predicted.shape)
    mse = find_mse(label, predicted)
    sortedIndices = np.argsort(mse)

    # Best (lowest-MSE) and worst (highest-MSE) examples.
    topImage = im[sortedIndices[0]]
    topLabel = label[sortedIndices[0]]
    topPredicted = predicted[sortedIndices[0]]
    topResults = np.concatenate((topImage, topLabel, topPredicted), axis=2)
    bottomImage = im[sortedIndices[-1]]
    bottomLabel = label[sortedIndices[-1]]
    bottomPredicted = predicted[sortedIndices[-1]]
    bottomResults = np.concatenate((bottomImage, bottomLabel, bottomPredicted),
                                   axis=2)
    return topResults, bottomResults, im, label
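
These snippets lean on a helper, find_mse from lib_metrics, that is never shown. It is used as if it returned one MSE value per image, so that np.argsort(mse) can rank the images by error. A minimal sketch consistent with that usage (the body is an assumption, not the library's actual code), assuming label and predicted share the shape (numImages, height, width[, channels]):

import numpy as np

def find_mse(label, predicted):
    # Hypothetical sketch of lib_metrics.find_mse: per-image mean
    # squared error, returned as a 1-D array of length numImages.
    diff = (label.astype(np.float64) - predicted.astype(np.float64)) ** 2
    # Average over every axis except the leading image axis.
    return diff.reshape(diff.shape[0], -1).mean(axis=1)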
Code example #2
def load_model_images_and_evaluate(model,
                                   dataFolderPath,
                                   dataFolder='im',
                                   labelFolder='label',
                                   numImages=None):
    # Load the images and labels.
    print dataFolder
    print labelFolder
    if numImages is None:
        # Load all images.
        image_files, im = lm.load_im(dataFolderPath, dataFolder)
        label = lm.load_label(image_files, dataFolder, labelFolder)
    else:
        image_files, im_all = lm.load_im(dataFolderPath, dataFolder, numImages)
        im = im_all[0:numImages, :, :]
        label_all = lm.load_label(image_files, dataFolder, labelFolder)
        label = label_all[0:numImages, :, :]

    print 'Image Shape:' + str(im.shape)
    print 'Label Shape:' + str(label.shape)

    predicted = model.predict(im, batch_size=10)
    imageCountNan = 0
    imageCountInf = 0
    for i in range(predicted.shape[0]):
        nancount = anynan(np.squeeze(predicted[i, :, :, :]))
        infcount = anyinf(np.squeeze(predicted[i, :, :, :]))
        if nancount > 0:
            imageCountNan += 1
        if infcount > 0:
            imageCountInf += 1

    print("Number of image with predicted containing Nan:" +
          str(imageCountNan))
    print("Number of image with predicted containing Inf:" +
          str(imageCountInf))

    print 'Predicted datatype: ' + str(predicted.dtype)

    return predicted, im, label, image_files
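
anynan and anyinf are likewise not defined in these snippets; the loop above treats their return values as counts (nancount > 0). A minimal sketch under that assumption:

import numpy as np

def anynan(a):
    # Hypothetical helper: the number of NaN entries in the array.
    return np.count_nonzero(np.isnan(a))

def anyinf(a):
    # Hypothetical helper: the number of +/-inf entries in the array.
    return np.count_nonzero(np.isinf(a))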
Code example #3
File: evaluate.py  Project: DanielGutmann/TextureDL
def main():

    # Load an existing model or create a new one.
    if hasattr(args, 'load_path') and args.load_path is not None:
        print("Loading model from " + args.load_path)
        fileName = args.load_path + '/' + 'Keras_model_weights.h5'
        model = load_model(fileName)
        print("Model loaded")
    else:
        print("Creating new model")
        model = create_model_seg()
        # Set the training parameters; pass the configured SGD object,
        # not the string 'sgd', so the custom learning rate takes effect.
        sgd = opt.SGD(lr=0.0001, decay=0.0005, momentum=0.9, nesterov=True)
        model.compile(loss='mean_squared_error', optimizer=sgd)

    # Load the images and labels, then evaluate the model.
    dataFolder = os.getcwd() + '/data_prev'
    image_files, im = lm.load_im(dataFolder)
    print im.shape
    label = lm.load_label(image_files)
    print label.shape
    losses = model.evaluate(im, label, batch_size=10)
    predicted = model.predict(im, batch_size=10)
    print(predicted.shape)
    mse = find_mse(label, predicted)
    sortedIndices = np.argsort(mse)

    # Write the per-image MSE, sorted in ascending order, to a CSV file.
    resultsFolder = dataFolder + '/results'
    errorFile = resultsFolder + '/mse.csv'

    if not os.path.exists(resultsFolder):
        print 'Creating folder:' + resultsFolder
        create_results_folder(resultsFolder)
    ef = open(errorFile, 'w')
    for i in range(sortedIndices.size):
        print >> ef, image_files[sortedIndices[i]] + ',' + str(mse[sortedIndices[i]])
    ef.close()

    # Save the predicted images for the 10 highest- and 10 lowest-error examples.
    topkFolderName = dataFolder + '/topk_predicted'
    if not os.path.exists(topkFolderName):
        create_results_folder(topkFolderName)
    topkIndices = sortedIndices[-10:]
    print topkIndices

    lm.save_results(predicted, image_files, topkIndices, 'topk_predicted')

    bottomkFolderName = dataFolder + '/bottomk_predicted'
    if not os.path.exists(bottomkFolderName):
        create_results_folder(bottomkFolderName)
    # Take 10 indices (not 9) so the bottom-k matches the top-k count.
    bottomkIndices = sortedIndices[0:10]
    lm.save_results(predicted, image_files, bottomkIndices, 'bottomk_predicted')

    # Save the corresponding input images for the same examples.
    topkFolderName = dataFolder + '/topk_im'
    if not os.path.exists(topkFolderName):
        create_results_folder(topkFolderName)
    lm.save_results(im, image_files, topkIndices, 'topk_im')

    bottomkFolderName = dataFolder + '/bottomk_im'
    if not os.path.exists(bottomkFolderName):
        create_results_folder(bottomkFolderName)
    lm.save_results(im, image_files, bottomkIndices, 'bottomk_im')
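
create_results_folder is another undefined helper; it is only ever called after an os.path.exists check, so it presumably just creates the directory. A minimal sketch under that assumption:

import os

def create_results_folder(path):
    # Hypothetical helper: create the results folder (and any missing
    # parents) if it does not already exist.
    if not os.path.exists(path):
        os.makedirs(path)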
Code example #4
File: visualize.py  Project: DanielGutmann/TextureDL
def main():

    # Load an existing model or create a new one.
    if hasattr(args, 'load_path') and args.load_path is not None:
        print("Loading model from " + args.load_path)
        fileName = args.load_path + '/' + 'Keras_model_weights.h5'
        model = load_model(fileName)
        print("Model loaded")
    else:
        print("Creating new model")
        model = create_model_seg()
        # Set the training parameters; pass the configured SGD object,
        # not the string 'sgd', so the custom learning rate takes effect.
        sgd = opt.SGD(lr=0.0001, decay=0.0005, momentum=0.9, nesterov=True)
        model.compile(loss='mean_squared_error', optimizer=sgd)

    # Load the first 20 images and labels, then evaluate the model.
    dataFolder = os.getcwd() + '/data_prev'
    image_files, im_all = lm.load_im(dataFolder)
    numImages = 20
    im = im_all[0:numImages, :, :]
    print 'Image Shape:' + str(im.shape)
    label_all = lm.load_label(image_files)
    label = label_all[0:numImages, :, :]
    print 'Label Shape:' + str(label.shape)
    losses = model.evaluate(im, label, batch_size=10)
    predicted = model.predict(im, batch_size=10)
    print 'Predicted Results Shape:' + str(predicted.shape)
    mse = find_mse(label, predicted)
    sortedIndices = np.argsort(mse)

    # Display image, label, and predicted output for the images with the
    # lowest and highest error.
    topImage = im[sortedIndices[0]]
    topLabel = label[sortedIndices[0]]
    topPredicted = predicted[sortedIndices[0]]
    bottomImage = im[sortedIndices[-1]]
    bottomLabel = label[sortedIndices[-1]]
    bottomPredicted = predicted[sortedIndices[-1]]
    pl.figure(1, figsize=(15, 15))
    pl.title('Results')
    top = np.zeros([400, 200, 3])
    top[:, :, 0] = np.squeeze(topImage)
    top[:, :, 1] = np.squeeze(topLabel)
    top[:, :, 2] = np.squeeze(topPredicted)

    bottom = np.zeros([400, 200, 3])
    bottom[:, :, 0] = np.squeeze(bottomImage)
    bottom[:, :, 1] = np.squeeze(bottomLabel)
    bottom[:, :, 2] = np.squeeze(bottomPredicted)
    
    # 2x3 grid: row 1 is the lowest-error example, row 2 the highest-error
    # example; columns are image, label, prediction.
    pl.subplot(2, 3, 1)
    pl.imshow(top[:, :, 0], cmap=cm.binary)
    pl.subplot(2, 3, 2)
    pl.imshow(top[:, :, 1], cmap=cm.binary)
    pl.subplot(2, 3, 3)
    pl.imshow(top[:, :, 2], cmap=cm.binary)
    pl.subplot(2, 3, 4)
    pl.imshow(bottom[:, :, 0], cmap=cm.binary)
    pl.subplot(2, 3, 5)
    pl.imshow(bottom[:, :, 1], cmap=cm.binary)
    pl.subplot(2, 3, 6)
    pl.imshow(bottom[:, :, 2], cmap=cm.binary)
    
    # Build a backend function that returns the output of an early layer.
    layer = 1
    layer_out = model.layers[layer]
    inputs = [backend.learning_phase()] + model.inputs

    _convout1_f = backend.function(inputs, layer_out.output)

    def convout1_f(X):
        # The leading 0 sets the learning phase to "test".
        return _convout1_f([0] + [X])

    imagesToVisualize = np.zeros([2, 400, 200, 1])
    imagesToVisualize[0, :, :, 0] = np.squeeze(topImage)
    imagesToVisualize[1, :, :, 0] = np.squeeze(bottomImage)
    convout1 = np.squeeze(convout1_f(imagesToVisualize))
    print 'Output shape of layer ' + str(layer) + ':' + str(convout1.shape)
    numFilters = convout1.shape[3]
    numImages = imagesToVisualize.shape[0]
    pl.figure(1, figsize=(15, 15))
    pl.title('Output of layer ' + str(layer))
    print 'Number of filters:' + str(numFilters)
    # For each visualized image, plot the input followed by the response
    # of every filter in the layer.
    for imageNum in range(numImages):
        position = imageNum * (numFilters + 1) + 1
        pl.subplot(numImages, numFilters + 1, position)
        pl.imshow(np.squeeze(imagesToVisualize[imageNum, :, :, 0]), cmap=cm.binary)
        for filterNum in range(numFilters):
            position = position + 1
            pl.subplot(numImages, numFilters + 1, position)
            pl.imshow(np.squeeze(convout1[imageNum, :, :, filterNum]), cmap=cm.binary)
    
    

    # Plot the weights of a convolutional layer.
    layer = 3

    W = model.layers[layer].W.get_value(borrow=True)
    W = np.squeeze(W)
    print("W shape : ", W.shape)
    print("Dimension : ", len(W.shape))
    

    pl.figure(2, figsize=(15, 15))
    pl.title('Convolution layer ' + str(layer) + ' weights')

    nice_imshow(pl.gca(), make_mosaic(W, 10, 10), cmap=cm.binary)
    figFileName = args.load_path + '/' + 'layer_' + str(layer) + '.png'
    pl.savefig(figFileName)



    # Histogram of the weight values.
    freq, values = np.histogram(W, bins=1000)
    pl.figure(3, figsize=(15, 15))
    pl.plot(values[1:], freq)

    # Open the output files.
    weightsFile = args.load_path + '/' + 'layer_' + str(layer) + '.txt'
    weightsStatisticsFile = args.load_path + '/' + 'layer_stats_' + str(layer) + '.txt'
    wf = open(weightsFile, 'w+')  # raw weights
    wfs = open(weightsStatisticsFile, 'w+')  # weight statistics

    print >> wfs, 'Overall Minimum= ' + str(np.amin(W))
    print >> wfs, 'Overall Maximum= ' + str(np.amax(W))
    print >> wfs, 'Overall Mean= ' + str(np.mean(W))
    print >> wfs, 'Overall Variance= ' + str(np.var(W))

    pruningThreshold = 15

    # Report every (channel, filter) pair whose weights all lie in the
    # interval (-pruningThreshold, pruningThreshold), i.e. candidates
    # for pruning.
    if len(W.shape) == 3:
        numFilter = W.shape[2]
        numChannel = 1
    else:
        numFilter = W.shape[3]
        numChannel = W.shape[2]

    numberOfFilters = numFilter * numChannel
    percent = (100.0 * getNumberOfFiltersToBePruned(W, pruningThreshold, 0)) / numberOfFilters
    
    print >> wfs, '(channel,filter)'
    numberOfFiltersToPrune = 0
    if len(W.shape) == 4:
        for i in range(W.shape[2]):
            for j in range(W.shape[3]):
                maxW = np.amax(W[:, :, i, j])
                minW = np.amin(W[:, :, i, j])
                if abs(minW) < pruningThreshold and abs(maxW) < pruningThreshold:
                    print >> wfs, '(' + str(i) + ',' + str(j) + ')'
                    numberOfFiltersToPrune = numberOfFiltersToPrune + 1

    print >> wfs, 'Total number of (channel,filter) pairs to prune= ' + str(numberOfFiltersToPrune)
        
    # Dump each kernel and its per-kernel statistics.
    if len(W.shape) == 4:
        for i in range(W.shape[2]):
            for j in range(W.shape[3]):
                weightMatrix = W[:, :, i, j]
                print >> wfs, 'i=' + str(i) + ' j=' + str(j)
                print >> wf, weightMatrix
                maxVector = np.amax(W[:, :, i, j], axis=1)
                minVector = np.amin(W[:, :, i, j], axis=1)
                print >> wfs, 'Minimum=' + str(np.amin(minVector))
                print >> wfs, 'Maximum=' + str(np.amax(maxVector))
                print >> wfs, 'Mean=' + str(np.mean(weightMatrix))
                print >> wfs, 'Variance=' + str(np.var(weightMatrix))
                print >> wfs, maxVector
                print >> wfs, minVector
    else:
        for i in range(W.shape[2]):
            print >> wfs, W[:, :, i]
            print >> wfs, np.amax(W[:, :, i], axis=1)
    

    wf.close()
    wfs.close()

    pl.show()
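
make_mosaic and nice_imshow are also assumed by this script. The usual pattern, which the calls above are consistent with, is to tile a stack of 2-D kernels into one grid image and display it with a colorbar. A hedged sketch, assuming make_mosaic receives an array shaped (numKernels, height, width):

import numpy as np
import pylab as pl

def make_mosaic(imgs, nrows, ncols, border=1):
    # Hypothetical sketch: tile imgs[i] into an nrows x ncols grid,
    # with a one-pixel NaN border (rendered blank) between tiles.
    h, w = imgs.shape[1], imgs.shape[2]
    mosaic = np.full((nrows * h + (nrows - 1) * border,
                      ncols * w + (ncols - 1) * border), np.nan)
    for i in range(min(nrows * ncols, imgs.shape[0])):
        r, c = divmod(i, ncols)
        mosaic[r * (h + border):r * (h + border) + h,
               c * (w + border):c * (w + border) + w] = imgs[i]
    return mosaic

def nice_imshow(ax, data, cmap=None):
    # Hypothetical sketch: imshow on the given axes plus a colorbar.
    im = ax.imshow(data, cmap=cmap, interpolation='nearest')
    pl.colorbar(im, ax=ax)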
Code example #5
from lib_metrics import find_mse
import load_and_save_images as lm
import numpy as np
import os

dataFolder = os.getcwd() + '/data_prev'
image_files, im = lm.load_im(dataFolder)
print 'Image Shape:' + str(im.shape)
label = lm.load_label(image_files)
print 'Label Shape:' + str(label.shape)
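# Sanity check: comparing the labels with themselves should yield zero
# error for every image.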
mse = find_mse(label, label)
print mse