Example #1
import numpy as np
import matplotlib.pyplot as plt
from keras import backend as K
from keras.models import load_model
import mytool.parser as parser


def main():
    emotion_classifier = load_model('../model_best/063834.h5')
    layer_dict = {layer.name: layer for layer in emotion_classifier.layers}

    input_img = emotion_classifier.input
    name_ls = ["conv2d_33", "conv2d_34", "conv2d_35", "conv2d_36"]
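    # one K.function per named conv layer: (input image, learning phase) -> activations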
    collect_layers = [
        K.function([input_img, K.learning_phase()], [layer_dict[name].output])
        for name in name_ls
    ]

    X, Y = parser.parse('../data/train_small.csv')
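    # scale pixels to [0, 1] and give each 48x48x1 image a batch dimension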
    private_pixels = X / 255
    private_pixels = [
        private_pixels[i].reshape((1, 48, 48, 1))
        for i in range(len(private_pixels))
    ]

    choose_id = 9
    photo = private_pixels[choose_id]
    for cnt, fn in enumerate(collect_layers):
        im = fn([photo, 0])  # get the output of that layer in test phase
        nb_filter = im[0].shape[3]
        print("layer_name:{}\tfilter_num:{}".format(name_ls[cnt], nb_filter))
        fig = plt.figure(figsize=(14, 8))
        for i in range(nb_filter):
            ax = fig.add_subplot(nb_filter // 16, 16, i + 1)
            ax.imshow(im[0][0, :, :, i], cmap='BuGn')
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))
        plt.tight_layout()
        fig.suptitle('Output of layer {} (given image {})'.format(
            name_ls[cnt], choose_id))
        img_path = "5_img_{}_{}.png".format(name_ls[cnt], choose_id)
        fig.savefig(img_path)
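
# main() is defined but never invoked in the snippet; a minimal entry point:
if __name__ == '__main__':
    main()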
Example #2
import sys, random
import mytool.parser as parser
import mytool.LR as LR

# set file
trainfile = sys.argv[1] if len(sys.argv) > 1 else './data/train.csv'
testfile = sys.argv[2] if len(sys.argv) > 2 else './data/test_X.csv'
outfile = sys.argv[3] if len(sys.argv) > 3 else './res.csv'

# parse file
trainset, testset = parser.parse(trainfile, testfile, feature=[9], dim=1)
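# hold out the last 1000 rows for evaluation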
evaluset = trainset[-1000:]
trainset = trainset[:-1000]

def train(maxiter=10):
	for i in range(maxiter):
		random.shuffle(trainset)
		reg.train(trainset)
	print(reg.evaluate(evaluset))

def traingroup(maxiter=10,batchsize=5):
	for i in range(maxiter):
		random.shuffle(trainset)
		reg.traingroup(trainset[:batchsize])

def output():
	out = open(outfile, 'w')
	out.write('id,value\n')
	for i in range(len(testset)):
		out.write('id_%d,%f\n' % (i, reg.test(testset[i], printout=False)))
	out.close()
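
# The helpers above use `reg` without ever constructing it; a hypothetical
# driver, assuming a constructor LR.LR(dim) -- mytool.LR's real API is not
# shown in the snippet:
reg = LR.LR(len(trainset[0][0]))
train(maxiter=10)
output()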
Example #3
import sys, random
from mytool import parser
import numpy as np
import keras
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten
from keras.optimizers import SGD, Adam
from keras.utils import np_utils
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator

train_file = sys.argv[1] if len(sys.argv) > 1 else './data/train.csv'

# load data
X, Y = parser.parse(train_file)

# split data set
X = X / 255
train_X, train_Y = X[:-1000], Y[:-1000]
evaluate_X, evaluate_Y = X[-1000:], Y[-1000:]

# data gen
datagen = ImageDataGenerator(rotation_range=40,
                             shear_range=0.2,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')

# init model
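# The snippet ends at "# init model". A minimal sketch of what could follow,
# assuming 48x48x1 inputs and 7 emotion classes as in the other examples
# (the layer sizes are illustrative, not the author's):
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(48, 48, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=Adam(),
              metrics=['accuracy'])

# fit on augmented batches from the generator defined above
model.fit_generator(datagen.flow(train_X, train_Y, batch_size=128),
                    steps_per_epoch=len(train_X) // 128,
                    epochs=30,
                    validation_data=(evaluate_X, evaluate_Y))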
Example #4
def output():
	# the snippet begins mid-function; the header and file handle are
	# reconstructed following the output() helper in Example #2
	out = open(outfile, 'w')
	out.write('id,label\n')
	for i in range(len(testset)):
		score = bpn.test(testset[i], printout=False)
		out.write('%d,%d\n' % (i+1, int(score+0.5)))  # round score to nearest label
	out.close()

def save():
	# bpn.save presumably delegates to np.save, which appends ".npy" --
	# hence load() below reads "./model/tmp.npy"
	bpn.save("./model/tmp")

def load():
	bpn.load("./model/tmp.npy")


# parse file (the truncated top presumably imported mytool's parser/BPN and
# set the X_train, Y_train, X_test paths)
print("Parsing ...")
trainset, testset = parser.parse(X_train, Y_train, X_test)
trainset, testset = parser.parse_power(trainset, testset, power=3)
trainset, testset = parser.parse_normalize_mean(trainset, testset)
#trainset, testset = parser.parse_feature_scaling(trainset, testset)

# hold out the last 4000 rows for evaluation; note trainset[:] is a full copy,
# so (unlike Example #2) the evaluation rows stay in the training set
evaluset = trainset[-4000:]
trainset = trainset[:]

# train
print "Training ..."
dim = len(trainset[0][0])
bpn = BPN.BPN(sizes=[dim,1], learn_rate=1, learn_reg=0.0, print_iter=len(trainset))
#train_sgd(100)
bpn.load("model_best/085577_3d.npy")
print "Output"
output()
Example #5
def histogramEqualization(data):
    # the snippet begins mid-function; the header and histogram setup below
    # are reconstructed: flatten the image, then build its cumulative histogram
    shape = data.shape
    data = data.reshape(-1)
    hist, _ = np.histogram(data, bins=256, range=(0, 256))
    sum_hist = np.cumsum(hist)

    # Equalization: remap each gray level by its cumulative frequency
    for idx in range(len(data)):
        gray = int(data[idx])
        data[idx] = float(int(round(255.0 * sum_hist[gray] / sum_hist[255])))

    data = data.reshape(shape)


def lightNormalize(dataset):
    print('lightNormalize')
    for data in dataset:
        histogramEqualization(data)


# load data
X, Y = parser.parse(train_file)
test_X, test_Y = parser.parse(test_file)

# split data set
#lightNormalize(X)
#lightNormalize(test_X)
X = (X - 128) / 255  # shift-and-scale pixels to roughly [-0.5, 0.5]
test_X = (test_X - 128) / 255
train_X, train_Y = X[:-1000], Y[:-1000]
evaluate_X, evaluate_Y = X[-1000:], Y[-1000:]

# data gen
datagen = ImageDataGenerator(rotation_range=20,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')  # closing arguments reconstructed to match Example #3
Example #6
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from keras.models import load_model
import mytool.parser as parser


def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues):
    # the snippet begins mid-function; the imports, header, and top half are
    # reconstructed from the standard scikit-learn example this tail matches
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]  # row-normalize
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j,
                 i,
                 '{:.2f}'.format(cm[i, j]),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


#np.set_printoptions(precision=2)

# load model and predict
emotion_classifier = load_model('../model_best/063834.h5')

# load data: use the last 1000 training samples as a held-out dev set
X, Y = parser.parse('../data/train.csv')
dev_feats = X[-1000:] / 255
te_labels = np.argmax(Y[-1000:], axis=1)  # one-hot labels -> class indices

# predict (predict_classes returns the most probable class index per sample)
predictions = emotion_classifier.predict_classes(dev_feats)

# draw confusion
conf_mat = confusion_matrix(te_labels, predictions)
plt.figure()
plot_confusion_matrix(conf_mat,
                      classes=[
                          "Angry", "Disgust", "Fear", "Happy", "Sad",
                          "Surprise", "Neutral"
                      ])
plt.savefig("3_confusion.png")
Example #7
import sys
from mytool import parser
import numpy as np
import keras
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten
from keras.optimizers import SGD, Adam
from keras.utils import np_utils

test_file = sys.argv[1] if len(sys.argv) > 1 else './data/test.csv'
predict_file = sys.argv[2] if len(sys.argv) > 2 else './predict.csv'

# load data
test_X, test_Y = parser.parse(test_file)
test_X = test_X / 255


def output():
    # note: this test_Y is a local that shadows the parsed test_Y above
    test_Y = model.predict(test_X)
    test_Y = np.argmax(test_Y, axis=1)  # probabilities -> predicted class index
    out = open(predict_file, 'w')  # renamed from `output` to avoid shadowing the function
    out.write("id,label\n")
    for idx in range(len(test_Y)):
        out.write(str(idx) + "," + str(test_Y[idx]) + "\n")
    out.close()


model = load_model('./model_best/063834.h5')
output()