예제 #1
0
# Tail of a Keras CNN build: second conv stage, classifier head, compile,
# input reshaping, and persistence.  `cnn`, `num_classes`, `X_train`,
# `X_test`, `y_train`, `y_test` are defined earlier in the file (not
# visible in this chunk).
cnn.add(Dropout(0.25))
# 'linear' conv activation followed by an explicit LeakyReLU layer.
cnn.add(Conv2D(64, (8, 8), activation='linear', padding='same'))
cnn.add(LeakyReLU(alpha=0.1))
cnn.add(MaxPooling2D(pool_size=(8, 8), padding='same'))
cnn.add(Dropout(0.4))
cnn.add(Flatten())
cnn.add(Dense(256, activation='linear'))
cnn.add(LeakyReLU(alpha=0.1))
cnn.add(Dropout(0.3))
cnn.add(Dense(num_classes, activation='softmax'))

cnn.compile(loss=keras.losses.categorical_crossentropy,
            optimizer=keras.optimizers.SGD(),
            metrics=['accuracy'])

# Add a trailing channel axis: (samples, 256, 256) -> (samples, 256, 256, 1).
# assumes inputs hold 256*256 single-channel pixels per sample -- TODO confirm
X_train_cnn = X_train.reshape(-1, 256, 256, 1)
X_test_cnn = X_test.reshape(-1, 256, 256, 1)

# One-hot encode integer class labels for categorical_crossentropy.
y_train_one_hot = to_categorical(y_train, num_classes=num_classes)
y_test_one_hot = to_categorical(y_test, num_classes=num_classes)

# # Setup

# Persist the model together with its data split under a named Setup.
# NOTE(review): the *test* split is attached as validation data here.
cnn_setup = Setup('cnn_landmark_32-64-128-256_k88')
cnn_setup.setModel(cnn)
cnn_setup.setData(XTrain=X_train_cnn,
                  YTrain=y_train_one_hot,
                  XValidation=X_test_cnn,
                  YValidation=y_test_one_hot)
cnn_setup.save('setup')
예제 #2
0
# Fully-connected classifier over 2048-d feature vectors: four hidden ReLU
# layers (widths supplied by earlier-defined variables) feeding a softmax
# head with `no_of_classes` outputs.
hidden_widths = (first_layer, second_layer, third_layer, forth_layer)

nn = Sequential()
nn.add(Dense(hidden_widths[0], input_dim=2048, activation='relu'))
for width in hidden_widths[1:]:
    nn.add(Dense(width, activation='relu'))
nn.add(Dense(no_of_classes, activation='softmax'))

nn.compile(loss=keras.losses.categorical_crossentropy,
           optimizer=keras.optimizers.Adagrad(),
           metrics=['accuracy'])

# Encode the layer widths and class count into the setup name.
model_name = '%s.nn_landmark_%s-%s-%s-%s_reduced%d' % (
    model_number, first_layer, second_layer, third_layer, forth_layer,
    no_of_classes)
nn_setup = Setup(model_name)

# # ==========================================================================================
# # ==========================================================================================
# # ==========================================================================================

# print('Preparing Data ...')
#
# filenames = [str(i) + '.npy' for i in range(2000, 1144000 + 1, 2000)] + ['1144636.npy']
#
# X = np.empty((0, 2048))
#
# for filename in filenames:
#     print('.', end='')
#     temp = np.load('../data/preprocessed/%s' % filename)
#     X = np.append(X, temp, axis=0)
예제 #3
0
from cnn.Setup import Setup
import sys
import keras.backend as K

# Resume training of a previously saved Setup; the relative setup file
# path comes from the command line.
rel_filepath = sys.argv[1]

continue_setup = Setup('')
continue_setup.load(rel_filepath=rel_filepath)

# Set to a float to override the restored optimizer's learning rate.
change_lr = None

if change_lr is not None:
    K.set_value(continue_setup.getModel().optimizer.lr, change_lr)
    print('Changing the model optimizer learning rate to = %f' % K.get_value(continue_setup.getModel().optimizer.lr))
else:
    print('Model optimizer learning rate = %f' % K.get_value(continue_setup.getModel().optimizer.lr))

X_train_cnn, y_train_one_hot, X_val_cnn, y_val_one_hot, X_test_cnn, y_test_one_hot = continue_setup.getData()

# Train one epoch at a time so metrics can be checkpointed after every epoch.
for epoch in range(continue_setup.getEpoch() + 1, 10000):
    print('Training \'%s\': Epoch %d' % (continue_setup.getName(), epoch))
    # NOTE(review): `dropout` is the keras History object returned by fit(),
    # not a dropout rate -- the name is misleading.
    dropout = continue_setup.getModel().fit(X_train_cnn, y_train_one_hot,
                                            batch_size=64, epochs=1, verbose=1,
                                            validation_data=(X_val_cnn, y_val_one_hot))

    # NOTE(review): this call is truncated in the source -- the remaining
    # keyword arguments and closing parenthesis are missing.
    continue_setup.updateEpochs(add_epochs=1,
                                train_acc=dropout.history['acc'],
                                train_loss=dropout.history['loss'],
                                val_acc=dropout.history['val_acc'],
                                val_loss=dropout.history['val_loss'],
                                test_acc=[0],
from cnn.Setup import Setup
import sys
import keras.backend as K
import numpy as np
import os
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split

# Resume a saved Setup whose training data lives on disk as chunked .npy
# files; the relative setup file path comes from the command line.
rel_filepath = sys.argv[1]

continue_setup = Setup('')
continue_setup.load(rel_filepath=rel_filepath)

# Set to a float to override the restored optimizer's learning rate.
change_lr = None

if change_lr is not None:
    K.set_value(continue_setup.getModel().optimizer.lr, change_lr)
    print('Changing the model optimizer learning rate to = %f' %
          K.get_value(continue_setup.getModel().optimizer.lr))
else:
    print('Model optimizer learning rate = %f' %
          K.get_value(continue_setup.getModel().optimizer.lr))

# Directories holding the chunked .npy feature/label files for each split.
XTrain_directory, YTrain_directory, XValidation_directory, YValidation_directory, XTest_directory, YTest_directory = continue_setup.getDataDirectory(
)

no_of_classes = 15000


# NOTE(review): this generator definition is truncated in the source --
# the list comprehension and the rest of the body are cut off mid-expression.
def train_data_generator(XTrain_directory, YTrain_directory):
    filenames = [str(i) + '.npy'
예제 #5
0
from cnn.Setup import Setup
import sys
import keras.backend as K
import numpy as np
import os
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split

# Prediction script: load a saved Setup and run it over chunked test data.
# argv[1] is the setup file path, argv[2] the directory of test .npy chunks.
rel_filepath = sys.argv[1]
XTest_directory = sys.argv[2]

continue_setup = Setup('')
continue_setup.load(rel_filepath=rel_filepath)

no_of_classes = 15000


def test_data_generator(XTest_directory, YTest_directory):
    """Endlessly yield test-feature batches loaded from chunked .npy files.

    Walks the fixed chunk files 2000.npy, 4000.npy, ..., 114000.npy plus the
    final partial chunk 115424.npy, reshaping each loaded array to
    (samples, 2048, 1, 1).  ``YTest_directory`` is accepted for interface
    symmetry with the training generator but is never read here.
    """
    chunk_names = ['%d.npy' % i for i in range(2000, 114000 + 1, 2000)]
    chunk_names.append('115424.npy')

    # Loop forever: keras generator consumers pull a fixed number of steps.
    while True:
        for chunk_name in chunk_names:
            batch = np.load(os.path.join(XTest_directory, chunk_name))
            yield batch.reshape(-1, 2048, 1, 1)


# Predict over all 58 chunks (57 full chunks of 2000 samples + the final
# partial chunk).  NOTE(review): 114000 / 2000 + 1 evaluates to the float
# 58.0 under Python 3 -- confirm predict_generator accepts a non-int steps.
y_pred = continue_setup.getModel().predict_generator(test_data_generator(
    XTest_directory, None),
                                                     steps=(114000 / 2000 + 1))
예제 #6
0
           # NOTE(review): this line continues a Conv2D(...) call whose
           # opening lines are outside this chunk.
           padding='same'))
cnn.add(LeakyReLU(alpha=0.1))
cnn.add(MaxPooling2D(pool_size=pool_size, padding='same'))
cnn.add(Dropout(0.4))
cnn.add(Flatten())
cnn.add(Dense(forth_layer, activation='linear'))
cnn.add(LeakyReLU(alpha=0.1))
cnn.add(Dropout(0.3))
# +1 output unit beyond no_of_classes -- presumably a catch-all /
# background class; verify against the label encoding used upstream.
cnn.add(Dense(no_of_classes + 1, activation='softmax'))

cnn.compile(loss=keras.losses.categorical_crossentropy,
            optimizer=keras.optimizers.SGD(),
            metrics=['accuracy'])

# Encode all architecture hyper-parameters (layer widths, kernel, pool,
# stride sizes, class count) into the setup name.
cnn_setup = Setup('%s.cnn_landmark_%s-%s-%s-%s_k%s%s_p%s%s_s%s%s_reduced%d' %
                  (model_number, first_layer, second_layer, third_layer,
                   forth_layer, kernel_size[0], kernel_size[1], pool_size[0],
                   pool_size[1], strides[0], strides[1], no_of_classes))

# # ==========================================================================================
# # ==========================================================================================
# # ==========================================================================================

# Dataset paths keyed by reduced class count.
# NOTE(review): this dict literal is truncated in the source.
datasets = {
    450: {
        'train_csv': '../data_reduced%s/train.csv' % '',
        'train_image_path': '../data_reduced%s/train' % '',
        'test_csv': '../data_reduced%s/test.csv' % '',
        'test_image_path': '../data_reduced%s/test' % '',
    },
    50: {
        'train_csv': '../data_reduced%s/train.csv' % '50',
예제 #7
0
 # Fragment of a TestCase (enclosing class not visible in this chunk):
 # create a fresh Setup named 'fashion_model' before each test.
 def setUp(self):
     self.setup = Setup('fashion_model')
예제 #8
0
class TestSetup(TestCase):
    """Tests for Setup: epoch/metric bookkeeping via updateEpochs, and an
    end-to-end attach-and-save of a small trained fashion-MNIST CNN."""

    @classmethod
    def setUpClass(cls):
        """Train a tiny CNN on a 100-sample fashion-MNIST subset once for
        the whole class (training is expensive, so not per-test)."""
        (cls.train_X, cls.train_Y), (cls.test_X, cls.test_Y) = fashion_mnist.load_data()
        # Keep only the first 100 samples per split so the fixture is fast.
        (cls.train_X, cls.train_Y), (cls.test_X, cls.test_Y) = (cls.train_X[:100], cls.train_Y[:100]), (cls.test_X[:100], cls.test_Y[:100])

        # Add the single grayscale channel axis expected by Conv2D.
        cls.train_X = cls.train_X.reshape(-1, 28, 28, 1)
        cls.test_X = cls.test_X.reshape(-1, 28, 28, 1)

        # Scale pixel values from [0, 255] into [0, 1].
        cls.train_X = cls.train_X.astype('float32')
        cls.test_X = cls.test_X.astype('float32')
        cls.train_X = cls.train_X / 255.
        cls.test_X = cls.test_X / 255.

        # Change the labels from categorical to one-hot encoding
        train_Y_one_hot = to_categorical(cls.train_Y)
        test_Y_one_hot = to_categorical(cls.test_Y)

        # Hold out 20% of the training subset for validation.
        cls.train_X, cls.valid_X, train_label, cls.valid_label = train_test_split(cls.train_X, train_Y_one_hot, test_size=0.2,
                                                                      random_state=13)

        batch_size = 64
        epochs = 2
        num_classes = 10

        # Three conv/pool stages ('linear' activations with explicit
        # LeakyReLU layers) followed by a dense softmax head.
        cls.fashion_model = Sequential()
        cls.fashion_model.add(Conv2D(32, kernel_size=(3, 3), activation='linear', input_shape=(28, 28, 1), padding='same'))
        cls.fashion_model.add(LeakyReLU(alpha=0.1))
        cls.fashion_model.add(MaxPooling2D((2, 2), padding='same'))
        cls.fashion_model.add(Conv2D(64, (3, 3), activation='linear', padding='same'))
        cls.fashion_model.add(LeakyReLU(alpha=0.1))
        cls.fashion_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
        cls.fashion_model.add(Conv2D(128, (3, 3), activation='linear', padding='same'))
        cls.fashion_model.add(LeakyReLU(alpha=0.1))
        cls.fashion_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
        cls.fashion_model.add(Flatten())
        cls.fashion_model.add(Dense(128, activation='linear'))
        cls.fashion_model.add(LeakyReLU(alpha=0.1))
        cls.fashion_model.add(Dense(num_classes, activation='softmax'))

        cls.fashion_model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(),
                              metrics=['accuracy'])

        cls.fashion_train = cls.fashion_model.fit(cls.train_X, train_label, batch_size=batch_size, epochs=epochs, verbose=1,
                                          validation_data=(cls.valid_X, cls.valid_label))

        cls.test_eval = cls.fashion_model.evaluate(cls.test_X, test_Y_one_hot, verbose=0)

    def setUp(self):
        """Create a fresh, empty Setup named 'fashion_model' per test."""
        self.setup = Setup('fashion_model')

    def test_updateEpochs_correctList_noModify(self):
        """Matching-length metric lists are stored verbatim when
        allow_modify=False."""
        mList = [1, 2, 3]
        self.setup.updateEpochs(3, mList, mList, mList, mList, mList, mList, allow_modify=False)
        self.assertEqual(self.setup._train_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._train_loss, [1, 2, 3])
        self.assertEqual(self.setup._val_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._val_loss, [1, 2, 3])
        self.assertEqual(self.setup._test_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._test_loss, [1, 2, 3])

    def test_updateEpochs_smallerList_noModify(self):
        """A too-short metric list (here val_acc) must raise ValueError when
        modification is disallowed."""
        mList = [1, 2, 3]
        self.assertRaises(ValueError, self.setup.updateEpochs, 3, mList, mList, mList[:-1], mList, mList, mList, allow_modify=False)

    def test_updateEpochs_largerList_noModify(self):
        """A too-long metric list (here test_loss) must raise ValueError when
        modification is disallowed."""
        mList = [1, 2, 3]
        self.assertRaises(ValueError, self.setup.updateEpochs, 3, mList, mList, mList, mList, mList, mList + [4], allow_modify=False)

    def test_updateEpochs_correctList_modifyAllowed(self):
        """Matching-length lists are stored unchanged even when
        allow_modify=True."""
        mList = [1, 2, 3]
        self.setup.updateEpochs(3, mList, mList, mList, mList, mList, mList, allow_modify=True)
        self.assertEqual(self.setup._train_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._train_loss, [1, 2, 3])
        self.assertEqual(self.setup._val_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._val_loss, [1, 2, 3])
        self.assertEqual(self.setup._test_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._test_loss, [1, 2, 3])

    def test_updateEpochs_smallerList_modifyAllowed(self):
        """With allow_modify=True a short test_acc list is padded to the
        epoch count -- presumably by repeating its last element, given the
        expected [1, 2, 2]; verify against Setup.updateEpochs."""
        mList = [1, 2, 3]
        self.setup.updateEpochs(3, mList, mList, mList, mList, mList[:-1], mList, allow_modify=True)
        self.assertEqual(self.setup._train_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._train_loss, [1, 2, 3])
        self.assertEqual(self.setup._val_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._val_loss, [1, 2, 3])
        self.assertEqual(self.setup._test_accuracy, [1, 2, 2])
        self.assertEqual(self.setup._test_loss, [1, 2, 3])

    def test_updateEpochs_largerList_modifyAllowed(self):
        """With allow_modify=True an over-long test_loss list is trimmed
        back to the epoch count."""
        mList = [1, 2, 3]
        self.setup.updateEpochs(3, mList, mList, mList, mList, mList, mList + [4], allow_modify=True)
        self.assertEqual(self.setup._train_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._train_loss, [1, 2, 3])
        self.assertEqual(self.setup._val_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._val_loss, [1, 2, 3])
        self.assertEqual(self.setup._test_accuracy, [1, 2, 3])
        self.assertEqual(self.setup._test_loss, [1, 2, 3])

    def test_positive(self):
        """Smoke test: the trained model and all six data arrays can be
        attached one call at a time and the setup saved to disk."""
        self.setup.setModel(self.fashion_model)

        # NOTE(review): YTrain/YTest receive the raw integer labels while
        # YValidation receives one-hot labels -- confirm this mix is intended.
        self.setup.setData(XTrain=self.train_X)
        self.setup.setData(XValidation=self.valid_X)
        self.setup.setData(XTest=self.test_X)
        self.setup.setData(YTrain=self.train_Y)
        self.setup.setData(YValidation=self.valid_label)
        self.setup.setData(YTest=self.test_Y)

        self.setup.save('setup')