Example #1
    def print_keras_model(self, model=None) -> float:
        # Evaluate the saved weights on the test split and return accuracy.
        if model is None:
            model = self.get_bone_net()()
            model.compile(loss=losses.categorical_crossentropy,
                          optimizer=optimizers.Adadelta(),
                          metrics=['accuracy'])

        self.logger.info("load weight from {}".format(
            os.path.abspath(self.keras_model_file)))
        model.set_weights(load_model(self.keras_model_file).get_weights())
        score = model.evaluate(self.x_test, self.y_test, verbose=0)
        self.logger.info('Test loss: {}'.format(score[0]))
        self.logger.info('Test accuracy: {}'.format(score[1]))
        return score[1]
Example #2
    def keras_train(self):
        model = self.get_bone_net()()
        model.compile(loss=losses.categorical_crossentropy,
                      optimizer=optimizers.Adadelta(),
                      metrics=['accuracy'])

        if os.path.exists(self.keras_model_file):
            self.logger.info("load weight from {}".format(
                os.path.abspath(self.keras_model_file)))
            model.set_weights(load_model(self.keras_model_file).get_weights())

        model.fit(self.x_train,
                  self.y_train,
                  batch_size=self.batch_size,
                  epochs=1,
                  verbose=1,
                  validation_data=(self.x_test, self.y_test))
        model.save(self.keras_model_file)

        self.print_keras_model(model=model)
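
Both methods lean on attributes the excerpt does not show. Below is a minimal sketch of the assumed scaffolding; the class name, the get_bone_net factory, and the MNIST-style data loading are hypothetical stand-ins, not from the source.

import logging

from keras import losses, optimizers
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers import Conv2D, Dense, Flatten
from keras.utils import np_utils


class KerasTrainer:
    def __init__(self, keras_model_file="model.h5", batch_size=128):
        self.logger = logging.getLogger(__name__)
        self.keras_model_file = keras_model_file
        self.batch_size = batch_size
        (x_tr, y_tr), (x_te, y_te) = mnist.load_data()
        self.x_train = x_tr.reshape(-1, 28, 28, 1) / 255.0
        self.x_test = x_te.reshape(-1, 28, 28, 1) / 255.0
        self.y_train = np_utils.to_categorical(y_tr, 10)
        self.y_test = np_utils.to_categorical(y_te, 10)

    def get_bone_net(self):
        # Returns a zero-argument factory, matching the get_bone_net()()
        # call pattern used in the methods above.
        def build():
            return Sequential([
                Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
                Flatten(),
                Dense(10, activation='softmax'),
            ])
        return build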
Example #3
    def testAdadeltaCompatibility(self):
        opt_v1 = optimizers.Adadelta(lr=0.01)
        opt_v2 = adadelta.Adadelta(learning_rate=0.01)
        self._testOptimizersCompatibility(opt_v1, opt_v2)
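
The helper _testOptimizersCompatibility belongs to the TensorFlow test suite and is not shown. Below is a rough sketch of the idea, assuming both optimizer objects expose the TF2 apply_gradients interface; the real helper also bridges the legacy get_updates API.

import numpy as np
import tensorflow as tf

def check_optimizers_match(opt_a, opt_b, steps=5):
    # Train two identical variables with the two optimizers and verify
    # they end up in (numerically) the same place.
    w_a = tf.Variable([1.0, 2.0, 3.0])
    w_b = tf.Variable([1.0, 2.0, 3.0])
    for _ in range(steps):
        with tf.GradientTape(persistent=True) as tape:
            loss_a = tf.reduce_sum(tf.square(w_a))
            loss_b = tf.reduce_sum(tf.square(w_b))
        opt_a.apply_gradients([(tape.gradient(loss_a, w_a), w_a)])
        opt_b.apply_gradients([(tape.gradient(loss_b, w_b), w_b)])
    np.testing.assert_allclose(w_a.numpy(), w_b.numpy(), rtol=1e-5)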
Example #4
# Imports assumed by this snippet (added for completeness; standalone Keras assumed)
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from keras import backend as K, losses, metrics, optimizers
from keras.initializers import Constant
from keras.layers import (Activation, Conv2D, Dense, Dropout, Flatten,
                          InputLayer, MaxPool2D)
from keras.models import Sequential
from keras.utils import np_utils

# Load the data
train_data, train_labels, eval_data, eval_labels = load_fashion_data()
train_data = train_data.reshape(-1, 28, 28, 1)
train_labels = np_utils.to_categorical(train_labels, 10)

print(train_data.shape)

# Model with Keras
model = Sequential()
model.add(InputLayer(input_shape=(28, 28, 1), name="1_Eingabe"))
model.add(Conv2D(32, (2, 2), padding='same', bias_initializer=Constant(0.01),
                 kernel_initializer='random_uniform', name="2_Conv2D"))
model.add(Activation(activation='relu', name="3_ReLu"))
model.add(MaxPool2D(padding='same', name="4_MaxPooling2D"))
model.add(Conv2D(32, (2, 2), padding='same', bias_initializer=Constant(0.01),
                 kernel_initializer='random_uniform', name="5_Conv2D"))
model.add(Activation(activation='relu', name="6_ReLu"))
model.add(MaxPool2D(padding='same', name="7_MaxPooling2D"))
model.add(Flatten())
model.add(Dense(1024, activation='relu', bias_initializer=Constant(0.01),
                kernel_initializer='random_uniform', name="8_Dense"))
model.add(Dropout(0.4, name="9_Dropout"))
model.add(Dense(10, activation='softmax', name="10_Ausgabe"))

model.compile(loss=losses.categorical_crossentropy,
              optimizer=optimizers.Adadelta(),
              metrics=["accuracy", "mse", metrics.categorical_accuracy])

# Route the TF session through the TensorBoard debugger plugin
K.set_session(tf_debug.TensorBoardDebugWrapperSession(tf.Session(), "localhost:12345"))

history = model.fit(train_data,train_labels, batch_size=64, epochs=100, verbose=1,validation_split=0.33)

# Optional output:
#plt.plot(history.history['val_loss'], 'r', history.history['val_acc'], 'b')
#plt.show()
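
load_fashion_data is defined elsewhere in the original project; below is a plausible stand-in (an assumption, not the original helper) built on keras.datasets.

from keras.datasets import fashion_mnist

def load_fashion_data():
    # Fashion-MNIST: 60k training and 10k evaluation images, 28x28 grayscale.
    (train_data, train_labels), (eval_data, eval_labels) = fashion_mnist.load_data()
    return train_data / 255.0, train_labels, eval_data / 255.0, eval_labels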
Example #5
model.add(Flatten())

model.add(
    Dense(1024,
          activation='relu',
          kernel_initializer='random_uniform',
          name="Dense_fc_1"))
model.add(
    Dense(512,
          activation='relu',
          kernel_initializer='random_uniform',
          name="Dense_fc_2"))
model.add(Dense(10, activation='softmax', name="Ausgabe"))

model.compile(loss=losses.categorical_crossentropy,
              optimizer=optimizers.Adadelta(),
              metrics=["accuracy", "mse", metrics.categorical_accuracy])


# Called when training begins
def train_begin(logs=None):
    url = 'http://localhost:9000/publish/train/begin'
    post_fields = {"model": model.to_json()}
    requests.post(url, data=post_fields)


# Pass the function itself: calling it here (train_begin()) would fire the
# request immediately and register its None return value as the callback.
lambda_cb = LambdaCallback(on_train_begin=train_begin)
remote_cb = RemoteMonitor(root='http://localhost:9000',
                          path="/publish/epoch/end/",
                          send_as_json=True)
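
The excerpt ends before training starts; presumably the two callbacks are handed to model.fit, roughly as below (train_data and train_labels are assumed to exist as in the previous example).

history = model.fit(train_data, train_labels,
                    batch_size=64,
                    epochs=100,
                    verbose=1,
                    validation_split=0.33,
                    callbacks=[lambda_cb, remote_cb])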
Example #6
def optimizer(name='adam', l_rate=0.01, decay=0.0, **kwargs):
    '''
    Define the optimizer by its default parameters, except the learning rate.
    Note that most optimizers discourage modifying their specially
    designed parameters.
    We suggest specifying gamma according to common practice when
    using the Adabound optimizers.
    Options:
        name: the name of the optimizer (default='adam') (available: 'adam',
              'amsgrad', 'adamax', 'adabound', 'amsbound', 'nadam',
              'namsgrad', 'nadabound', 'namsbound', 'adadelta', 'rms',
              'adagrad', 'adamw', 'nmoment', 'moment', 'sgd', 'proximal')
        l_rate: learning rate (default=0.01)
        decay: decay ratio ('adadeltaDA' does not support this option)
        other parameters: see the usage of the specific optimizer.
    Return:
        the particular optimizer object.
    '''
    name = name.casefold()
    if name == 'adam':
        return optimizers.Adam(l_rate, decay=decay, **kwargs)
    elif name == 'amsgrad':
        return optimizers.Adam(l_rate, decay=decay, amsgrad=True, **kwargs)
    elif name == 'adamax':
        return optimizers.Adamax(l_rate, decay=decay, **kwargs)
    elif name == 'adabound':
        return Adabound(l_rate, decay=decay, **kwargs)
    elif name == 'amsbound':
        return Adabound(l_rate, decay=decay, amsgrad=True, **kwargs)
    elif name == 'nadam':
        return MNadam(l_rate, decay=decay, **kwargs)
    elif name == 'namsgrad':
        return MNadam(l_rate, decay=decay, amsgrad=True, **kwargs)
    elif name == 'nadabound':
        return Nadabound(l_rate, decay=decay, **kwargs)
    elif name == 'namsbound':
        return Nadabound(l_rate, decay=decay, amsgrad=True, **kwargs)
    elif name == 'adadelta':
        return optimizers.Adadelta(l_rate, decay=decay, **kwargs)
    elif name == 'rms':
        return optimizers.RMSprop(l_rate, decay=decay, **kwargs)
    elif name == 'adagrad':
        return optimizers.Adagrad(l_rate, decay=decay, **kwargs)
    elif name == 'adamw':
        if compat.COMPATIBLE_MODE['1.14']:
            raise ImportError(
                'This optimizer is not allowed for compatibility, because it requires the contrib lib.'
            )
        _raise_TF_warn()
        if decay == 0.0:
            raise ValueError('Should use \'decay\' > 0 for AdamW.')
        logging.warning(
            'This optimizer uses \'decay\' as \'weight_decay\'.')
        return weight_decay_optimizers.AdamWOptimizer(weight_decay=decay,
                                                      learning_rate=l_rate,
                                                      **kwargs)
    elif name == 'nmoment':
        return optimizers.SGD(lr=l_rate,
                              momentum=0.9,
                              decay=decay,
                              nesterov=True,
                              **kwargs)
    elif name == 'moment':
        return optimizers.SGD(lr=l_rate,
                              momentum=0.9,
                              decay=decay,
                              nesterov=False,
                              **kwargs)
    elif name == 'sgd':
        return optimizers.SGD(lr=l_rate, decay=decay, **kwargs)
    elif name == 'proximal':
        _raise_TF_warn()
        if decay != 0.0:
            logging.warning('This optimizer does not support \'decay\'.')
        return proximal_gradient_descent.ProximalGradientDescentOptimizer(
            l_rate, **kwargs)
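
A short usage sketch for the factory above (the model variable is assumed):

# Request Adadelta through the factory; extra keyword arguments are
# forwarded to the underlying Keras optimizer.
opt = optimizer(name='adadelta', l_rate=1.0)
model.compile(loss='categorical_crossentropy', optimizer=opt,
              metrics=['accuracy'])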
Example #7
    def __init__(self, training_batch_size=100, existing_weight=None,
                 test_percentage=0.02, learning_rate=0.002,
                 save_every_x_epoch=5, number_of_training_sample=sys.maxsize,
                 fine_tuning_model=None, memory_safe=True, validate=True):

        if not memory_safe:
            self.Y = []
            self.X = []

        self.save_every_x_epoch = save_every_x_epoch
        self.validate = validate
        self.memory_safe = memory_safe
        self.number_of_sample = number_of_training_sample
        self.training_batch_size = training_batch_size

        # Input images are 160x160 RGB.
        self.img_size = 160

        self.img_size_flat = self.img_size * self.img_size * 3

        self.img_shape_full = (self.img_size, self.img_size, 3)

        self.test = {}
        test_X = []
        test_Y = []
        with open('train_labels.csv', 'r') as csvfile:
            reader = csv.reader(csvfile)
            all_class_samples = []
            for row in reader:
                all_class_samples.append(row)

            print(len(all_class_samples))
            test_count = int(test_percentage * len(all_class_samples))
            print("Training with: " + str(int((1 - test_percentage) * len(all_class_samples))) + ", Testing with: " + str(test_count))
            index = 0
            for row in all_class_samples:

                y = []
                # Columns 1-2 hold the keypoint presence indicators; columns
                # 3-6 hold pixel coordinates, scaled by the 160-pixel image
                # size (cf. normalize_loss below). A separate loop variable
                # `col` keeps the outer row counter `index` intact.
                for col in range(1, 7):
                    if col < 3:
                        y.append(float(row[col]))
                    else:
                        y.append(float(row[col]) / 160)
                print(y)

                # The first test_count rows form the held-out test set; the
                # remaining rows are training samples.
                if index > test_count:
                    if not memory_safe and len(self.Y) < number_of_training_sample:
                        image = Image.open(row[0])
                        img_array = np.asarray(image)
                        if img_array.shape != self.img_shape_full:
                            continue
                        self.X.append(img_array.flatten())
                        self.Y.append(y)
                        continue
                    else:
                        break

                self.test[row[0]] = y
                index += 1

            for (key, value) in self.test.items():

                image = Image.open(key)
                img_array = np.asarray(image)
                if img_array.shape != self.img_shape_full:
                    continue
                test_X.append(img_array.flatten())
                test_Y.append(value)

            self.test_X = np.array(test_X) / 255
            self.test_Y = np.array(test_Y)

            if not memory_safe:
                self.Y = np.array(self.Y)
                self.X = np.array(self.X) / 255
                print(self.X.shape, self.Y.shape)

        # Start construction of the Keras Sequential model.
        model = Sequential()
        self.model = model

        model.add(InputLayer(input_shape=(self.img_size_flat,)))

        # The input is a flattened array with img_size_flat elements, but the
        # convolutional layers expect images with shape img_shape_full.
        model.add(Reshape(self.img_shape_full))

        # First convolutional layer with ReLU-activation and max-pooling.
        model.add(Conv2D(kernel_size=5, strides=1, filters=16, padding='same',
                         activation='relu', name='layer_conv1'))
        model.add(MaxPooling2D(pool_size=2, strides=2))

        # Second convolutional layer with ReLU-activation and max-pooling.
        model.add(Conv2D(kernel_size=5, strides=1, filters=32, padding='same',
                         activation='relu', name='layer_conv2'))
        model.add(MaxPooling2D(pool_size=2, strides=2))

        model.add(Conv2D(kernel_size=5, strides=2, filters=64, padding='same',
                         activation='relu', name='layer_conv3'))
        model.add(MaxPooling2D(pool_size=2, strides=2))

        # Flatten the 4-rank output of the convolutional layers
        # to 2-rank that can be input to a fully-connected / dense layer.
        model.add(Flatten())

        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(6))

        def normalize_loss(yTrue, yPred):
            # Columns 0-1 are presence indicators; columns 2-3 and 4-5 are the
            # two keypoints' coordinates. A Python `if` on a symbolic tensor
            # fails in graph mode, so the branching is expressed by masking
            # with the indicator values instead.
            total_loss = K.sum(K.square(yTrue[:, 0:2] - yPred[:, 0:2]))
            # Penalize a keypoint's coordinates only where it is present
            # (indicator == 1); otherwise re-penalize the indicator itself.
            total_loss += K.sum(yTrue[:, 0:1] * K.square(yTrue[:, 2:4] - yPred[:, 2:4])
                                + (1 - yTrue[:, 0:1]) * K.square(yTrue[:, 0:1] - yPred[:, 0:1]))
            total_loss += K.sum(yTrue[:, 1:2] * K.square(yTrue[:, 4:6] - yPred[:, 4:6])
                                + (1 - yTrue[:, 1:2]) * K.square(yTrue[:, 1:2] - yPred[:, 1:2]))
            return K.sqrt(total_loss)

        self.optimizer = optimizers.Adadelta(lr=learning_rate, clipnorm=2.)

        model.compile(optimizer=self.optimizer, loss=normalize_loss,
                      metrics=[metrics.cosine, metrics.mse])
        if existing_weight is not None:
            model.load_weights(existing_weight)
        else:
            all_weights = os.listdir(os.path.join("models", "keypoint_model"))
            if ".DS_Store" in all_weights:
                all_weights.remove(".DS_Store")

            if len(all_weights) > 0:
                last_copy = sorted(all_weights)[-1]
                model.load_weights(os.path.join("models", "keypoint_model", last_copy))

        if fine_tuning_model is not None:
            for index in range(3):
                weights = fine_tuning_model.layers[index].get_weights()
                model.layers[index].set_weights(weights)
            print("Fine tuning model copied")

        model.save(os.path.join("models", "keypoint_model.h5"))
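
Hypothetical instantiation of the class above (the class name KeypointTrainer is an assumption; the excerpt shows only its __init__):

trainer = KeypointTrainer(training_batch_size=50,
                          test_percentage=0.05,
                          learning_rate=0.002,
                          memory_safe=False)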