Code Example #1
File: model_bs.py  Project: mposwiata/SpecialeMaster
# Imports assumed by this snippet (TensorFlow 2.x Keras)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.optimizers import Adam, schedules
from tensorflow.keras.callbacks import EarlyStopping


def model_train(train_input, train_output, test_input, test_output, nodes,
                batch_size, learning_rate, file_name, decay_steps, decay_rate,
                validation_split):

    # Model creation
    model = Sequential()

    # Layer 1
    model.add(Dense(nodes, input_shape=(4, )))
    model.add(Activation('softplus'))  # Softplus, f(x) = ln(1 + e^x)
    model.add(Dropout(0.1))

    # Layer 2
    model.add(Dense(nodes))
    model.add(Activation('softplus'))  # Softplus, f(x) = ln(1 + e^x)
    model.add(Dropout(0.1))

    # Layer 3
    model.add(Dense(nodes))
    model.add(Activation('softplus'))  # Softplus, f(x) = ln(1 + e^x)
    model.add(Dropout(0.1))

    # Layer 4, output
    model.add(Dense(1))
    model.add(Activation('elu'))

    lr_schedule = schedules.ExponentialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=decay_steps,
        decay_rate=decay_rate)

    opt = Adam(learning_rate=lr_schedule)

    model.compile(
        loss='mse',  #mean squared error
        optimizer=opt)

    callbacks_list = [
        EarlyStopping(monitor='val_loss', patience=15)
    ]  # stop if the validation loss fails to improve for 15 consecutive epochs

    model.fit(train_input,
              train_output,
              epochs=30,
              batch_size=batch_size,
              validation_split=validation_split,
              verbose=1,
              callbacks=callbacks_list)

    model.save(file_name)

    return model.evaluate(test_input, test_output, verbose=2)
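Usage note: a minimal invocation sketch for the function above, with synthetic data (the array shapes and hyperparameter values are illustrative assumptions, not taken from the project):

import numpy as np

# Hypothetical data: 4 input features per sample, one regression target
x = np.random.rand(1000, 4)
y = np.random.rand(1000, 1)

test_mse = model_train(x[:800], y[:800], x[800:], y[800:],
                       nodes=64, batch_size=32, learning_rate=1e-3,
                       file_name='bs_model.h5', decay_steps=1000,
                       decay_rate=0.96, validation_split=0.1)
print('test MSE:', test_mse)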
Code Example #2
File: HighResST.py  Project: zunguzum00/Artistia
# Imports assumed by this snippet; the project-specific helpers
# preprocess_image, deprocess_image and compute_loss_and_grads are defined
# elsewhere in HighResST.py (a sketch of the image helpers follows the example).
import os
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.applications import VGG19
from tensorflow.keras.optimizers import SGD, schedules
from tensorflow.keras.preprocessing import image


def styleTransfer(sourcepath, stylepath):
    path = 'static/uploads'
    base_image_path = os.path.join(path, "source.png")
    style_reference_image_path = os.path.join(path, "style.png")
    result_prefix = "static/results/result"
    global content_weight
    global total_variation_weight
    global style_weight
    total_variation_weight = 1e-6
    style_weight = 3e-6
    content_weight = 5e-7
    width, height = image.load_img(base_image_path).size
    global img_nrows
    global img_ncols
    img_nrows = 400
    img_ncols = int(width * img_nrows / height)
    model = VGG19(weights="imagenet", include_top=False)
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
    global feature_extractor
    feature_extractor = Model(inputs=model.inputs, outputs=outputs_dict)
    global style_layer_names
    style_layer_names = [
        "block1_conv1",
        "block2_conv1",
        "block3_conv1",
        "block4_conv1",
        "block5_conv1",
    ]
    global content_layer_name
    content_layer_name = "block5_conv2"
    optimizer = SGD(schedules.ExponentialDecay(
        initial_learning_rate=100.0, decay_steps=100, decay_rate=0.96))
    base_image = preprocess_image(base_image_path)
    style_reference_image = preprocess_image(style_reference_image_path)
    combination_image = tf.Variable(preprocess_image(base_image_path))
    iterations = 100
    for i in range(1, iterations + 1):
        loss, grads = compute_loss_and_grads(
            combination_image, base_image, style_reference_image)
        optimizer.apply_gradients([(grads, combination_image)])
        print("Iteration %d: loss=%.2f" % (i, (loss)))
    print(combination_image)
    img = deprocess_image(combination_image.numpy())
    fname = result_prefix + ".png"
    image.save_img(fname, img)
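The image helpers are not shown above. A sketch of the two of them, assuming they follow the standard Keras neural style transfer recipe (the gram-matrix-based compute_loss_and_grads is omitted here):

import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import vgg19
from tensorflow.keras.preprocessing import image

def preprocess_image(image_path):
    # Load, resize to (img_nrows, img_ncols), apply VGG19's ImageNet preprocessing
    img = image.load_img(image_path, target_size=(img_nrows, img_ncols))
    img = np.expand_dims(image.img_to_array(img), axis=0)
    return tf.convert_to_tensor(vgg19.preprocess_input(img))

def deprocess_image(x):
    x = x.reshape((img_nrows, img_ncols, 3))
    # Undo the mean-centering applied by vgg19.preprocess_input (BGR channel means)
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]  # BGR -> RGB
    return np.clip(x, 0, 255).astype("uint8")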
Code Example #3
    def __get_model(self):
        # Inception_Block is a project-specific helper (not shown here)
        x = Input(shape=self.input_shape)
        hidden = Inception_Block(x, 32, 64, 128, 32)
        hidden = Inception_Block(hidden, 16, 32, 64, 16)
        hidden = Inception_Block(hidden, 8, 16, 32, 8)
        hidden = Inception_Block(hidden, 4, 8, 16, 4)
        hidden = Conv2D(16, (1, 1), (1, 1), padding='same')(hidden)
        hidden = BatchNormalization()(hidden)
        hidden = Activation('relu')(hidden)
        out = Conv2D(1, (1, 1), strides=(1, 1), padding='same')(hidden)
        model = Model(x, out)
        initial_learning_rate = 0.02
        lr_schedule = schedules.ExponentialDecay(initial_learning_rate,
                                                 decay_steps=100,
                                                 decay_rate=0.96,
                                                 staircase=True)
        model.compile(loss='mse',
                      metrics=['mae'],
                      optimizer=Adam(learning_rate=lr_schedule))
        model.summary()
        return model
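With staircase=True, ExponentialDecay uses the exponent floor(step / decay_steps), so the learning rate drops in discrete jumps instead of decaying continuously. A small self-contained check:

from tensorflow.keras.optimizers import schedules

lr = schedules.ExponentialDecay(0.02, decay_steps=100,
                                decay_rate=0.96, staircase=True)
for step in (0, 99, 100, 250):
    print(step, float(lr(step)))
# prints 0.02, 0.02, 0.0192, 0.018432, i.e. 0.02 * 0.96 ** (step // 100)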
Code Example #4
    def train_model(self):
        print('CNN Model')

        x_train = None
        y_train = None
        x_validation = None
        y_validation = None

        bufferedNumpy = Tools.loadFromBuffer(self.path, self.dataPath)

        if bufferedNumpy is False:
            self.preprocess()
            x_train = np.asarray(self.train_dataset)
            y_train = np.asarray(self.onehotTrainLabels)
            x_validation = np.asarray(self.validation_dataset)
            y_validation = np.asarray(self.onehotValidationLabels)
            Tools.bufferFile(self.path, self.dataPath, np.asarray([x_train, y_train, x_validation, y_validation]))
        else:
            x_train = bufferedNumpy[0]
            y_train = bufferedNumpy[1]
            x_validation = bufferedNumpy[2]
            y_validation = bufferedNumpy[3]

        [x_validation, y_validation] = Tools.shuffleData(x_validation, y_validation)
        [x_train, y_train] = Tools.shuffleData(x_train, y_train)

        sequence = x_train.shape[0]
        joints = x_train.shape[1]
        frames = x_train.shape[2]
        coords = x_train.shape[3]
        # channels = x_train.shape[4]

        self.label_size = y_train.shape[1]

        lr_schedule = schedules.ExponentialDecay(
            initial_learning_rate=1e-2,
            decay_steps=10000,
            decay_rate=0.9)

        model = Sequential()


        model.add(Conv2D(20,
                         activation='tanh',
                         kernel_initializer='he_uniform',
                         data_format='channels_last',
                         input_shape=(joints, frames, coords),  # e.g. 30 joints, 60 frames, 3 coordinate channels (x, y, z)
                         kernel_size=(3, 3)))
        model.add(MaxPooling2D(pool_size=(2, 2), padding="same")) # Maxpool

        model.add(Conv2D(50, kernel_size=(2, 2), activation='tanh'))
        model.add(MaxPooling2D(pool_size=(2, 2),  padding="same"))

        model.add(Conv2D(100, kernel_size=(3, 3), activation='tanh'))
        model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

        convJointSize = model.output_shape[1]
        convFrameSize = model.output_shape[2]
        model.add(Reshape((convJointSize, convFrameSize, -1)))
        model.add(Permute((2, 1, 3))) # Permuting the conv output shape such that frames are given as the sequential input for the LSTM layers
        model.add(Reshape((convFrameSize, -1))) # reshape to the (timesteps, features) LSTM input, collapsing joints and channels into the feature axis
        model.add(LSTM(units=20, return_sequences=True, recurrent_dropout=0.2))
        model.add(LSTM(units=100, recurrent_dropout=0.1))
        model.add(Dropout(0.2))
        model.add(Dense(300, activation='tanh', kernel_regularizer=regularizers.l2(0.1)))
        model.add(Dropout(0.2))
        model.add(Dense(100, activation='tanh', kernel_regularizer=regularizers.l2(0.1)))
        model.add(Flatten())

        model.add(Dense(self.label_size, activation='softmax'))  # Classification
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=lr_schedule),  # apply the decay schedule defined above
                      metrics=['accuracy'])
        print((joints, frames, coords))
        model.summary()

        mcp_save = ModelCheckpoint(self.path + 'saved-models/' + self.modelType + '-bestWeights.h5',
                                   save_best_only=True,
                                   monitor='val_loss',
                                   mode='min')
        history = model.fit(x_train, y_train, epochs=self.epochs, batch_size=self.batch_size,
                            validation_data=(x_validation, y_validation), callbacks=[mcp_save])
        plt.plot(history.history['loss'], label='train')
        plt.plot(history.history['val_loss'], label='validation')
        plt.legend()
        plt.show()

        
        Tools.saveModel(self.path, model, self.modelType)
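After training, the best epoch's weights can be restored from the checkpoint written by ModelCheckpoint above (a usage sketch; path and modelType stand in for the instance attributes self.path and self.modelType):

from tensorflow.keras.models import load_model

best_model = load_model(path + 'saved-models/' + modelType + '-bestWeights.h5')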
Code Example #5
    def train_model(self):
        print('CNN Model')

        x_train = None
        y_train = None
        x_validation = None
        y_validation = None

        bufferedNumpy = Tools.loadFromBuffer(self.path, self.dataPath)

        if bufferedNumpy is False:
            print('Preprocessing files')
            self.preprocess()
            x_train = np.asarray(self.train_dataset)
            y_train = np.asarray(self.onehotTrainLabels)
            x_validation = np.asarray(self.validation_dataset)
            y_validation = np.asarray(self.onehotValidationLabels)
            Tools.bufferFile(
                self.path, self.dataPath,
                np.asarray([x_train, y_train, x_validation, y_validation]))
        else:
            x_train = bufferedNumpy[0]
            y_train = bufferedNumpy[1]
            x_validation = bufferedNumpy[2]
            y_validation = bufferedNumpy[3]

        sequence = x_train.shape[0]
        joints = x_train.shape[1]
        frames = x_train.shape[2]
        coords = x_train.shape[3]
        channels = x_train.shape[4]

        self.label_size = y_train.shape[1]

        lr_schedule = schedules.ExponentialDecay(initial_learning_rate=1e-2,
                                                 decay_steps=10000,
                                                 decay_rate=0.9)

        model = Sequential()

        model.add(
            Conv3D(
                20,  # (None, 30, 118, 3, 20)
                activation='tanh',
                kernel_initializer='he_uniform',
                data_format='channels_last',
                input_shape=(joints, frames, coords, channels),
                kernel_size=(3, 3, 1)))
        model.add(
            MaxPooling3D(
                pool_size=(2, 2, 1),
                strides=(1, 1, 1),
                data_format='channels_last',
            ))  # (None, 29, 117, 3, 20)
        model.add(Dropout(0.2))  # (None, 29, 117, 3, 20)
        model.add(Conv3D(50, kernel_size=(2, 2, 1),
                         activation='tanh'))  # (None, 28, 116, 3, 50)

        model.add(MaxPooling3D(pool_size=(2, 2, 1)))  # (None, 14, 58, 3, 50)
        model.add(Conv3D(100, kernel_size=(3, 3, 1), activation='tanh'))
        model.add(MaxPooling3D(pool_size=(2, 2, 1)))
        model.add(Dropout(0.2))
        model.add(Dense(300))
        model.add(Dropout(0.2))
        model.add(Dense(100))

        model.add(Flatten())

        model.add(Dense(self.label_size,
                        activation='softmax'))  # Classification
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=lr_schedule),  # apply the decay schedule defined above
                      metrics=['accuracy'])

        model.summary()

        mcp_save = ModelCheckpoint(self.path + 'saved-models/' +
                                   self.modelType + '-bestWeights.h5',
                                   save_best_only=True,
                                   monitor='val_loss',
                                   mode='min')
        history = model.fit(x_train,
                            y_train,
                            epochs=self.epochs,
                            batch_size=self.batch_size,
                            validation_data=(x_validation, y_validation),
                            callbacks=[mcp_save])
        print(history.history.keys())
        print(mcp_save.best)
        plt.plot(history.history['loss'], label='train')
        plt.plot(history.history['val_loss'], label='validation')
        plt.legend()
        plt.show()

        Tools.saveModel(self.path, model, self.modelType)
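Note that the two Dense layers above come before Flatten, so they act pointwise on the last (channel) axis of the feature map rather than on a flattened vector. A quick shape check (the input shape here is a hypothetical Conv3D output):

import tensorflow as tf
from tensorflow.keras.layers import Dense

x = tf.zeros((1, 14, 58, 3, 100))
print(Dense(300)(x).shape)  # (1, 14, 58, 3, 300): only the last axis changes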
Code Example #6
File: LSTM_s.py  Project: klem95/GestureRecognitionML
    def train_model(self):
        print('LSTM Model')

        x_train = None
        y_train = None
        x_validation = None
        y_validation = None

        bufferedNumpy = Tools.loadFromBuffer(self.path, self.dataPath)

        if bufferedNumpy is False:
            self.preprocess()
            x_train = np.asarray(self.train_dataset)
            y_train = np.asarray(self.onehotTrainLabels)
            x_validation = np.asarray(self.validation_dataset)
            y_validation = np.asarray(self.onehotValidationLabels)
            Tools.bufferFile(self.path, self.dataPath, np.asarray([x_train, y_train, x_validation, y_validation]))
        else:
            x_train = bufferedNumpy[0]
            y_train = bufferedNumpy[1]
            x_validation = bufferedNumpy[2]
            y_validation = bufferedNumpy[3]

        [x_validation, y_validation] = Tools.shuffleData(x_validation, y_validation)
        [x_train, y_train] = Tools.shuffleData(x_train, y_train)

        sequence = x_train.shape[0]
        joints = x_train.shape[1]
        frames = x_train.shape[2]
        coords = x_train.shape[3]
        # channels = x_train.shape[4]

        print(y_train.shape)

        # Reshape to 3-D (samples, timesteps, features) for the LSTM input
        x_train = x_train.reshape((x_train.shape[0], -1, x_train.shape[2]))
        x_validation = x_validation.reshape((x_validation.shape[0], -1, x_validation.shape[2]))

        self.label_size = y_train.shape[1]

        lr_schedule = schedules.ExponentialDecay(
            initial_learning_rate=0.05,
            decay_steps=10000,
            decay_rate=0.9)

        model = Sequential()
        model.add(LSTM(50,
                       recurrent_activation='tanh',
                       recurrent_dropout=0.2,
                       kernel_initializer='he_uniform',
                       return_sequences=True,
                       input_shape=(x_train.shape[1], x_train.shape[2])))
        model.add(LSTM(100,
                       recurrent_dropout=0.3,
                       recurrent_activation='tanh',
                       return_sequences=True,
                       kernel_initializer='he_uniform'))
        model.add(Dropout(0.2))
        model.add(Dense(200,  activation='tanh', kernel_regularizer=regularizers.l2(0.1)))
        model.add(Dropout(0.2))
        model.add(Dense(100,  activation='tanh', kernel_regularizer=regularizers.l2(0.1)))
        model.add(Flatten())

        model.add(Dense(self.label_size, activation='softmax'))  # Classification
        model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=lr_schedule),
                      metrics=['accuracy'])
        # print((joints, frames, coords, channels))
        model.summary()

        mcp_save = ModelCheckpoint(self.path + 'saved-models/' + self.modelType + '-bestWeights.h5',
                                   save_best_only=True,
                                   monitor='val_loss',
                                   mode='min')
        history = model.fit(x_train, y_train, epochs=self.epochs, batch_size=self.batch_size,
                            validation_data=(x_validation, y_validation), callbacks=[mcp_save])
        plt.plot(history.history['loss'], label='train')
        plt.plot(history.history['val_loss'], label='validation')
        plt.legend()
        plt.show()

        Tools.saveModel(self.path, model, self.modelType)
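Since the schedule is passed to Adam with the default staircase=False, the effective learning rate decays continuously as 0.05 * 0.9 ** (step / 10000). The schedule can be evaluated directly at any step:

from tensorflow.keras.optimizers import schedules

lr_schedule = schedules.ExponentialDecay(initial_learning_rate=0.05,
                                         decay_steps=10000, decay_rate=0.9)
for step in (0, 5000, 10000):
    print(step, float(lr_schedule(step)))
# prints 0.05, ~0.04743, 0.045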
Code Example #7
# Imports assumed by this snippet (TensorFlow 2.x Keras)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization
from tensorflow.keras.optimizers import Adam, schedules
from tensorflow.keras.callbacks import EarlyStopping


def model_train(train_input, train_output, test_input, test_output, nodes,
                batch_size, learning_rate, file_name, decay_steps, decay_rate,
                validation_split, dropout, batch_normalization, biased,
                patience):

    # Model creation
    model = Sequential()

    # Normalization layer / layer 1
    if batch_normalization:
        model.add(BatchNormalization(input_shape=(14, )))

        model.add(
            Dense(nodes,
                  use_bias=biased,
                  kernel_initializer='random_normal',
                  bias_initializer='zeros'))
        model.add(Activation('softplus'))
        model.add(Dropout(dropout))
    else:
        model.add(
            Dense(nodes,
                  input_shape=(14, ),
                  use_bias=False,
                  kernel_initializer='random_normal',
                  bias_initializer='zeros'))
        model.add(Activation('softplus'))
        model.add(Dropout(dropout))

    # Layer 2, output
    model.add(Dense(10))
    #model.add(Activation('elu'))

    lr_schedule = schedules.ExponentialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=decay_steps,
        decay_rate=decay_rate)

    opt = Adam(learning_rate=lr_schedule)

    model.compile(
        loss='mse',  #mean squared error
        optimizer=opt)

    callbacks_list = [
        EarlyStopping(monitor='val_loss', patience=patience)
    ]  # stop if the validation loss fails to improve for `patience` consecutive epochs

    model.fit(train_input,
              train_output,
              epochs=100,
              batch_size=batch_size,
              validation_data=(test_input, test_output),
              verbose=0,
              callbacks=callbacks_list,
              shuffle=True)

    model.save(file_name)

    return 0
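A minimal invocation sketch for this variant (shapes and hyperparameters are illustrative assumptions; the network expects 14 input features and emits 10 outputs, and validation_split is accepted but unused since fit() validates on the test data):

import numpy as np

x = np.random.rand(1000, 14)
y = np.random.rand(1000, 10)

model_train(x[:800], y[:800], x[800:], y[800:],
            nodes=128, batch_size=32, learning_rate=1e-3,
            file_name='model.h5', decay_steps=1000, decay_rate=0.96,
            validation_split=0.0, dropout=0.1, batch_normalization=True,
            biased=True, patience=10)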
Code Example #8
    def train_model(self):
        print('CNN Model')

        x_train = None
        y_train = None
        x_validation = None
        y_validation = None

        bufferedNumpy = Tools.loadFromBuffer(self.path, self.dataPath)

        if bufferedNumpy is False:
            self.preprocess()
            x_train = np.asarray(self.train_dataset)
            y_train = np.asarray(self.onehotTrainLabels)
            x_validation = np.asarray(self.validation_dataset)
            y_validation = np.asarray(self.onehotValidationLabels)
            Tools.bufferFile(
                self.path, self.dataPath,
                np.asarray([x_train, y_train, x_validation, y_validation]))
        else:
            x_train = bufferedNumpy[0]
            y_train = bufferedNumpy[1]
            x_validation = bufferedNumpy[2]
            y_validation = bufferedNumpy[3]

        [x_validation,
         y_validation] = Tools.shuffleData(x_validation, y_validation)
        [x_train, y_train] = Tools.shuffleData(x_train, y_train)

        # Reorder to (samples, frames, joints, coords), then flatten joints
        # and coords into a single feature axis per frame
        x_validation = np.transpose(x_validation, (0, 2, 1, 3))
        x_validation = np.reshape(
            x_validation, (x_validation.shape[0], x_validation.shape[1], -1))
        x_train = np.transpose(x_train, (0, 2, 1, 3))
        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], -1))
        print(x_train.shape)

        sequence = x_train.shape[0]
        frames = x_train.shape[1]
        joints = x_train.shape[2]
        # channels = x_train.shape[4]

        self.label_size = y_train.shape[1]

        lr_schedule = schedules.ExponentialDecay(initial_learning_rate=1e-2,
                                                 decay_steps=10000,
                                                 decay_rate=0.9)

        model = Sequential()
        model.add(
            Conv1D(300,
                   input_shape=(frames, joints),
                   kernel_size=2,
                   strides=1,
                   activation='tanh'))
        model.add(MaxPooling1D(pool_size=2, strides=1, padding="same"))
        model.add(
            Conv1D(300,
                   kernel_size=2,
                   strides=1,
                   activation='tanh'))
        #model.add(Permute((2, 1, 3))) # Permuting the conv output shape such that frames are given as the sequential input for the LSTM layers

        model.add(
            LSTM(joints,
                 activation='tanh',
                 kernel_initializer='he_uniform',
                 return_sequences=True))
        model.add(
            LSTM(units=joints,
                 return_sequences=True,
                 recurrent_dropout=0.1))

        model.add(Dropout(0.2))
        model.add(
            Dense(300,
                  activation='tanh',
                  kernel_regularizer=regularizers.l2(0.1)))
        model.add(Dropout(0.2))
        model.add(
            Dense(100,
                  activation='tanh',
                  kernel_regularizer=regularizers.l2(0.1)))
        model.add(Flatten())

        model.add(Dense(self.label_size,
                        activation='softmax'))  # Classification
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=lr_schedule),  # apply the decay schedule defined above
                      metrics=['accuracy'])
        print((joints, frames))
        model.summary()

        mcp_save = ModelCheckpoint(self.path + 'saved-models/' +
                                   self.modelType + '-bestWeights.h5',
                                   save_best_only=True,
                                   monitor='val_loss',
                                   mode='min')
        history = model.fit(x_train,
                            y_train,
                            epochs=self.epochs,
                            batch_size=self.batch_size,
                            validation_data=(x_validation, y_validation),
                            callbacks=[mcp_save])
        plt.plot(history.history['loss'], label='train')
        plt.plot(history.history['val_loss'], label='validation')
        plt.legend()
        plt.show()

        Tools.saveModel(self.path, model, self.modelType)