Example #1
    def preprocess(self):
        all_files = glob2.glob(self.dataPath + "/*.csv")
        largestFrameCount = Tools.biggestDocLength(self.dataPath)

        i = 0
        for filename in sorted(all_files):
            with open(filename, newline='') as csvfile:
                print('loading: ' + filename)
                data = genfromtxt(csvfile, delimiter=';')
                result = Tools.format(data, largestFrameCount)

                if i % self.validationDataEvery == 0:
                    self.validation_dataset.append(result)
                    self.validationFiles.append(filename)
                else:
                    self.train_dataset.append(result)
                    self.trainFiles.append(filename)
            i += 1

        self.onehotTrainLabels = Tools.encode_labels(self.trainFiles)
        self.onehotValidationLabels = Tools.encode_labels(self.validationFiles)
        print('train_dataset shape')
        print(np.asarray(self.train_dataset).shape)
        print('training onehot shape:')
        print(np.asarray(self.onehotTrainLabels).shape)
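
The loop above sends every file whose index satisfies i % validationDataEvery == 0 to the validation set and all others to training. A quick standalone check of the resulting ratio, assuming ten files and the default split of 5 (the file names are made up):

files = ['sample_%d.csv' % i for i in range(10)]
validation = [f for i, f in enumerate(files) if i % 5 == 0]
train = [f for i, f in enumerate(files) if i % 5 != 0]
print(len(validation), len(train))  # 2 8 -> a 20/80 validation/train split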
Example #2
    def predict(self, data, columnSize, zeroPad):
        print('data')
        print(data)

        formattedData = Tools.format(data, columnSize, zeroPad, removeFirstLine=False)
        batch = np.asarray([formattedData])  # add a leading batch dimension
        print(batch.shape)
        score = self.model.predict(batch, verbose=0)
        return score
Example #3
    def predict(self, data, columnSize, zeroPad):
        formattedData = Tools.format(data,
                                     columnSize,
                                     zeroPad,
                                     removeFirstLine=False)
        batch = np.asarray([formattedData])  # add a leading batch dimension
        batch = np.transpose(batch, (0, 2, 1, 3))  # reorder to (batch, frames, joints, coords)
        batch = np.reshape(batch, (batch.shape[0], batch.shape[1], -1))  # flatten joints * coords per frame
        score = self.model.predict(batch, verbose=0)
        return score
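
A minimal call sketch for the second variant, assuming a single recording shaped (joints, frames, coords); the classifier instance and the concrete sizes are placeholders, not part of the examples above:

import numpy as np

sample = np.random.rand(30, 60, 3)  # hypothetical (joints, frames, coords) recording
score = classifier.predict(sample, columnSize=60, zeroPad=True)  # `classifier` instance assumed
print(np.argmax(score, axis=-1))    # index of the highest-probability label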
Example #4
    def __init__(self, lr, bs, e, split, f, loadModel=False, path=''):
        self.path = path
        self.batch_size = 20 if bs is None else bs                 # bs: batch size
        self.learning_rate = 0.01 if lr is None else lr            # lr: learning rate
        self.epochs = 400 if e is None else e                      # e: training epochs
        self.validationDataEvery = 5 if split is None else split   # split: every Nth file goes to validation
        self.label_size = 10
        self.dataPath = 'Data' if f is None else f                 # f: data folder
        self.trained_model_path = 'Trained_models'  # use your path
        self.time_steps = 0
        self.feature_size = 0
        self.labels = []
        self.modelType = 'cnnlstm'

        self.train_dataset = []
        self.validation_dataset = []
        self.trainFiles = []
        self.validationFiles = []

        if loadModel:
            self.model = Tools.loadModel(self.path, self.modelType)
        else:
            self.model = None
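
Passing None for any of the positional arguments falls back to the defaults above. A hedged instantiation sketch (the enclosing class name is not shown in these examples, so ActivityModel is a placeholder):

# All None -> batch_size=20, learning_rate=0.01, epochs=400,
# every 5th file to validation, data read from 'Data'.
model = ActivityModel(lr=None, bs=None, e=None, split=None, f=None)
custom = ActivityModel(lr=1e-3, bs=32, e=100, split=4, f='MyData')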
Example #5
    def train_model(self):
        print('Multi-input LSTM Model')

        x_train = None
        y_train = None
        x_validation = None
        y_validation = None

        bufferedNumpy = Tools.loadFromBuffer(self.path, self.dataPath)

        if bufferedNumpy is False:  # identity check; == on a loaded numpy buffer is elementwise
            self.preprocess()
            x_train = np.asarray(self.train_dataset)
            y_train = np.asarray(self.onehotTrainLabels)
            x_validation = np.asarray(self.validation_dataset)
            y_validation = np.asarray(self.onehotValidationLabels)
            Tools.bufferFile(
                self.path, self.dataPath,
                np.asarray([x_train, y_train, x_validation, y_validation]))
        else:
            x_train = bufferedNumpy[0]
            y_train = bufferedNumpy[1]
            x_validation = bufferedNumpy[2]
            y_validation = bufferedNumpy[3]

        [x_validation,
         y_validation] = Tools.shuffleData(x_validation, y_validation)
        [x_train, y_train] = Tools.shuffleData(x_train, y_train)

        sequence = x_train.shape[0]
        joints = x_train.shape[1]
        frames = x_train.shape[2]
        coords = x_train.shape[3]

        trunk_joint_count = 3
        upper_region_joint_count = 8
        lower_region_joint_count = 6

        x_train = x_train.reshape((x_train.shape[0], -1, x_train.shape[2]))
        x_validation = x_validation.reshape(
            (x_validation.shape[0], -1, x_validation.shape[2]))

        trunk_input = Input(shape=(frames, trunk_joint_count * 3))
        upper_left_input = Input(shape=(frames, upper_region_joint_count * 3))
        upper_right_input = Input(shape=(frames, upper_region_joint_count * 3))
        # The two lower-region inputs are defined but never connected to the model below.
        lower_left_input = Input(shape=(frames, lower_region_joint_count * 3))
        lower_right_input = Input(shape=(frames, lower_region_joint_count * 3))

        trunk_lstm_0 = LSTM(units=20,
                            return_sequences=True,
                            recurrent_dropout=0.2)(trunk_input)
        upper_left_lstm_0 = LSTM(units=20,
                                 return_sequences=True,
                                 recurrent_dropout=0.2)(upper_left_input)
        upper_right_lstm_0 = LSTM(units=20,
                                  return_sequences=True,
                                  recurrent_dropout=0.2)(upper_right_input)

        concat_layer = concatenate(
            [trunk_lstm_0, upper_left_lstm_0, upper_right_lstm_0])
        final_lstm_layer = LSTM(units=20,
                                return_sequences=True,
                                recurrent_dropout=0.2)(concat_layer)

        flatten = Flatten()(final_lstm_layer)

        output = Dense(self.label_size, activation='softmax')(flatten)
        model = Model(inputs=[trunk_input, upper_left_input, upper_right_input],
                      outputs=output)  # all three connected inputs; the original listed only two, which disconnects the graph

        model.summary()

        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(),
                      metrics=['accuracy'])

        mcp_save = ModelCheckpoint(self.path + 'saved-models/' +
                                   self.modelType + '-bestWeights.h5',
                                   save_best_only=True,
                                   monitor='val_loss',
                                   mode='min')
        # NOTE: with three Input layers, x_train / x_validation must each be a
        # list of three region arrays (trunk, upper-left, upper-right) in the
        # same order as `inputs` above; a single array will not fit this model.
        history = model.fit(x_train,
                            y_train,
                            epochs=self.epochs,
                            batch_size=self.batch_size,
                            validation_data=(x_validation, y_validation),
                            callbacks=[mcp_save])
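
Because the functional model above declares three Input layers, fit expects a matching list of three arrays. A minimal numpy sketch of the region split, assuming the feature axis is ordered trunk, upper-left, upper-right (a hypothetical ordering; the real joint-to-region mapping depends on the dataset):

import numpy as np

frames, trunk_f, upper_f = 60, 3 * 3, 8 * 3             # features = joints * xyz
x = np.random.rand(100, frames, trunk_f + 2 * upper_f)  # (samples, frames, features)

x_trunk = x[:, :, :trunk_f]
x_upper_left = x[:, :, trunk_f:trunk_f + upper_f]
x_upper_right = x[:, :, trunk_f + upper_f:]
print(x_trunk.shape, x_upper_left.shape, x_upper_right.shape)
# These three arrays would then replace x_train in model.fit([...], y_train, ...).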
Example #6
    def train_model(self):
        print('CNN-LSTM Model')

        x_train = None
        y_train = None
        x_validation = None
        y_validation = None

        bufferedNumpy = Tools.loadFromBuffer(self.path, self.dataPath)

        if bufferedNumpy is False:  # identity check; == on a loaded numpy buffer is elementwise
            self.preprocess()
            x_train = np.asarray(self.train_dataset)
            y_train = np.asarray(self.onehotTrainLabels)
            x_validation = np.asarray(self.validation_dataset)
            y_validation = np.asarray(self.onehotValidationLabels)
            Tools.bufferFile(self.path, self.dataPath, np.asarray([x_train, y_train, x_validation, y_validation]))
        else:
            x_train = bufferedNumpy[0]
            y_train = bufferedNumpy[1]
            x_validation = bufferedNumpy[2]
            y_validation = bufferedNumpy[3]

        [x_validation, y_validation] = Tools.shuffleData(x_validation, y_validation)
        [x_train, y_train] = Tools.shuffleData(x_train, y_train)

        sequence = x_train.shape[0]
        joints = x_train.shape[1]
        frames = x_train.shape[2]
        coords = x_train.shape[3]
        # channels = x_train.shape[4]

        self.label_size = y_train.shape[1]

        lr_schedule = schedules.ExponentialDecay(
            initial_learning_rate=1e-2,
            decay_steps=10000,
            decay_rate=0.9)

        model = Sequential()


        model.add(Conv2D(20,
                         activation='tanh',
                         kernel_initializer='he_uniform',
                         data_format='channels_last',
                         input_shape=(joints, frames, coords),  # e.g. 30 joints, 60 frames, 3 coordinate channels (x, y, z)
                         kernel_size=(3, 3)))
        model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))

        model.add(Conv2D(50, kernel_size=(2, 2), activation='tanh'))
        model.add(MaxPooling2D(pool_size=(2, 2),  padding="same"))

        model.add(Conv2D(100, kernel_size=(3, 3), activation='tanh'))
        model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

        convJointSize = model.output_shape[1]
        convFrameSize = model.output_shape[2]
        model.add(Reshape((convJointSize, convFrameSize, -1)))
        model.add(Permute((2, 1, 3)))  # put frames first so they become the LSTMs' time axis
        model.add(Reshape((convFrameSize, -1)))  # (timesteps, features): collapse joints and channels into one feature axis
        model.add(LSTM(units=20, return_sequences=True, recurrent_dropout=0.2))
        model.add(LSTM(units=100, recurrent_dropout=0.1))
        model.add(Dropout(0.2))
        model.add(Dense(300, activation='tanh', kernel_regularizer=regularizers.l2(0.1)))
        model.add(Dropout(0.2))
        model.add(Dense(100, activation='tanh', kernel_regularizer=regularizers.l2(0.1)))
        model.add(Flatten())

        model.add(Dense(self.label_size, activation='softmax'))  # Classification
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=lr_schedule),  # wire in the decay schedule defined above
                      metrics=['accuracy'])
        print((joints, frames, coords))
        model.summary()

        mcp_save = ModelCheckpoint(self.path + 'saved-models/' + self.modelType + '-bestWeights.h5',
                                   save_best_only=True,
                                   monitor='val_loss',
                                   mode='min')
        history = model.fit(x_train, y_train, epochs=self.epochs, batch_size=self.batch_size,
                            validation_data=(x_validation, y_validation), callbacks=[mcp_save])
        plt.plot(history.history['loss'], label='train')
        plt.plot(history.history['val_loss'], label='validation')
        plt.legend()
        plt.show()

        
        Tools.saveModel(self.path, model, self.modelType)
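
The Permute/Reshape pair above is the key step: the convolutional output (batch, joints', frames', channels) is reordered so that frames become the LSTMs' time axis. The same transformation in plain numpy, with made-up sizes:

import numpy as np

x = np.zeros((1, 4, 14, 100))              # hypothetical conv output: (batch, joints', frames', channels)
x = np.transpose(x, (0, 2, 1, 3))          # frames first: (1, 14, 4, 100)
x = x.reshape(x.shape[0], x.shape[1], -1)  # (1, 14, 400) -> (batch, timesteps, features)
print(x.shape)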
Example #7
    def train_model(self):
        print('LSTM Model')

        x_train = None
        y_train = None
        x_validation = None
        y_validation = None

        bufferedNumpy = Tools.loadFromBuffer(self.path, self.dataPath)

        if bufferedNumpy is False:  # identity check; == on a loaded numpy buffer is elementwise
            self.preprocess()
            x_train = np.asarray(self.train_dataset)
            y_train = np.asarray(self.onehotTrainLabels)
            x_validation = np.asarray(self.validation_dataset)
            y_validation = np.asarray(self.onehotValidationLabels)
            Tools.bufferFile(self.path, self.dataPath, np.asarray([x_train, y_train, x_validation, y_validation]))
        else:
            x_train = bufferedNumpy[0]
            y_train = bufferedNumpy[1]
            x_validation = bufferedNumpy[2]
            y_validation = bufferedNumpy[3]

        [x_validation, y_validation] = Tools.shuffleData(x_validation, y_validation)
        [x_train, y_train] = Tools.shuffleData(x_train, y_train)

        sequence = x_train.shape[0]
        joints = x_train.shape[1]
        frames = x_train.shape[2]
        coords = x_train.shape[3]
        # channels = x_train.shape[4]

        print(y_train.shape)

        x_train = x_train.reshape((x_train.shape[0], -1, x_train.shape[2]))
        x_validation = x_validation.reshape((x_validation.shape[0], -1, x_validation.shape[2]))


        self.label_size = y_train.shape[1]

        lr_schedule = schedules.ExponentialDecay(
            initial_learning_rate=0.05,
            decay_steps=10000,
            decay_rate=0.9)

        model = Sequential()
        model.add(LSTM(50,
                       recurrent_activation='tanh',
                       recurrent_dropout=0.2,
                       kernel_initializer='he_uniform',
                       return_sequences=True,
                       input_shape=(x_train.shape[1], x_train.shape[2])))
        model.add(LSTM(100,
                       recurrent_dropout=0.3,
                       recurrent_activation='tanh',
                       return_sequences=True,
                       kernel_initializer='he_uniform'))
        model.add(Dropout(0.2))
        model.add(Dense(200,  activation='tanh', kernel_regularizer=regularizers.l2(0.1)))
        model.add(Dropout(0.2))
        model.add(Dense(100,  activation='tanh', kernel_regularizer=regularizers.l2(0.1)))
        model.add(Flatten())

        model.add(Dense(self.label_size, activation='softmax'))  # Classification
        model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=lr_schedule),
                      metrics=['accuracy'])
        # print((joints, frames, coords, channels))
        model.summary()

        mcp_save = ModelCheckpoint(self.path + 'saved-models/' + self.modelType + '-bestWeights.h5',
                                   save_best_only=True,
                                   monitor='val_loss',
                                   mode='min')
        history = model.fit(x_train, y_train, epochs=self.epochs, batch_size=self.batch_size,
                            validation_data=(x_validation, y_validation), callbacks=[mcp_save])
        plt.plot(history.history['loss'], label='train')
        plt.plot(history.history['val_loss'], label='validation')
        plt.legend()
        plt.show()

        Tools.saveModel(self.path, model, self.modelType)
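
For reference, the ExponentialDecay schedule used above lowers the learning rate as lr(step) = initial_learning_rate * decay_rate ** (step / decay_steps). A quick standalone check with the values from this example:

initial_lr, decay_rate, decay_steps = 0.05, 0.9, 10000

def lr_at(step):
    # Continuous (non-staircase) exponential decay, matching the snippet above.
    return initial_lr * decay_rate ** (step / decay_steps)

print(lr_at(0))      # 0.05
print(lr_at(10000))  # 0.045 -- one decay period multiplies the rate by 0.9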
Example #8
    def train_model(self):
        print('CNN-LSTM Model')

        x_train = None
        y_train = None
        x_validation = None
        y_validation = None

        bufferedNumpy = Tools.loadFromBuffer(self.path, self.dataPath)

        if bufferedNumpy is False:  # identity check; == on a loaded numpy buffer is elementwise
            self.preprocess()
            x_train = np.asarray(self.train_dataset)
            y_train = np.asarray(self.onehotTrainLabels)
            x_validation = np.asarray(self.validation_dataset)
            y_validation = np.asarray(self.onehotValidationLabels)
            Tools.bufferFile(
                self.path, self.dataPath,
                np.asarray([x_train, y_train, x_validation, y_validation]))
        else:
            x_train = bufferedNumpy[0]
            y_train = bufferedNumpy[1]
            x_validation = bufferedNumpy[2]
            y_validation = bufferedNumpy[3]

        [x_validation,
         y_validation] = Tools.shuffleData(x_validation, y_validation)
        [x_train, y_train] = Tools.shuffleData(x_train, y_train)

        x_validation = np.transpose(x_validation, (0, 2, 1, 3))
        x_validation = np.reshape(
            x_validation, (x_validation.shape[0], x_validation.shape[1], -1))
        x_train = np.transpose(x_train, (0, 2, 1, 3))
        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], -1))
        print(x_train.shape)

        sequence = x_train.shape[0]
        frames = x_train.shape[1]
        joints = x_train.shape[2]
        # channels = x_train.shape[4]

        self.label_size = y_train.shape[1]

        lr_schedule = schedules.ExponentialDecay(initial_learning_rate=1e-2,
                                                 decay_steps=10000,
                                                 decay_rate=0.9)

        model = Sequential()
        model.add(
            Conv1D(300,
                   input_shape=(frames, joints),
                   kernel_size=2,
                   strides=1,
                   activation='tanh'))
        model.add(MaxPooling1D(pool_size=2, strides=1, padding="same"))
        model.add(
            Conv1D(300,
                   kernel_size=2,
                   strides=1,
                   activation='tanh'))

        model.add(
            LSTM(joints,
                 activation='tanh',
                 kernel_initializer='he_uniform',
                 return_sequences=True))
        model.add(
            LSTM(units=joints,
                 return_sequences=True,
                 recurrent_dropout=0.1))

        model.add(Dropout(0.2))
        model.add(
            Dense(300,
                  activation='tanh',
                  kernel_regularizer=regularizers.l2(0.1)))
        model.add(Dropout(0.2))
        model.add(
            Dense(100,
                  activation='tanh',
                  kernel_regularizer=regularizers.l2(0.1)))
        model.add(Flatten())

        model.add(Dense(self.label_size,
                        activation='softmax'))  # Classification
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=lr_schedule),  # wire in the decay schedule defined above
                      metrics=['accuracy'])
        print((joints, frames))
        model.summary()

        mcp_save = ModelCheckpoint(self.path + 'saved-models/' +
                                   self.modelType + '-bestWeights.h5',
                                   save_best_only=True,
                                   monitor='val_loss',
                                   mode='min')
        history = model.fit(x_train,
                            y_train,
                            epochs=self.epochs,
                            batch_size=self.batch_size,
                            validation_data=(x_validation, y_validation),
                            callbacks=[mcp_save])
        plt.plot(history.history['loss'], label='train')
        plt.plot(history.history['val_loss'], label='validation')
        plt.legend()
        plt.show()

        Tools.saveModel(self.path, model, self.modelType)
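
A hedged end-to-end sketch tying the pieces together (ActivityModel is a placeholder name; the examples never show the enclosing class, and new_sample is a made-up input):

model = ActivityModel(lr=None, bs=None, e=None, split=None, f='Data')
model.train_model()  # preprocess or load buffered arrays, fit, plot losses, save

# Later, restore the saved model and classify a new recording:
clf = ActivityModel(None, None, None, None, None, loadModel=True, path='')
score = clf.predict(new_sample, columnSize=60, zeroPad=True)  # new_sample: hypothetical (joints, frames, coords) array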