# Example #1
    def train(self):
        """Build, train and evaluate a small 3-conv-layer CNN classifier.

        Trains on images only (self.trainData / self.trainLabel) against a
        2-way softmax with categorical cross-entropy, then evaluates on the
        held-out test split.  Experiment artifacts (code snapshot, weights,
        TensorBoard logs) are written under modelDir via the U helper.
        """
        modelDir = 'Experiments/CNN'
        dropout = 0.5
        epoch = 30
        U.save_GPU_mem_keras()
        expr = U.ExprCreaterAndResumer(modelDir,
                                       postfix="dr%s_imgOnly" % (str(dropout)))

        # inputShape is read from enclosing scope -- presumably a module-level
        # constant; confirm before reuse.
        inputs = L.Input(shape=inputShape)
        x = inputs  # inputs is used by the line "Model(inputs, ... )" below

        conv1 = L.Conv2D(64, (3, 3), strides=1, padding='valid')
        x = conv1(x)
        x = L.Activation('relu')(x)
        x = L.BatchNormalization()(x)
        # Batch norm needs to be after relu, otherwise it won't train...

        conv2 = L.Conv2D(32, (3, 3), strides=1, padding='valid')
        x = conv2(x)
        x = L.Activation('relu')(x)
        x = L.BatchNormalization()(x)

        conv3 = L.Conv2D(32, (3, 3), strides=1, padding='valid')
        x = conv3(x)
        x = L.Activation('relu')(x)
        x = L.BatchNormalization()(x)

        # Fix: flatten the 4-D conv output before the Dense head.  The
        # original had Flatten() commented out, so Dense was applied to the
        # last axis only and the (batch, h, w, 2) output could not match the
        # (batch, 2) one-hot labels at loss time.
        x = L.Flatten()(x)
        x = L.Dense(128, activation='relu')(x)
        x = L.Dropout(dropout)(x)
        output = L.Dense(2, activation='softmax')(x)
        model = Model(inputs=inputs, outputs=output)

        opt = K.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
        model.compile(optimizer=opt,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        print(model.summary())
        # snapshot code before training the model
        expr.dump_src_code_and_model_def(sys.argv[0], model)

        model.fit(self.trainData,
                  self.trainLabel,
                  validation_data=(self.testData, self.testLabel),
                  class_weight=self.class_weights,
                  shuffle=True,
                  batch_size=100,
                  epochs=epoch,
                  verbose=2,
                  callbacks=[
                      K.callbacks.TensorBoard(log_dir=expr.dir),
                      K.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    factor=0.5,
                                                    patience=5,
                                                    min_lr=0.00001),
                      U.PrintLrCallback()
                  ])

        expr.save_weight_and_training_config_state(model)

        # Fix: the original passed a bare positional `0` after keyword
        # arguments (a SyntaxError); it was meant to be verbose=0.
        score = model.evaluate(self.testData,
                               self.testLabel,
                               batch_size=100,
                               verbose=0)
        expr.printdebug("eval score:" + str(score))
# Example #2
    def train(self):
        """Train a 3-layer dilated-conv CNN for binary image classification.

        Reads configuration from enclosing scope (modelDir, dropout, epoch,
        dataAug, inputShape) -- presumably module-level globals defined
        outside this view; verify before reuse.  When dataAug is truthy,
        trains with horizontal-flip augmentation via a manual
        ImageDataGenerator loop; otherwise uses a single model.fit call.
        Experiment artifacts (code snapshot, weights, TensorBoard logs) are
        saved through the U experiment helper.
        """
        U.save_GPU_mem_keras()
        expr = U.ExprCreaterAndResumer(modelDir,
                                       postfix="dr%s_imgOnly" % (str(dropout)))

        inputs = L.Input(shape=inputShape)
        x = inputs  # inputs is used by the line "Model(inputs, ... )" below

        conv1 = L.Conv2D(64, (3, 3),
                         strides=1,
                         dilation_rate=2,
                         padding='valid')
        x = conv1(x)
        x = L.Activation('relu')(x)
        x = L.BatchNormalization()(x)
        # Batch needs to be after relu, otherwise it won't train...
        #x = L.MaxPooling2D(pool_size=(2,2))(x)

        conv2 = L.Conv2D(64, (3, 3),
                         strides=1,
                         dilation_rate=2,
                         padding='valid')
        x = conv2(x)
        x = L.Activation('relu')(x)
        x = L.BatchNormalization()(x)
        #x = L.MaxPooling2D(pool_size=(2,2))(x)

        conv3 = L.Conv2D(64, (3, 3),
                         strides=1,
                         dilation_rate=2,
                         padding='valid')
        x = conv3(x)
        x = L.Activation('relu')(x)
        x = L.BatchNormalization()(x)
        #x = L.MaxPooling2D(pool_size=(2,2))(x)

        # Flatten conv features, then a single dense layer into a sigmoid
        # output for binary classification.
        x = L.Flatten()(x)
        x = L.Dense(256, activation='relu')(x)
        x = L.Dropout(dropout)(x)
        output = L.Dense(1, activation='sigmoid')(x)
        model = Model(inputs=inputs, outputs=output)

        opt = K.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
        #opt = K.optimizers.Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
        model.compile(optimizer=opt,
                      loss='binary_crossentropy',
                      metrics=[K.metrics.binary_accuracy])

        print(model.summary())
        # snapshot code before training the model
        expr.dump_src_code_and_model_def(sys.argv[0], model)

        if dataAug:
            datagen = ImageDataGenerator(horizontal_flip=True)

            # compute quantities required for featurewise normalization
            # (std, mean, and principal components if ZCA whitening is applied)
            datagen.fit(self.trainData)

            # fits the model on batches with real-time data augmentation:
            # model.fit_generator(datagen.flow(self.trainData, self.trainLabel, batch_size=64),
            # 	validation_data=(self.testData, self.testLabel), class_weight=self.class_weights,
            # 	samples_per_epoch=len(self.trainData), epochs=epoch, verbose=2)

            # here's a more "manual" example
            for e in range(epoch):
                print('Epoch', e)
                batches = 0
                for x_batch, y_batch in datagen.flow(self.trainData,
                                                     self.trainLabel,
                                                     batch_size=5000):
                    # NOTE(review): fit() runs a full (default-batch-size)
                    # Keras epoch on each augmented 5000-sample chunk,
                    # including a validation pass; train_on_batch may have
                    # been intended -- confirm before changing.
                    model.fit(x_batch,
                              y_batch,
                              validation_data=(self.testData, self.testLabel),
                              class_weight=self.class_weights,
                              shuffle=True,
                              verbose=2)
                    batches += 1
                    print("Batch", batches)
                    # datagen.flow loops forever; stop after roughly one
                    # pass over the training set.
                    if batches >= len(self.trainData) / 5000:
                        break
        else:
            # model.fit(self.trainData, self.trainLabel, validation_data=(self.testData, self.testLabel),
            # 	class_weight=self.class_weights, shuffle=True, batch_size=100, epochs=epoch, verbose=2,
            # 	callbacks=[K.callbacks.TensorBoard(log_dir=expr.dir),
            # 	K.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=4, min_lr = 0.00001),
            # 	U.PrintLrCallback()])
            model.fit(self.trainData,
                      self.trainLabel,
                      validation_data=(self.testData, self.testLabel),
                      shuffle=True,
                      batch_size=100,
                      epochs=epoch,
                      verbose=2,
                      callbacks=[
                          K.callbacks.TensorBoard(log_dir=expr.dir),
                          K.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                        factor=0.5,
                                                        patience=5,
                                                        min_lr=0.00001),
                          U.PrintLrCallback()
                      ])

        expr.save_weight_and_training_config_state(model)

        # Silent evaluation on the test split; score layout follows the
        # compiled metrics ([loss, binary_accuracy]).
        score = model.evaluate(self.testData,
                               self.testLabel,
                               batch_size=100,
                               verbose=0)
        expr.printdebug("eval score:" + str(score))
# Example #3
    def train(self):
        """Train a two-stream CNN on image frames plus optical flow.

        Each stream passes through three dilated 3x3 conv blocks; the two
        streams are merged by element-wise averaging and fed to a small
        sigmoid head for binary classification.  Configuration (modelDir,
        dropout, epoch, inputShape, inputShapeOF) comes from enclosing
        scope; experiment artifacts are saved through the U helper.
        """
        U.save_GPU_mem_keras()
        expr = U.ExprCreaterAndResumer(modelDir,
                                       postfix="dr%s_imgOnly" % (str(dropout)))

        def conv_stack(tensor):
            # Three dilated 3x3 conv layers of 16 filters each.  Batch norm
            # is placed after relu, otherwise the net won't train.
            for _ in range(3):
                tensor = L.Conv2D(16, (3, 3),
                                  strides=1,
                                  dilation_rate=2,
                                  padding='valid')(tensor)
                tensor = L.Activation('relu')(tensor)
                tensor = L.BatchNormalization()(tensor)
            return tensor

        # x stream: raw image
        x_inputs = L.Input(shape=inputShape)
        x_output = conv_stack(x_inputs)

        # z stream: optical flow
        z_inputs = L.Input(shape=inputShapeOF)
        z_output = conv_stack(z_inputs)

        # merge the two streams and classify
        merged = L.Average()([x_output, z_output])
        merged = L.Flatten()(merged)
        merged = L.Dense(32, activation='relu')(merged)
        merged = L.Dropout(dropout)(merged)
        output = L.Dense(1, activation='sigmoid')(merged)

        model = Model(inputs=[x_inputs, z_inputs], outputs=output)

        opt = K.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
        model.compile(optimizer=opt,
                      loss='binary_crossentropy',
                      metrics=[K.metrics.binary_accuracy])

        print(model.summary())
        # snapshot the source and model definition before training starts
        expr.dump_src_code_and_model_def(sys.argv[0], model)

        model.fit([self.trainData, self.trainDataOF],
                  self.trainLabel,
                  validation_data=([self.testData,
                                    self.testDataOF], self.testLabel),
                  shuffle=True,
                  batch_size=100,
                  epochs=epoch,
                  verbose=2,
                  callbacks=[
                      K.callbacks.TensorBoard(log_dir=expr.dir),
                      K.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    factor=0.5,
                                                    patience=5,
                                                    min_lr=0.00001),
                      U.PrintLrCallback()
                  ])

        expr.save_weight_and_training_config_state(model)

        score = model.evaluate([self.testData, self.testDataOF],
                               self.testLabel,
                               batch_size=100,
                               verbose=0)
        expr.printdebug("eval score:" + str(score))
# Example #4
    def train_fully_connected_model(self):
        """Train a fully connected network to regress gaze from body joints.

        Splits self.allIVData / self.gaze1Data chronologically (85% train),
        fits a single 1024-unit hidden layer with MSE loss against a 3-wide
        linear output, and records the experiment (code snapshot, weights,
        TensorBoard logs) under modelDir via the U helper.
        """
        # Fix: dropped imports that were never used (to_categorical,
        # Sequential, Dense/Conv2D/Flatten, ImageDataGenerator,
        # sklearn class_weight).
        import keras as K
        import keras.layers as L
        from keras.models import Model
        import utils as U

        #X: self.allIVData
        #Y: self.gaze1Data // self.gaze2Data

        (numGaze, dim) = self.allIVData.shape
        print(numGaze, dim)
        # Unlike regression model, deep net needs to split data first
        trainRatio = 0.85
        numGazeTrain = int(numGaze * trainRatio)

        self.trainData = self.allIVData[0:numGazeTrain]
        self.testData = self.allIVData[numGazeTrain:]

        self.trainLabel = self.gaze1Data[0:numGazeTrain]
        self.testLabel = self.gaze1Data[numGazeTrain:]

        inputShape = (dim, )
        # TODO: where to store results
        # Fix: original concatenated 'Experiments/' + '/body-fc', producing
        # a double slash ('Experiments//body-fc').
        modelDir = 'Experiments/body-fc'
        dropout = 0.0
        epoch = 20

        U.save_GPU_mem_keras()
        expr = U.ExprCreaterAndResumer(modelDir,
                                       postfix="dr%s_jointsOnly" %
                                       (str(dropout)))

        inputs = L.Input(shape=inputShape)
        x = inputs  # inputs is used by the line "Model(inputs, ... )" below

        # TODO: input data needs standardization; batch norm on the raw
        # input is used as a stand-in here -- effect vs. the standard
        # approach unknown.
        x = L.BatchNormalization()(x)
        x = L.Dense(1024, activation='relu')(x)
        x = L.Dropout(dropout)(x)
        x = L.BatchNormalization()(x)
        # TODO: if the label shape changes, this output width '3' must too
        output = L.Dense(3, activation='linear')(x)
        model = Model(inputs=inputs, outputs=output)

        opt = K.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
        model.compile(optimizer=opt,
                      loss="mean_squared_error",
                      metrics=['mae', 'mse'])

        print(model.summary())
        # snapshot code before training the model
        expr.dump_src_code_and_model_def(sys.argv[0], model)

        model.fit(self.trainData,
                  self.trainLabel,
                  validation_data=(self.testData, self.testLabel),
                  shuffle=True,
                  batch_size=100,
                  epochs=epoch,
                  verbose=2,
                  callbacks=[
                      K.callbacks.TensorBoard(log_dir=expr.dir),
                      K.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    factor=0.5,
                                                    patience=5,
                                                    min_lr=0.00001),
                      U.PrintLrCallback()
                  ])

        expr.save_weight_and_training_config_state(model)

        score = model.evaluate(self.testData,
                               self.testLabel,
                               batch_size=100,
                               verbose=0)
        expr.printdebug("eval score:" + str(score))