def load_data(session, model_type, feature_type):
    """
    Load raw data
    if feature_type == FeatureType.MANUAL data is augmented with the magnitude
    model_type determines the shape of the data
    """
    # SAME_DAY protocol: recordings 1-4 of the session train, 5-6 test
    if MEASUREMENT_PROTOCOL_TYPE == MeasurementProtocol.SAME_DAY:
        print('Training data SAME SESSION')
        train_X, train_y = load_recordings_from_session(
            session, 1, 154, 1, 5, model_type, feature_type)
        print('Testing data SAME SESSION')
        test_X, test_y = load_recordings_from_session(
            session, 1, 154, 5, 7, model_type, feature_type)
        return train_X, train_y, test_X, test_y

    # CROSS_DAY protocol: train on session_1, test on session_2
    if MEASUREMENT_PROTOCOL_TYPE == MeasurementProtocol.CROSS_DAY:
        print('Training data CROSS SESSION')
        train_X, train_y = load_recordings_from_session(
            'session_1', 1, 154, 1, 7, model_type, feature_type)
        print('Testing data CROSS SESSION')
        test_X, test_y = load_recordings_from_session(
            'session_2', 1, 154, 1, 7, model_type, feature_type)
        return train_X, train_y, test_X, test_y
# Beispiel #2
# 0
def _save_handcrafted_session(session, start_user, stop_user, modeltype,
                              featuretype):
    """Extract handcrafted features for one session and write them to a CSV.

    The output file name depends on the global CYCLE flag (cycle-based vs
    frame-based segmentation). Each CSV row is the feature vector followed
    by the label.
    """
    X, y = load_recordings_from_session(session, start_user, stop_user, 1, 7,
                                        modeltype, featuretype)
    features = feature_extraction(X)
    num_rows, num_cols = features.shape
    suffix = 'cycles' if CYCLE else 'frames'
    filename = FEAT_DIR + '/' + session + '_handcrafted_' + suffix + '.csv'
    # context manager guarantees the handle is flushed and closed
    # (the original opened three files and never closed any of them)
    with open(filename, mode='w') as csv_file:
        for i in range(num_rows):
            for j in range(num_cols):
                csv_file.write('%f,' % (features[i, j]))
            csv_file.write('%s\n' % y[i][0])


def extract_manual_features():
    """Extract handcrafted (manual) features for ZJU-GaitAcc sessions
    0, 1 and 2 and save one CSV per session into FEAT_DIR."""
    modeltype = AUTOENCODER_MODEL_TYPE.LSTM
    featuretype = FeatureType.MANUAL
    # session_0 holds users [1, 23); sessions 1 and 2 hold users [1, 154)
    _save_handcrafted_session('session_0', 1, 23, modeltype, featuretype)
    _save_handcrafted_session('session_1', 1, 154, modeltype, featuretype)
    _save_handcrafted_session('session_2', 1, 154, modeltype, featuretype)
# Beispiel #3
# 0
def train_test_autoencoder(num_epochs=10):
    """Train an FCN autoencoder on ZJU session_2, then evaluate a classifier
    on features extracted from session_1 (recordings 1-4 train, 5-6 test)."""
    # ZJU: session_2 supplies the autoencoder training data
    train_x, train_y = load_recordings_from_session(
        'session_2', 1, 154, 1, 7,
        AUTOENCODER_MODEL_TYPE.CONV1D, FeatureType.AUTOMATIC)

    # IDNet alternative:
    # train_x, train_y = create_idnet_training_dataset(AUTOENCODER_MODEL_TYPE.CONV1D)

    # Training from scratch
    model_name = 'IDNetModel.h5'
    encoder, model = train_FCN_autoencoder(num_epochs, train_x, train_y,
                                           'relu', update=True,
                                           model_name=model_name)

    # print('Saved model: ' + model_name)
    # model.save(TRAINED_MODELS_DIR + '/' + model_name)

    # encoder_name = 'Encoder_' + model_name
    # print('Saved encoder: ' + encoder_name)
    # encoder.save(TRAINED_MODELS_DIR + '/' + encoder_name)

    X_train, y_train = extract_features(encoder, 'session_1',
                                        AUTOENCODER_MODEL_TYPE.CONV1D,
                                        1, 154, 1, 5)
    X_test, y_test = extract_features(encoder, 'session_1',
                                      AUTOENCODER_MODEL_TYPE.CONV1D,
                                      1, 154, 5, 7)

    # performs evaluation (train and test a classifier)
    evaluation(X_train, y_train, X_test, y_test)
# Beispiel #4
# 0
def extract_and_save_features(encoder, modeltype, start_user, stop_user):
    """Extract automatic features for session_1 and session_2 and save them.

    encoder: the encoder part of a trained autoencoder
    modeltype: determines the expected input data shape
    users: [start_user, stop_user)
    Output: FEAT_DIR/session_1.csv and FEAT_DIR/session_2.csv, each row being
    the scaled feature vector followed by the label (subj_XXX -> uXXX).
    """
    mysessions = ['session_1', 'session_2']
    feature_type = FeatureType.AUTOMATIC
    for session in mysessions:
        print('Extract features from ' + session)
        data, ydata = load_recordings_from_session(session, start_user,
                                                   stop_user, 1, 7, modeltype,
                                                   feature_type)

        print('data shape: ' + str(data.shape))
        # if (modeltype==AUTOENCODER_MODEL_TYPE.CONV1D):
        #     data = np.array(data)[:, :, np.newaxis]

        # Extract features
        encoded_frames = encoder.predict(data)

        # Normalize data (zero mean, unit variance per feature)
        scaled_data = preprocessing.scale(encoded_frames)

        # Concatenate features (encoded_frames) with labels (ydata)
        df1 = pd.DataFrame(data=scaled_data)
        df2 = pd.DataFrame(data=ydata)

        # shorten labels: 'subj_001' -> 'u001'
        df2[0] = df2[0].apply(lambda x: x.replace('subj_', 'u'))
        df3 = pd.concat([df1, df2], axis=1)

        # Save data into a CSV file
        df3.to_csv('./' + FEAT_DIR + '/' + session + ".csv",
                   header=False,
                   index=False)
def load_data(session, model_type, feature_type):
    """Load train/test splits according to MEASUREMENT_PROTOCOL_TYPE."""
    # SAME_DAY: 2/3 of recordings (1-4) train, 1/3 (5-6) test, same session
    if MEASUREMENT_PROTOCOL_TYPE == MEASUREMENT_PROTOCOL.SAME_DAY:
        print('Training data SAME SESSION')
        train_X, train_y = load_recordings_from_session(
            session, 1, 154, 1, 5, model_type, feature_type)
        print('Testing data SAME SESSION')
        test_X, test_y = load_recordings_from_session(
            session, 1, 154, 5, 7, model_type, feature_type)
        return train_X, train_y, test_X, test_y

    # CROSS_DAY: train on session_1, test on session_2
    if MEASUREMENT_PROTOCOL_TYPE == MEASUREMENT_PROTOCOL.CROSS_DAY:
        print('Training data CROSS SESSION')
        train_X, train_y = load_recordings_from_session(
            'session_1', 1, 154, 1, 7, model_type, feature_type)
        print('Testing data CROSS SESSION')
        test_X, test_y = load_recordings_from_session(
            'session_2', 1, 154, 1, 7, model_type, feature_type)
        return train_X, train_y, test_X, test_y
# Beispiel #6
# 0
def create_zju_training_dataset(augmentation, modeltype):
    """Create the autoencoder training dataset from the ZJU-GaitAcc sessions.

    The user/recording ranges of the two loaded sessions depend on the
    global EVALUATION_DATA setting. When augmentation is truthy, the data
    is augmented via data_augmentation.
    Returns (data, ydata) as numpy arrays.
    """
    # modeltype=AUTOENCODER_MODEL_TYPE.DENSE
    print('Create training dataset for autoencoder')
    feature_type = FeatureType.AUTOMATIC

    if EVALUATION_DATA == EvaluationData.INDEPENDENT:
        data1, ydata1 = load_recordings_from_session('session_1', 1, 103, 1, 7,
                                                     modeltype, feature_type)
        data2, ydata2 = load_recordings_from_session('session_2', 1, 103, 1, 7,
                                                     modeltype, feature_type)
    elif EVALUATION_DATA == EvaluationData.ALL:
        data1, ydata1 = load_recordings_from_session('session_1', 1, 153, 1, 7,
                                                     modeltype, feature_type)
        data2, ydata2 = load_recordings_from_session('session_2', 1, 153, 1, 7,
                                                     modeltype, feature_type)
    elif EVALUATION_DATA == EvaluationData.MSTHESIS:
        data1, ydata1 = load_recordings_from_session('session_1', 1, 153, 1, 4,
                                                     modeltype, feature_type)
        data2, ydata2 = load_recordings_from_session('session_0', 1, 23, 1, 7,
                                                     modeltype, feature_type)
    else:
        # the original fell through to a confusing NameError on data1 here;
        # fail fast with an explicit message instead
        raise ValueError('Unsupported EVALUATION_DATA: ' + str(EVALUATION_DATA))

    data = np.concatenate((data1, data2), axis=0)
    ydata = np.concatenate((ydata1, ydata2), axis=0)

    if augmentation:
        data, ydata = data_augmentation(data, ydata)

    print('ZJU GaitAcc dataset - Data shape: ' + str(data.shape) +
          ' augmentation: ' + str(augmentation))
    return data, ydata
def create_zju_training_dataset(augmentation=False):
    """
    Create training dataset for autoencoder using the ZJU-GaitAcc dataset
    Users [1, 103)
    Recordings [1, 7)

    When augmentation is truthy, the loaded data is expanded via
    data_augmentation. Returns (data, ydata) as numpy arrays.
    """
    modeltype = const.AutoencoderModelType.DENSE
    print('Create training dataset for autoencoder')
    feature_type = const.FeatureType.AUTOMATIC
    data1, ydata1 = load_recordings_from_session('session_1', 1, 103, 1, 7,
                                                 modeltype, feature_type)
    data2, ydata2 = load_recordings_from_session('session_2', 1, 103, 1, 7,
                                                 modeltype, feature_type)

    data = np.concatenate((data1, data2), axis=0)
    ydata = np.concatenate((ydata1, ydata2), axis=0)

    # truthiness check instead of `is True`: the original silently skipped
    # augmentation for truthy non-bool values such as 1
    if augmentation:
        data, ydata = data_augmentation(data, ydata)

    print('ZJU GaitAcc dataset - Data shape: ' + str(data.shape) +
          ' augmentation: ' + str(augmentation))
    return data, ydata
# Beispiel #8
# 0
def extract_features(encoder, session, modeltype, start_user, stop_user,
                     start_recording, stop_recording):
    """Encode raw recordings with a trained encoder and return the
    standardized feature vectors together with their labels."""
    raw, labels = load_recordings_from_session(
        session, start_user, stop_user, start_recording, stop_recording,
        modeltype, FeatureType.AUTOMATIC)
    # print('data shape: '+ str(raw.shape))

    # run the encoder to obtain the latent representation
    latent = encoder.predict(raw)
    print(latent.shape)

    # zero-mean / unit-variance scaling per feature
    scaled = preprocessing.scale(latent)
    #print('Scaling')
    #print(scaled)
    #print('features shape: ' + str(latent.shape))

    return scaled, labels
def extract_and_save_features(encoder, modeltype, start_user, stop_user):
    """
    Automatic features using the autoencoder
    ZJU-GaitAcc
    Extract and save features for session_1 and session_2

    encoder: the encoder part of a trained autoencoder
    modeltype: used for data shape
    users: [start_user, stop_user)
    Output: session_1.csv, session_2.csv
    """
    mysessions = ['session_1', 'session_2']
    feature_type = const.FeatureType.AUTOMATIC
    for session in mysessions:
        print('Extract features from ' + session)
        data, ydata = load_recordings_from_session(session, start_user,
                                                   stop_user, 1, 7, modeltype,
                                                   feature_type)

        print('data shape: ' + str(data.shape))
        # CONV1D expects an explicit channel axis
        if modeltype == const.AutoencoderModelType.CONV1D:
            data = np.array(data)[:, :, np.newaxis]

        # Extract features
        encoded_frames = encoder.predict(data)

        # Normalize data (zero mean, unit variance per feature)
        scaled_data = preprocessing.scale(encoded_frames)

        # Concatenate features (encoded_frames) with labels (ydata)
        df1 = pd.DataFrame(data=scaled_data)
        df2 = pd.DataFrame(data=ydata)

        # shorten labels: 'subj_001' -> 'u001'
        df2[0] = df2[0].apply(lambda x: x.replace('subj_', 'u'))
        df3 = pd.concat([df1, df2], axis=1)

        # Save data into a CSV file
        df3.to_csv('./' + const.FEAT_DIR + '/' + session + ".csv",
                   header=False,
                   index=False)
def extract_handcrafted_features(session, start_user, stop_user, output_file):
    """Extract handcrafted features for one session and write them to
    FEAT_DIR/output_file. Each CSV row is the feature vector followed by
    the shortened user label ('subj_XXX' -> 'uXXX')."""
    # create $FEAT_DIR if it does not exist
    create_directory('./' + FEAT_DIR)

    modeltype = AUTOENCODER_MODEL_TYPE.NONE
    featuretype = FeatureType.MANUAL
    X, y = load_recordings_from_session(session, start_user, stop_user, 1, 7,
                                        modeltype, featuretype)
    # print(X.shape)
    # print(y.shape)
    features = feature_extraction(X)
    num_segments = features.shape[0]
    num_features = features.shape[1]
    # context manager guarantees the file is closed even if a write raises
    # (the original relied on an unconditional close() call)
    with open(FEAT_DIR + '/' + output_file, mode='w+') as csv_file:
        for i in range(0, num_segments):
            for j in range(0, num_features):
                csv_file.write('%f,' % (features[i, j]))
            user = y[i, 0].replace("subj_", "u")
            csv_file.write('%s\n' % user)
def load_session_data(session, model_type, feature_type):
    """Load all recordings (1-6) of every user [1, 154) from the session."""
    return load_recordings_from_session(session, 1, 154, 1, 7, model_type,
                                        feature_type)
# Beispiel #12
# 0
def _encode_and_save_session(encoder, session, start_user, stop_user,
                             modeltype, feature_type):
    """Encode one session's recordings with the encoder and save the scaled
    features plus labels to FEAT_DIR/<session>.csv."""
    print('Extract features from ' + session)
    data, ydata = load_recordings_from_session(session, start_user,
                                               stop_user, 1, 7, modeltype,
                                               feature_type)
    print('data shape: ' + str(data.shape))
    # if (modeltype==AUTOENCODER_MODEL_TYPE.CONV1D):
    #     data = np.array(data)[:, :, np.newaxis]

    # Extract features
    encoded_frames = encoder.predict(data)

    # Normalize data (zero mean, unit variance per feature)
    scaled_data = preprocessing.scale(encoded_frames)

    # Concatenate features (encoded_frames) with labels (ydata)
    df1 = pd.DataFrame(data=scaled_data)
    df2 = pd.DataFrame(data=ydata)

    # shorten labels: 'subj_001' -> 'u001'
    df2[0] = df2[0].apply(lambda x: x.replace('subj_', 'u'))
    df3 = pd.concat([df1, df2], axis=1)

    # Save data into a CSV file
    df3.to_csv('./' + FEAT_DIR + '/' + session + ".csv",
               header=False,
               index=False)


def ZJU_feature_extraction(encoder_name, modeltype):
    """Extract automatic features for all three ZJU-GaitAcc sessions using a
    previously trained encoder and save one CSV per session in FEAT_DIR.

    encoder_name: file name of the saved encoder inside TRAINED_MODELS_DIR
    modeltype: determines the expected input data shape
    """
    # loads the encoder part of the model - it is needed for feature extraction
    encoder_path = TRAINED_MODELS_DIR + '/' + encoder_name
    encoder = load_model(encoder_path)
    print('Loaded model: ' + encoder_path)

    feature_type = FeatureType.AUTOMATIC

    # sessions 1 and 2 hold users [1, 154); session_0 holds users [1, 23)
    for session, stop_user in [('session_1', 154), ('session_2', 154),
                               ('session_0', 23)]:
        _encode_and_save_session(encoder, session, 1, stop_user, modeltype,
                                 feature_type)