Code example #1
import numpy as np

# mul_str_arr is a module-level helper (a sketch is given after this
# example); df is expected to be a pandas DataFrame.


def compute_triaxial_norm(df, remove_triaxial_vectors=True):
    """Append a Euclidean-norm column for every triaxial sensor in df.

    A sensor is any prefix <name> for which the columns <name>_x,
    <name>_y and <name>_z are all present.
    """
    columns = df.columns
    sensors = [
        x[:-2] for x in columns
        if x.endswith('_x') and (x[:-2] + '_y') in columns
        and (x[:-2] + '_z') in columns
    ]
    for sens in sensors:
        cols = df.loc[:, [
            "{}_x".format(sens), "{}_y".format(sens), "{}_z".format(sens)
        ]].values
        norms = np.linalg.norm(cols, axis=1)
        if remove_triaxial_vectors:
            # drop the raw axis columns once their magnitude is computed
            df.drop(labels=mul_str_arr([sens], ["x", "y", "z"]),
                    axis=1,
                    inplace=True)
        df.insert(loc=len(df.columns),
                  column="{}_magnitude".format(sens),
                  value=norms)
    return df
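
Every excerpt below calls a mul_str_arr helper that is never shown. Judging from call sites such as mul_str_arr(["chest_acc"], ["x", "y", "z"]), it builds the underscore-joined cross product of its argument lists; a minimal sketch consistent with that usage:

from itertools import product


def mul_str_arr(*arrays):
    # Cross product of string lists, joined with "_":
    # mul_str_arr(["acc"], ["x", "y"]) -> ["acc_x", "acc_y"]
    return ["_".join(parts) for parts in product(*arrays)]

A quick check of compute_triaxial_norm on a hypothetical two-row frame:

import pandas as pd

df = pd.DataFrame({"acc_x": [3.0, 0.0], "acc_y": [4.0, 0.0],
                   "acc_z": [0.0, 1.0]})
print(compute_triaxial_norm(df))  # one "acc_magnitude" column: 5.0, 1.0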
Code example #2
#     "label",
# ]

import os
from collections import OrderedDict

# DATASET_NAME and DATA_PATH are module-level constants not shown in this
# excerpt.
samples_table = "__".join([DATASET_NAME, "samples"])
samples_schema = OrderedDict([
    ("sample_id", "INTEGER PRIMARY KEY"),
    ("timestamp", "DECIMAL"),
    ("subject_id", "INTEGER"),
    ("activity_id", "INTEGER"),
])
samples_n_columns = len(samples_schema)

sensor_readings_table = "__".join([DATASET_NAME, "sensor_readings"])
sensor_readings_schema = OrderedDict(
    [("sample_id", "INTEGER PRIMARY KEY")] +
    [(k, "FLOAT") for k in
     mul_str_arr(["chest_acc"], ["x", "y", "z"]) + ["ecg_1", "ecg_2"] +
     mul_str_arr(["left_ankle", "right_lower_arm"],
                 ["acc", "gyro", "magn"], ["x", "y", "z"])] +
    [("FOREIGN KEY",
      "(sample_id) REFERENCES {}(sample_id)".format(samples_table))])
sensor_readings_n_columns = len(sensor_readings_schema) - 1


def _load_dataset(path, loader):
    # Files are named like "mHealth_subject<N>.log"; the slice [15:-4]
    # extracts <N>, which keys the returned mapping.
    return OrderedDict(
        (int(filepath[15:-4]), loader(os.path.join(path, DATA_PATH, filepath)))
        for filepath in os.listdir(os.path.join(path, DATA_PATH))
        if filepath.endswith('.log'))
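
None of these excerpts show how the schema OrderedDicts are consumed. A plausible consumer (create_table_sql is hypothetical, not part of the original code) joins each key/value pair into a column definition, which also covers the trailing FOREIGN KEY entry:

def create_table_sql(table_name, schema):
    # Each (key, value) pair renders as "<key> <value>"; the final
    # ("FOREIGN KEY", "(sample_id) REFERENCES ...") entry becomes a
    # table constraint under the same rule.
    column_defs = ",\n    ".join(
        "{} {}".format(k, v) for k, v in schema.items())
    return "CREATE TABLE {} (\n    {}\n);".format(table_name, column_defs)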
Code example #3
import os
from collections import OrderedDict

# DATASET_NAME and DATA_PATH are module-level constants not shown in this
# excerpt.
samples_table = "__".join([DATASET_NAME, "samples"])
samples_schema = OrderedDict([
    ("sample_id", "INTEGER PRIMARY KEY"),
    ("timestamp", "DECIMAL"),
    ("subject_id", "INTEGER"),
    ("activity_id", "INTEGER"),
])
samples_n_columns = len(samples_schema)

sensor_readings_table = "__".join([DATASET_NAME, "sensor_readings"])
sensor_readings_schema = OrderedDict([
    ("sample_id", "INTEGER PRIMARY KEY"),
    ("heart_rate", "FLOAT"),
] + [
    (k, "FLOAT")
    for k in mul_str_arr(["hand", "chest", "ankle"], ["temperature"] +
                         mul_str_arr(["acc", "gyro", "magn"], ["x", "y", "z"]))
] + [("FOREIGN KEY",
      "(sample_id) REFERENCES {}(sample_id)".format(samples_table))])
sensor_readings_n_columns = len(sensor_readings_schema) - 1

# column_headings = [
#     "timestamp",
#     "activity_id",
#     "heart_rate",
# ]


def _load_dataset(path, loader_func):
    _data_path = os.path.join(path, DATA_PATH)
    tbls = []
    subdirectories = ["Protocol", "Optional"]
    # Assumed completion (the original excerpt is truncated here): load
    # every .dat file from both PAMAP2 recording sessions.
    for subdirectory in subdirectories:
        subdir = os.path.join(_data_path, subdirectory)
        for filepath in sorted(os.listdir(subdir)):
            if filepath.endswith(".dat"):
                tbls.append(loader_func(os.path.join(subdir, filepath)))
    return tbls
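
The *_n_columns counters subtract one whenever the schema ends with a FOREIGN KEY entry, because that entry is a table constraint rather than an insertable column. A hypothetical insert_sql helper shows why that is exactly the count an INSERT statement needs:

def insert_sql(table_name, n_columns):
    # One "?" placeholder per real column; the FOREIGN KEY entry in the
    # schema is excluded from n_columns.
    placeholders = ", ".join(["?"] * n_columns)
    return "INSERT INTO {} VALUES ({});".format(table_name, placeholders)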
Code example #4
#     "label",
# ]

import os
from collections import OrderedDict

import numpy as np

# DATASET_NAME and DATA_PATH are module-level constants not shown in this
# excerpt.
samples_table = "__".join([DATASET_NAME, "samples"])
samples_schema = OrderedDict([
    ("sample_id", "INTEGER PRIMARY KEY"),
    ("timestamp", "DECIMAL"),
    ("subject_id", "INTEGER"),
    ("activity_id", "INTEGER"),
])
samples_n_columns = len(samples_schema)

sensor_readings_table = "__".join([DATASET_NAME, "sensor_readings"])
sensor_readings_schema = OrderedDict(
    [("sample_id", "INTEGER PRIMARY KEY")] +
    [(k, "FLOAT") for k in mul_str_arr(["chest_acc"], ["x", "y", "z"])] +
    [("FOREIGN KEY",
      "(sample_id) REFERENCES {}(sample_id)".format(samples_table))])
sensor_readings_n_columns = len(sensor_readings_schema) - 1


def _load_dataset(path, loader):
    # The file stem is used as the integer key (e.g. "1.csv" -> 1).
    return OrderedDict(
        (int(filepath[:-4]), loader(os.path.join(path, DATA_PATH, filepath)))
        for filepath in os.listdir(os.path.join(path, DATA_PATH))
        if filepath.endswith('.csv'))


def load_dataset_to_mem(path):
    # np.genfromtxt splits on whitespace by default; pass delimiter=","
    # (e.g. via functools.partial) if the files are comma-separated.
    return _load_dataset(path, np.genfromtxt)
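
A hypothetical usage sketch (the path is a placeholder): the loader returns an OrderedDict keyed by the integer file stem, with one raw array per file.

dataset = load_dataset_to_mem("/path/to/raw/dataset")
for key, table in dataset.items():
    print(key, table.shape)  # one (n_samples, n_columns) array per file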
Code example #5
# acc unit: m/s^2
# gyro unit: deg/s

# samples_table, sensor_readings_table and mul_str_arr are the module-level
# definitions from the schema excerpt above.
distinct_subject_query = """
SELECT DISTINCT subject_id FROM {};
""".format(samples_table)

distinct_activity_query = """
SELECT DISTINCT activity_id FROM {};
""".format(samples_table)

raw_table_valid_data_query = ("""
SELECT
    activity_id, subject_id,
""" + ", ".join(
    mul_str_arr(["chest_acc"], ["x", "y", "z"]) + ["ecg_1", "ecg_2"] +
    mul_str_arr(["left_ankle", "right_lower_arm"], ["acc", "gyro", "magn"],
                ["x", "y", "z"])) + """
FROM
    {0}, {1}
WHERE {0}.sample_id = {1}.sample_id AND activity_id != 0;
""").format(samples_table, sensor_readings_table)

# The original excerpt breaks off after FROM; the tail below is
# reconstructed to mirror raw_table_valid_data_query above.
raw_table_valid_data_query_with_subject_id = ("""
SELECT
    activity_id, timestamp, subject_id,
""" + ", ".join(
    mul_str_arr(["chest_acc"], ["x", "y", "z"]) + ["ecg_1", "ecg_2"] +
    mul_str_arr(["left_ankle", "right_lower_arm"], ["acc", "gyro", "magn"],
                ["x", "y", "z"])) + """
FROM
    {0}, {1}
WHERE {0}.sample_id = {1}.sample_id AND activity_id != 0;
""").format(samples_table, sensor_readings_table)
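
A hedged usage sketch: assuming the tables live in an SQLite database file (the filename here is hypothetical), the queries can be pulled straight into pandas:

import sqlite3

import pandas as pd

with sqlite3.connect("har_data.sqlite") as conn:
    subject_ids = pd.read_sql_query(distinct_subject_query, conn)
    raw = pd.read_sql_query(raw_table_valid_data_query, conn)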
Code example #6
import os
from collections import OrderedDict
from decimal import Decimal

# DATASET_NAME, DATA_PATH and sampling_freq come from earlier in the
# module; the Inertial Signals layout below matches the UCI HAR archive,
# which is sampled at 50 Hz.
sampling_interval = Decimal('1.0') / sampling_freq

samples_table = "__".join([DATASET_NAME, "samples"])
samples_schema = OrderedDict([
    ("sample_id", "INTEGER PRIMARY KEY"),
    ("timestamp", "DECIMAL"),
    ("subject_id", "INTEGER"),
    ("activity_id", "INTEGER"),
])
samples_n_columns = len(samples_schema)

sensor_readings_table = "__".join([DATASET_NAME, "sensor_readings"])
sensor_readings_schema = OrderedDict(
    [("sample_id", "INTEGER PRIMARY KEY")] +
    [(k, "FLOAT")
     for k in mul_str_arr(["body_acc", "body_gyro"], ["x", "y", "z"])] +
    [("FOREIGN KEY",
      "(sample_id) REFERENCES {}(sample_id)".format(samples_table))])
sensor_readings_n_columns = len(sensor_readings_schema) - 1


def _load_dataset(path, loaders):
    # One loaders(...) call per split; the trailing loop over
    # ["train", "test"] is an assumed completion of the truncated
    # excerpt, matching the UCI HAR archive layout.
    return [
        loaders(*[
            os.path.join(path, DATA_PATH, subdirectory, filepath)
            for filepath in [
                "subject_{}.txt".format(subdirectory),
                "y_{}.txt".format(subdirectory),
            ] + [
                "Inertial Signals/{}_{}_{}.txt".format(s, a, subdirectory)
                for s in ["total_acc", "body_gyro"] for a in ["x", "y", "z"]
            ]
        ]) for subdirectory in ["train", "test"]
    ]
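
Using Decimal for the sampling interval keeps generated timestamps exact; a small illustration, with sampling_freq assumed to be Decimal('50') for the 50 Hz UCI HAR data:

from decimal import Decimal

sampling_freq = Decimal('50')
sampling_interval = Decimal('1.0') / sampling_freq
# Exact arithmetic: no float accumulation error in the timestamp column.
print([i * sampling_interval for i in range(3)])
# [Decimal('0.00'), Decimal('0.02'), Decimal('0.04')]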