Example #1
def get_activity_model(is_gravity):
    if is_gravity:
        model_file_contents = get_resource_contents(ACTIVITY_MODEL_FILENAME)
    else:
        model_file_contents = get_resource_contents(
            ACTIVITY_ACCEL_ONLY_MODEL_FILENAME)

    clf = pickle.loads(model_file_contents)
    return clf
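
Both loaders above rely on module-level filename constants and a get_resource_contents helper that returns the raw bytes of a bundled file; neither is shown in these examples. A minimal sketch of what such a helper might look like, assuming the models ship inside a hypothetical core.resources package (the package name and filename below are invented for illustration):

from importlib import resources

ACTIVITY_MODEL_FILENAME = 'activity_model.pkl'  # assumed filename, not taken from the examples

def get_resource_contents(filename: str) -> bytes:
    # read a bundled file as raw bytes so pickle.loads can consume it
    return resources.files('core.resources').joinpath(filename).read_bytes()
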
Example #2
def get_posture_model(is_gravity) -> RandomForestClassifier:
    if is_gravity:
        model_file_contents = get_resource_contents(POSTURE_MODEL_FILENAME)
    else:
        model_file_contents = get_resource_contents(
            POSTURE_ACCEL_ONLY_MODEL_FILENAME)

    clf = pickle.loads(model_file_contents)
    return clf
Example #3
def get_activity_model(is_gravity: bool):
    """

    :rtype: object
    :param bool is_gravity:
    :return:
    """
    if is_gravity:
        model_file_contents = get_resource_contents(ACTIVITY_MODEL_FILENAME)
    else:
        model_file_contents = get_resource_contents(
            ACTIVITY_ACCEL_ONLY_MODEL_FILENAME)

    clf = pickle.loads(model_file_contents)
    return clf
Example #4
def get_posture_model(is_gravity: bool) -> RandomForestClassifier:
    """

    :rtype: object
    :param bool is_gravity:
    :return:
    """
    if is_gravity:
        model_file_contents = get_resource_contents(POSTURE_MODEL_FILENAME)
    else:
        model_file_contents = get_resource_contents(
            POSTURE_ACCEL_ONLY_MODEL_FILENAME)

    clf = pickle.loads(model_file_contents)
    return clf
Example #5
def get_model() -> RandomForestClassifier:
    """

    :rtype: object
    :param bool is_gravity:
    :return:
    """
    model_file_contents = get_resource_contents(MODEL_FILENAME)
    clf = pickle.loads(model_file_contents)
    return clf
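
Whatever object was pickled is exactly what these loaders hand back, so the caller uses the returned classifier directly. A short usage sketch (the feature values below are invented, and the number of features must match whatever the model was trained on):

import numpy as np

clf = get_model()
features = np.array([[0.1, 0.2, 0.3, 0.4]])  # one sample, shape (1, n_features)
predicted_label = clf.predict(features)[0]
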
Example #6
    def predictLabel(self, timestampEntry, timestampExit):
        """
        Predict label from model

        :param int timestampEntry: timestamp of entry into the epoch
        :param int timestampExit: timestamp of exit from the epoch
        :return: predicted label ('home', 'work', or 'other')
        """
        modelFilePath = self.MODEL_FILE_PATH
        model = pickle.loads(get_resource_contents(modelFilePath))
        featuresM = self.getFeatures(timestampEntry, timestampExit)
        # stack the single feature row on itself so predict() receives a 2-D array;
        # only the first prediction is used below
        featuresM = np.vstack((featuresM, featuresM))
        result = model.predict(featuresM)
        result = result[0]
        return result
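
The np.vstack call above duplicates the single feature row so that predict() receives a 2-D array, and only result[0] is kept. The same single-sample shape can also be produced with reshape; a small sketch with invented feature values (model stands for the classifier loaded in the example):

import numpy as np

features = np.array([3.2, 0.7, 12.0])        # a 1-D feature vector (values invented)
single_sample = features.reshape(1, -1)      # shape (1, 3), the form predict() expects
# label = model.predict(single_sample)[0]
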
Example #7
def typing_episodes(dataset: pd.DataFrame, offset: int) -> List[DataPoint]:
    """
    This function detects typing episodes and typing speed.

    Makes a prediction every 200ms using values from a window of 1000ms.
    This means there is an overlap of 800ms between consecutive sample windows.

    :param pd.DataFrame dataset: the synced dataframe of left and right accl and gyro data
    :param int offset: offset for local time
    :return: DataPoints of typing episodes
    :rtype: List[DataPoint]
    """

    dataset = dataset.values

    # 12 columns of x,y,z values for accl and gyro data
    dataset_cp = np.copy(dataset[:, 1:13])

    n_samples, d = dataset_cp.shape

    # Data Reshaping
    # the following lines convert the data stream into a sliding window
    # with window size 1000ms and stride 200 ms

    data_slide = np.zeros((int(
        (n_samples - WINDOW_SIZE) / STRIDE) + 1, WINDOW_SIZE, d))

    # stores the starting time of each window
    time_t = np.zeros((int((n_samples - WINDOW_SIZE) / STRIDE) + 1, 1))
    k = 0
    for i in range(0, n_samples - WINDOW_SIZE, STRIDE):
        data_slide[k, :, :] = dataset_cp[i:i + WINDOW_SIZE, :]
        time_t[k] = dataset[i, 0]
        k = k + 1

    speed_slide = np.zeros((int(
        (n_samples - WINDOW_SIZE) / STRIDE_S) + 1, WINDOW_SIZE, d))
    time_s = np.zeros((int((n_samples - WINDOW_SIZE) / STRIDE_S) + 1, 1))
    k = 0
    for i in range(0, n_samples - WINDOW_SIZE, STRIDE_S):
        speed_slide[k, :, :] = dataset_cp[i:i + WINDOW_SIZE, :]
        time_s[k] = dataset[i, 0]
        k = k + 1

    z = 0
    X_test0 = data_slide[z:]

    # Load Trained Model
    # model = load_model(TYPING_MODEL_FILENAME)

    tmpfile = tempfile.NamedTemporaryFile(delete=True)
    tmpfile.write(get_resource_contents(TYPING_MODEL_FILENAME))
    tmpfile.flush()  # make sure the model bytes are on disk before load_model reads them
    model = load_model(os.path.realpath(tmpfile.name))
    tmpfile.close()

    #network_type = 'ConvLSTM'
    network_type = 'CNN'
    _, win_len, dim = X_test0.shape

    # data has to be reshaped before being fed into the model
    X_test = _data_reshaping(X_test0, network_type)

    # y_pred = 1 indicates typing
    # y_pred = 0 indicates no_typing
    y_pred = np.argmax(model.predict(X_test), axis=1)

    # Smoothing - to reduce noisy predictions
    indices_type = np.where(y_pred == 1)[0]
    time_type = time_t[
        indices_type]  # contains timestamps of when the user is typing
    data = []

    typing_time = timedelta(0)
    # smooth_labels_3: final output prediction
    # start_time: start time of the typing session
    # end_time: end time of the typing session

    if len(indices_type) > 0:
        pred_l = len(y_pred)
        ind_l = len(indices_type)
        smooth_labels_3 = np.zeros((pred_l, 1))
        s = 0
        start_time = []
        end_time = []

        for i in range(0, ind_l - 1):
            if s == 0:
                start_time.append(time_type[i])
                s = 1

            if (time_type[i + 1] - time_type[i]) < 10000:  # 10000 = 10 seconds
                smooth_labels_3[indices_type[i]:indices_type[i + 1]] = 1
            else:
                end_time.append(time_type[i] + 200)  # 200 = 200 milliseconds
                s = 0
        end_time.append(time_type[-1] + 200)  # 200 = 200 milliseconds
        z = 0
        X_test0 = speed_slide[z:]

        tmpfile = tempfile.NamedTemporaryFile(delete=True)
        tmpfile.write(get_resource_contents(TYPINGSPEED_MODEL_FILENAME))
        tmpfile.flush()  # make sure the model bytes are on disk before load_model reads them
        model = load_model(os.path.realpath(tmpfile.name))
        tmpfile.close()

        network_type = 'CNN'

        _, win_len, dim = X_test0.shape

        X_test = _data_reshaping(X_test0, network_type)

        y_pred = np.argmax(model.predict(X_test), axis=1)

        #Filtering based on typing labels
        smooth_typing = np.zeros(np.shape(time_s))
        y_pred_filtered = np.copy(y_pred)
        k = 0
        n_typing = np.shape(smooth_labels_3)[0]
        for i in range(0, n_typing, 5):
            if (i + 5 < n_typing):
                smooth_typing[k] = np.amax(smooth_labels_3[i:i + 5])
            else:
                smooth_typing[k] = np.amax(smooth_labels_3[i:])
            if (smooth_typing[k] == 0):
                y_pred_filtered[k] = 0
            k = k + 1
        '''
        output : y_pred_filtered
        start_time: time_s
        end_time: time_s+1 sec
        0 - Slow (< 3 keys)
        1 - medium (>= 3 & < 5 keys)
        2 - fast (>= 5 keys)
        '''
        if len(time_s) > 0:
            for i in range(len(time_s)):
                st = datetime.fromtimestamp(int(float(time_s[i])))
                et = st + timedelta(seconds=.999999)
                data.append(
                    DataPoint(start_time=st,
                              end_time=et,
                              offset=offset,
                              sample=y_pred_filtered[i]))

                # print(st, et, y_pred_filtered[i])

    return data
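
_data_reshaping is called here but not defined anywhere in these examples. A guess at its role for network_type == 'CNN' is sketched below: adding a trailing channel axis so a 2-D convolutional stack can consume the (n_windows, window_len, sensor_channels) tensor. The real helper may do something different.

import numpy as np

def _data_reshaping(X, network_type):
    """Hypothetical reshaping helper; the original implementation is not shown."""
    if network_type == 'CNN':
        # (n_windows, window_len, channels) -> (n_windows, window_len, channels, 1)
        return np.expand_dims(X, axis=3)
    return X
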
Example #8
def get_posture_model() -> RandomForestClassifier:
    clf = pickle.loads(get_resource_contents(PUFFMARKER_MODEL_FILENAME))
    return clf
Example #9
def typing_episodes(dataset: pd.DataFrame, offset: int) -> List[DataPoint]:
    """
    This function detects typing episodes.

    Makes a prediction every 200ms using values from a window of 1000ms.
    This means there is an overlap of 800ms between consecutive sample windows.

    :param pd.DataFrame dataset: the synced dataframe of left and right accl and gyro data
    :param int offset: offset for local time
    :return: DataPoints of typing episodes
    :rtype: List[DataPoint]
    """

    dataset = dataset.values

    # 12 columns of x,y,z values for accl and gyro data
    dataset_cp = np.copy(dataset[:, 1:13])

    n_samples, d = dataset_cp.shape

    # Data Reshaping
    # the following lines convert the data stream into a sliding window
    # with window size 1000ms and stride 200 ms

    data_slide = np.zeros((int(
        (n_samples - WINDOW_SIZE) / STRIDE) + 1, WINDOW_SIZE, d))

    # stores the starting time of each window
    time_t = np.zeros((int((n_samples - WINDOW_SIZE) / STRIDE) + 1, 1))
    k = 0
    for i in range(0, n_samples - WINDOW_SIZE, STRIDE):  # 400ms
        data_slide[k, :, :] = dataset_cp[i:i + WINDOW_SIZE, :]
        time_t[k] = dataset[i, 0]
        k = k + 1

    z = 0
    X_test0 = data_slide[z:]

    # Load Trained Model

    tmpfile = tempfile.NamedTemporaryFile(delete=True)
    tmpfile.write(get_resource_contents(TYPING_MODEL_FILENAME))
    tmpfile.flush()  # make sure the model bytes are on disk before load_model reads them
    model = load_model(os.path.realpath(tmpfile.name))
    tmpfile.close()

    # network_type = 'ConvLSTM'
    network_type = 'CNN'
    _, win_len, dim = X_test0.shape

    # data has to be reshaped before being fed into the model
    X_test = _data_reshaping(X_test0, network_type)

    # y_pred = 1 indicates typing
    # y_pred = 0 indicates no_typing
    y_pred = np.argmax(model.predict(X_test), axis=1)

    # Smoothing - to reduce noisy predictions
    indices_type = np.where(y_pred == 1)[0]
    time_type = time_t[
        indices_type]  # contains timestamps of when the user is typing
    data = []
    typing_time = timedelta(0)

    # smooth_labels_3: final output prediction
    # start_time: start time of the typing session
    # end_time: end time of the typing session

    if len(indices_type) > 0:
        pred_l = len(y_pred)
        ind_l = len(indices_type)
        smooth_labels_3 = np.zeros((pred_l, 1))
        s = 0
        start_time = []
        end_time = []

        for i in range(0, ind_l - 1):
            if s == 0:
                start_time.append(time_type[i])
                s = 1

            if (time_type[i + 1] - time_type[i]) < 10000:  # 10000 = 10 seconds
                smooth_labels_3[indices_type[i]:indices_type[i + 1]] = 1
            else:
                end_time.append(time_type[i] + 200)  # 200 = 200 milliseconds
                s = 0
        end_time.append(time_type[-1] + 200)  # 200 = 200 milliseconds

        for i in range(0, len(start_time)):
            st = datetime.fromtimestamp(int(float(start_time[i])))
            et = datetime.fromtimestamp(int(float(end_time[i])))
            if st.day != et.day:
                et = datetime(st.year, st.month, st.day) + timedelta(
                    hours=23, minutes=59, seconds=59)
            typing_time = et - st

            # data.append(DataPoint(start_time=st, end_time=et, offset=offset,sample=1))
            # data.append(DataPoint(st,et,offset,[1,float(format(typing_time.seconds/60,'.3f'))]))
            data.append(
                DataPoint(
                    start_time=st,
                    end_time=et,
                    offset=offset,
                    sample=[1,
                            float(format(typing_time.seconds / 60, '.3f'))]))

    return data
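
The explicit windowing loops in both typing_episodes variants can also be expressed with numpy's sliding_window_view. A sketch with made-up sizes (the real WINDOW_SIZE and STRIDE constants are not part of these excerpts):

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

WINDOW_SIZE, STRIDE = 100, 20            # hypothetical sample counts per window / step
signal = np.random.rand(1000, 12)        # (n_samples, 12 sensor channels)

# every window of WINDOW_SIZE rows, keeping one window per STRIDE samples
windows = sliding_window_view(signal, WINDOW_SIZE, axis=0)[::STRIDE]
windows = np.transpose(windows, (0, 2, 1))   # -> (n_windows, WINDOW_SIZE, 12)
print(windows.shape)                         # (46, 100, 12)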