Example #1
def get_blank_distribution(self):
    # Untrained 3-component Gaussian mixture: each component is an
    # IndependentComponentsDistribution of standard normals, one per feature.
    nd = 3
    dist_list = []
    for _ in range(nd):
        dist_list.append(pg.IndependentComponentsDistribution(
            [pg.NormalDistribution(0, 1) for _ in range(len(self.feature_list))]))
    return pg.GeneralMixtureModel(dist_list, weights=[1 / nd] * nd)
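A minimal usage sketch, assuming pg is pomegranate 0.x and obj is a hypothetical instance whose feature_list has three entries; the blank mixture is then refined by EM:

import numpy as np
import pomegranate as pg

X = np.random.randn(200, 3)           # toy data: 200 samples, 3 features
gmm = obj.get_blank_distribution()    # obj is hypothetical, see note above
gmm.fit(X)                            # EM moves the components away from N(0, 1)
print(gmm.log_probability(X[:5]))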
Example #2
import numpy as np
import pomegranate as pg


def get_dist(data_vec):
    # One univariate Normal per feature vector; NaNs are dropped before fitting.
    dist_list = []
    for vec in data_vec:
        vec = vec[~np.isnan(vec)]
        if len(vec):
            # Floor the standard deviation so a constant feature stays usable.
            dist_list.append(pg.NormalDistribution(np.nanmean(vec), max(np.std(vec), 1E-6)))
        else:
            # No observations: fall back to an essentially uninformative Normal.
            dist_list.append(pg.NormalDistribution(0, 999999))
    return pg.IndependentComponentsDistribution(dist_list)
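A quick sketch of a call site, assuming pomegranate 0.x; each row of the (hypothetical) data_vec holds the observed values of one feature:

data_vec = np.array([[1.0, 2.0, np.nan, 3.0],
                     [0.5, 0.5, 0.5, 0.5]])
dist = get_dist(data_vec)
print(dist.log_probability([2.0, 0.5]))   # joint log-density of one two-feature sample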
Example #3
def create_dist_for_states(n_states, feature_list, seed):
    # Build one IndependentComponentsDistribution per hidden state from a
    # per-feature helper; create_independent_dist is defined elsewhere.
    distributions = []
    i = 0
    for _ in range(n_states):
        dist_list = []
        for f in feature_list:
            dist_ = create_independent_dist(f, i)
            i += 1
            dist_list.append(dist_)
        distributions.append(pom.IndependentComponentsDistribution(dist_list))
    return distributions
Example #4
import numpy as np
import pomegranate as pgn


def build_model(n_bins, n_cmps, n_features, means, stds, state_names=None):
    # Random initial (mean, std) pairs for every Gaussian component
    dist_init = np.random.random((n_bins, n_cmps, n_features, 2))
    dist_init[..., 0] -= 0.5  # Shift the mean column from [0, 1) to [-0.5, 0.5)
    for feat_i in range(n_features):
        # Random initial mean in [mean - 2*std, mean + 2*std)
        dist_init[..., feat_i, 0] *= 4 * stds[feat_i]
        dist_init[..., feat_i, 0] += means[feat_i]
        # Random init std in range [0, std)
        dist_init[..., feat_i, 1] *= stds[feat_i]

    if n_cmps > 1:
        dists = tuple(
            pgn.GeneralMixtureModel(
                list(
                    pgn.IndependentComponentsDistribution(
                        tuple(
                            pgn.NormalDistribution(*dist_init[bin_i, cmp_i,
                                                              feat_i, :])
                            for feat_i in range(n_features)))
                    for cmp_i in range(n_cmps))) for bin_i in range(n_bins))
    else:
        dists = tuple(
            pgn.IndependentComponentsDistribution(
                tuple(
                    pgn.NormalDistribution(*dist_init[bin_i, 0, feat_i, :])
                    for feat_i in range(n_features)))
            for bin_i in range(n_bins))
    # Random (unnormalized) transition matrix and uniform start weights
    trans_mat = np.random.random((n_bins, n_bins))
    starts = np.ones(n_bins)

    model = pgn.HiddenMarkovModel.from_matrix(trans_mat,
                                              dists,
                                              starts,
                                              state_names=state_names)
    return model
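A rough usage sketch, assuming pomegranate 0.x; the dimensions and data below are toy values, not from the original code:

means, stds = np.zeros(2), np.ones(2)       # hypothetical per-feature statistics
model = build_model(n_bins=4, n_cmps=2, n_features=2, means=means, stds=stds)
seq = np.random.randn(50, 2)                # one observation sequence
model.fit([seq], max_iterations=10)         # Baum-Welch refinement
states = model.predict(seq)                 # most likely bin index per time step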
Example #5
    def normal_dist(self, feature_names):
        possible_states = ['NonREM1', 'NonREM2', 'NonREM3', 'REM', 'Wake']
        state_names = []
        set_of_state_sets = []
        # Keep only the sleep stages that actually occur in the training data.
        for state in possible_states:
            state_rows = self.train[self.train.hypnogram_User == state]
            if not state_rows.empty:
                set_of_state_sets.append(state_rows)
                state_names.append(state)
        binary_features = [
            "Gain", "Bradycardia", "LegMovement", "CentralApnea", "Arousal",
            "Hypopnea", "RelativeDesaturation", "Snore", "ObstructiveApnea",
            "MixedApnea", "LongRR", "Tachycardia"
        ]
        # For each observed state, fit a Bernoulli distribution to binary
        # features and a Normal distribution to continuous ones.
        state_multidistributions = []
        for state_rows in set_of_state_sets:
            state_dist = []
            for i in range(self.n_features):
                samples = state_rows[self.data_columns[i]].tolist()
                if feature_names[i] in binary_features:
                    state_dist.append(pg.BernoulliDistribution.from_samples(samples))
                else:
                    state_dist.append(pg.NormalDistribution.from_samples(samples))
            state_multidistributions.append(state_dist)
        dist = [
            pg.IndependentComponentsDistribution(x)
            for x in state_multidistributions
        ]

        return dist, state_names
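These per-state emission models are typically wired into an HMM together with the returned state names. A rough sketch, assuming pomegranate 0.x; the caller object and the uniform transition matrix are illustrative assumptions, not part of the original code:

dists, names = sleep_stager.normal_dist(feature_names)   # sleep_stager is hypothetical
n = len(dists)
trans_mat = np.full((n, n), 1.0 / n)                      # uniform transitions (assumed)
starts = np.full(n, 1.0 / n)
hmm = pg.HiddenMarkovModel.from_matrix(trans_mat, dists, starts, state_names=names)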