Example #1
import numpy as np

# signal_smooth is a smoothing helper assumed to be defined elsewhere in this module
def generate_mean_grasp(datasets):

    smooth_wind_default = 234
    smooth_wind = smooth_wind_default
    means = {}
    for perception in datasets:
        data_list = datasets[perception]

        # get the maximum model length
        lens = [len(m) for m in data_list]
        max_len = np.max(lens)

        ret_means = []
        # find the number of coordinates from the first element
        num_coords = len(data_list[0][0][1])
        for coord in range(num_coords):
            mean_models = []
            for stream in data_list:
                # extract the data stream for a single coordinate (e.g. all x values when coord == 0)
                stream_coord = np.array([sample[1][coord] for sample in stream])
                cur_mean_model = signal_smooth(stream_coord, smooth_wind)
                mean_models += [cur_mean_model]

            # find the average case over the several runs
            avg_means_model = np.array([0.] * max_len)
            for i in range(max_len):
                n = 0
                for j in range(len(mean_models)):
                    if i < len(mean_models[j]):
                        avg_means_model[i] += mean_models[j][i]
                        n += 1
                avg_means_model[i] /= n

            ret_means += [avg_means_model]

        means[perception] = np.array(list(zip(*ret_means)))

    return means
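
Both examples assume a datasets mapping from a perception name to a list of recorded streams, where each stream is a sequence of (timestamp, coordinates) samples, and they rely on a signal_smooth(signal, window) helper defined elsewhere. The sketch below only illustrates that assumed layout with made-up values and a simple moving-average stand-in for signal_smooth; it is not part of the original code.

import numpy as np

def signal_smooth(signal, window):
    # hypothetical stand-in: moving average, with the window clamped to the signal length
    window = max(1, min(int(window), len(signal)))
    kernel = np.ones(window) / float(window)
    return np.convolve(signal, kernel, mode='same')

# assumed layout: {perception: [stream, ...]}, stream = [(timestamp, (x, y, z)), ...]
datasets = {
    'pressure': [
        [(0.00, (0.1, 0.2, 0.0)), (0.05, (0.2, 0.3, 0.1)), (0.10, (0.3, 0.2, 0.1))],
        [(0.00, (0.0, 0.2, 0.1)), (0.05, (0.1, 0.4, 0.1))],
    ],
}

means = generate_mean_grasp(datasets)
print(means['pressure'].shape)   # -> (3, 3): max stream length x number of coordinates
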
Example #2
# This reads like a method: it fills in self.models and reads smooth_wind_dict,
# smooth_wind_default, var_wind_dict, var_wind_default and var_smooth_wind, which are
# assumed to be defined elsewhere, along with the signal_smooth and
# signal_list_variance helpers.
def split_signals(self, datasets):

    for perception in datasets:
        data_list = datasets[perception]

        # get the maximum model length
        lens = [len(m) for m in data_list]
        max_len = np.max(lens)

        # pick the smoothing window for this perception, falling back to the default
        if smooth_wind_dict is None or smooth_wind_dict[perception] is None:
            smooth_wind = smooth_wind_default
        else:
            smooth_wind = smooth_wind_dict[perception]

        ret_means, ret_vars, ret_mean_models, ret_noise_vars = [], [], [], []
        ret_times, noise_vars = [], []
        # find the number of coordinates from the first element
        num_coords = len(data_list[0][0][1])
        for coord in range(num_coords):
            mean_models, variance_models = [], []
            times = None
            for stream in data_list:
                # extract the data stream for a single coordinate (e.g. all x values when coord == 0)
                stream_coord = np.array([sample[1][coord] for sample in stream])
                cur_mean_model = signal_smooth(stream_coord, smooth_wind)
                mean_models += [cur_mean_model]
            
                # average squared difference between the smoothed model and the raw stream
                noise_vars += [sum((x - y) ** 2 for x, y in zip(cur_mean_model, stream_coord))
                               / len(cur_mean_model)]

            # find the average case over the several runs
            avg_means_model = np.array([0.] * max_len)
            for i in range(max_len):
                n = 0
                for j in range(len(mean_models)):
                    if i < len(mean_models[j]):
                        avg_means_model[i] += mean_models[j][i]
                        n += 1
                avg_means_model[i] /= n

            if var_wind_dict is None or var_wind_dict[perception] is None:
                var_wind = var_wind_default
            else:
                var_wind = var_wind_dict[perception]
            # find the variance of the signal but use var_wind points around the centers
            # to increase the sample size
            vars_model = signal_list_variance(mean_models, avg_means_model, var_wind)
            vars_model = signal_smooth(vars_model, var_smooth_wind)
            vars_model = signal_smooth(vars_model, var_smooth_wind + 23)

            ret_times += [times]
            ret_means += [avg_means_model]
            ret_vars += [vars_model]
            ret_mean_models += [mean_models]
            ret_noise_vars += [np.average(noise_vars)]

        # TODO deal with timestamp data in some way?
        # self.models[perception]["time"] = ret_times
        self.models[perception]["mean"] = np.array(zip(*ret_means))
        self.models[perception]["variance"] = np.array(zip(*ret_vars))
        # reorganize the smoothed models from [coord][stream][sample]
        # into [stream][sample][coord] order
        a = ret_mean_models
        b = []
        for stream in range(len(a[0])):
            t1 = []
            for val in range(len(a[0][0])):
                t2 = []
                for coord in range(len(a)):
                    # streams can have different lengths, so guard the index
                    if val < len(a[coord][stream]):
                        t2 += [a[coord][stream][val]]
                t1 += [np.array(t2)]
            b += [t1]

        self.models[perception]["smoothed_signals"] = b
        self.models[perception]["noise_variance"] = np.array(ret_noise_vars)

    return self.models
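
Example #2 writes into self.models and relies on window parameters (smooth_wind_dict, smooth_wind_default, var_wind_dict, var_wind_default, var_smooth_wind) and a signal_list_variance helper that do not appear above. The sketch below shows one plausible surrounding context; the class name, the parameter values and the variance helper are assumptions, not the original implementation.

import numpy as np

# assumed module-level configuration read by split_signals (values are guesses)
smooth_wind_default = 234
smooth_wind_dict = None
var_wind_default = 10
var_wind_dict = None
var_smooth_wind = 5

def signal_list_variance(models, reference, window):
    # hypothetical stand-in: for each index of the reference signal, pool the samples
    # of every model that fall inside +/- window around that index and take their variance
    variances = []
    for i in range(len(reference)):
        samples = []
        for model in models:
            lo, hi = max(0, i - window), min(len(model), i + window + 1)
            samples.extend(model[lo:hi])
        variances.append(np.var(samples) if samples else 0.0)
    return np.array(variances)

class GraspModelBuilder(object):
    # hypothetical wrapper class; binds the function above as a method
    split_signals = split_signals

    def __init__(self, perceptions):
        # one empty sub-dictionary per perception, filled in by split_signals
        self.models = dict((p, {}) for p in perceptions)

builder = GraspModelBuilder(['pressure'])
models = builder.split_signals(datasets)   # `datasets` as in the sketch after Example #1
print(models['pressure']['mean'].shape, models['pressure']['noise_variance'])
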