Example No. 1
def Cubic_Spline_Approximation_Smoothing(y, x, smooth=None):
    xi = x  # evaluate the spline at the original data sites (i = interpolation)
    if smooth is None:
        y_csaps, smooth = csaps(x, y, xi)
    else:
        y_csaps = csaps(x, y, xi, smooth=smooth)
    return y_csaps, smooth
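When smooth is omitted, csaps chooses the smoothing factor automatically and the call returns the smoothed values together with the computed factor, which is why the two branches above unpack differently. A minimal usage sketch with made-up data (assumes the function above is in scope):

import numpy as np
from csaps import csaps

x = np.linspace(-5.0, 5.0, 50)
y = np.exp(-(x / 2.5) ** 2) + np.random.randn(50) * 0.05

yi, auto_smooth = Cubic_Spline_Approximation_Smoothing(y, x)             # smooth chosen automatically
yi2, _ = Cubic_Spline_Approximation_Smoothing(y, x, smooth=auto_smooth)  # reuse the computed factor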
Example No. 2
def get_smooth_coor(cont1):
    x, y = getXY(cont1)
    index = list(range(len(x)))
    index2 = np.linspace(index[0], index[-1], 5000)
    xs = csaps(index, x, index2, smooth=0.01)
    ys = csaps(index, y, index2, smooth=0.01)
    return xs, ys
Example No. 3
def test_shortcut_output(data):
    x, y, xi, smooth, sp_cls = data

    yi = csaps(x, y, xi, smooth=smooth)
    assert isinstance(yi, np.ndarray)

    smoothed_data = csaps(x, y, xi)
    assert isinstance(smoothed_data, SmoothingResult)

    sp = csaps(x, y)
    assert isinstance(sp, sp_cls)
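The test above relies on the csaps shortcut returning a different kind of object depending on which arguments are passed: an array of values when both xi and smooth are given, an auto-smoothing result when only xi is given, and a spline object when only the data are given. A rough illustration of the three call shapes with hypothetical 1-D data (the exact result class name differs between csaps versions, e.g. SmoothingResult vs AutoSmoothingResult):

import numpy as np
from csaps import csaps

x = np.linspace(0.0, 1.0, 20)
y = np.sin(2 * np.pi * x) + np.random.randn(20) * 0.1
xi = np.linspace(0.0, 1.0, 100)

yi = csaps(x, y, xi, smooth=0.85)  # ndarray of smoothed values
res = csaps(x, y, xi)              # auto-smoothing result (values plus computed smooth)
sp = csaps(x, y)                   # spline object, callable as sp(xi)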
Example No. 4
def bench_evaluate_spline(benchmark, ndgrid_data, output_data_sites, ndim, input_size, output_size):
    shape = [input_size] * ndim
    x, y = ndgrid_data(shape=shape)
    xi = output_data_sites(x, output_size)

    spline = csaps(x, y)
    benchmark(spline, xi)
Example No. 5
def data_smoothing(x, y, N, smooth, plot, plot_title):
    '''
    x: interpolated time, the unified independent variable
    y: values to be smoothed
    N: number of points used to discretize the independent variable
    smooth: smoothing factor, e.g. smooth=0.999
    plot: boolean; if True, a plot of the filtered data is shown
    yhat: returned smoothed/filtered values
    '''
    # Stack x and y as columns and drop duplicate time values
    data = np.column_stack((x, y))
    uniqueValues, indicesList = np.unique(data[:, 0], return_index=True)
    data_raw = data[indicesList, :]

    xs = np.linspace(data_raw[0, 0], data_raw[-1, 0], N)  # uniform grid (not used by the call below)
    yhat = csaps(x, y, x, smooth=smooth)
    #yhat = savgol_filter(xs, 51, 5)
    if plot:
        fig_size = (12, 4)
        fig, axs = plt.subplots(1, 1, figsize=fig_size)
        fig.canvas.manager.set_window_title('Data')
        fig.suptitle('Data Filtered - ' + plot_title)
        plt.plot(x, y, label='y')
        plt.plot(x, yhat, color='red', label='yhat')
        plt.legend()
        plt.grid()

        #plt.show()

    return yhat
Example No. 6
def bench_evaluate_spline(benchmark, multivariate_data, output_data_sites,
                          ndim, input_size, output_size):
    x, y = multivariate_data(ndim=ndim, size=input_size)
    xi = output_data_sites(x, size=output_size)
    spline = csaps(x, y)

    benchmark(spline, xi)
Example No. 7
def test_shortcut_output(data, tolist):
    x, y, xi, smooth, sp_cls = data

    if tolist and isinstance(x, np.ndarray):
        x = x.tolist()
        y = y.tolist()
        xi = xi.tolist()

    yi = csaps(x, y, xi, smooth=smooth)
    assert isinstance(yi, np.ndarray)

    smoothed_data = csaps(x, y, xi)
    assert isinstance(smoothed_data, AutoSmoothingResult)

    sp = csaps(x, y)
    assert isinstance(sp, sp_cls)
Example No. 8
    def get_2d_spline(self, x, y, z):
        # Fit a smoothing spline to (x, y); z is currently unused
        sp = csaps(x, y, smooth=0.05)
        # Evaluate the spline on 150 evenly spaced points over the x range
        xs = np.linspace(np.array(x).min(), np.array(x).max(), 150)
        ys = sp(xs)
        # Return the curve as an (N, 2) array of (x, y) points
        spline = np.stack((xs, ys), axis=1)
        return spline
Example No. 9
def normalize_input(arr):

    smooth = csaps(range(arr.shape[0]), arr, range(arr.shape[0]), smooth=0.8)

    _max = np.max(smooth)
    _min = np.min(smooth)
    normalized = (smooth - _min) / (_max - _min)
    return torch.from_numpy(normalized)[None, None, :], _max, _min
Example No. 10
def test_normalized_smooth(data, smooth, scale):
    x, y, xi, *_ = data

    x2 = ([scale * np.array(xx, dtype=np.float64) for xx in x]
          if isinstance(x, list)
          else scale * np.array(x, dtype=np.float64))
    xi2 = ([scale * np.array(xx, dtype=np.float64) for xx in xi]
           if isinstance(x, list)
           else scale * np.array(xi, dtype=np.float64))

    smoothed_data_a = csaps(x, y, xi, smooth=smooth, normalizedsmooth=True)
    smoothed_data_b = csaps(x2, y, xi2, smooth=smooth, normalizedsmooth=True)

    if isinstance(smoothed_data_a, AutoSmoothingResult):
        smoothed_data_a = smoothed_data_a.values
        smoothed_data_b = smoothed_data_b.values

    assert smoothed_data_a == pytest.approx(smoothed_data_b, rel=1e-2)
Example No. 11
def directional_index(xarn, y, price):
    global old
    global biggest_dif
    global yar
    global yi

    yar_percent = []

    difference = y[-1] - old

    if difference != price:
        yar.append(difference)
        if difference >= 0:
            if difference > biggest_dif:
                biggest_dif = difference
                for x in yar:
                    yar_percent.append((x / biggest_dif) * 100)
            else:
                for x in yar:
                    yar_percent.append((x / biggest_dif) * 100)
        else:
            if difference > biggest_dif:
                biggest_dif = difference
            for x in yar:
                if x < 0:
                    yar_percent.append(yar_percent[-1] +
                                       ((x / biggest_dif) * 100))
                else:
                    yar_percent.append((x / biggest_dif) * 100)

    else:
        yar.append(0)
        yar_percent.append(0)

    yar_percent = [0 if math.isnan(x) else x for x in yar_percent]

    num_series = pd.Series(yar_percent)
    window = num_series.rolling(10)
    mov_av = window.mean()

    mov_list = mov_av.tolist()

    mov_list = [0 if math.isnan(x) else x for x in mov_list]

    if len(xarn) >= 2:
        xi = np.linspace(xarn[0], xarn[-1], len(xarn))
        yi = csaps(xarn, np.array(mov_list), xi, smooth=0.0001)
    else:
        yi = [0, 0]

    old = y[-1]

    return yi
Example No. 12
def fitting(signal_, theta, plot=False):

    theta_s = np.arange(-np.pi, np.pi, np.pi / len(signal_))
    newdf = pd.DataFrame({'theta': theta, 'signal': signal_})
    newdf = newdf.sort_values(by='theta')
    # Fit the smoothing spline on the theta-sorted data and evaluate it on theta_s
    avg = csaps(newdf['theta'].to_numpy(), newdf['signal'].to_numpy(), theta_s, smooth=0.95)

    if plot:
        plt.scatter(np.rad2deg(newdf['theta']), newdf['signal'], s=0.01)
        plt.plot(np.rad2deg(theta_s), avg)
        plt.xlabel('theta [deg]')
    return avg, theta_s
Example No. 13
def normalize_input(arr):

    smooth = csaps(range(arr.shape[0]), arr, range(arr.shape[0]), smooth=0.8)

    _max = np.max(smooth)
    _min = np.min(smooth)

    if _max - _min != 0:
        normalized = (smooth - _min) / (_max - _min)
    else:
        normalized = (smooth - _min)

    return tf.convert_to_tensor(normalized)[None, :, None], _max, _min
Example No. 14
def angle_theta(df, content):
    t = df.index
    Fy1 = df['Fy1'].to_numpy()
    Fy2 = df['Fy2'].to_numpy()
    Fy3 = df['Fy3'].to_numpy()
    fr = rotor_freq(df['RPM'])
    angle = angle_turbine(t, Fy1, Fy2, Fy3, fr)
    #plt.scatter(angle_theta['theta'], df[content], s= 0.01)
    newdf = pd.DataFrame({'theta':angle['theta'], 'signal': df[content]})

    angle_df_sorted = newdf.sort_values(by = 'theta')
    theta_s = np.arange(-np.pi, np.pi, np.pi/len(newdf))
    fx_s = csaps(angle_df_sorted['theta'], angle_df_sorted['signal'], theta_s, smooth = .9)
    #plt.plot(theta_s, fx_s)
    fit_df = pd.DataFrame({'theta_s': theta_s, 'Average': fx_s})
    return angle_df_sorted, fit_df
Example No. 15
def morphological_smoothing(wn, rawspectrum, smoothing_penalty_at_denoising=1):
    """
    function that can be used for smoothing and/or de-noising
    JRS 2017, DOI 10.1002/jrs.5010
    smoothing_penalty_at_denoising=1 is for cubic spline (good choice for de-noising);
    for penalized spline you can set it, say, to 0.1
    """
    global min_struct_el
    rmsnoise = morphological_noise(rawspectrum)
    basic_m_smoothing = skm.opening(rawspectrum, np.ones(min_struct_el))
    basic_m_smoothing = skm.closing(basic_m_smoothing, np.ones(min_struct_el))
    knots_K1 = np.where(np.abs(basic_m_smoothing - rawspectrum) < rmsnoise)
    smoothed_spectrum = csaps(wn[knots_K1],
                              rawspectrum[knots_K1],
                              wn,
                              smooth=smoothing_penalty_at_denoising)
    return smoothed_spectrum
Example No. 16
def morph_pspline_baseline(x,
                           y,
                           display=2,
                           patch=1,
                           smoothing_penalty_at_baseline=0.1):
    """
    Morphology-based cubic p-spline baseline
    Gonzales-Vidal et al. JRS 2017 DOI 10.1002/jrs.5130    
    If patch=1 add to the K2 knots the edges of the spectrum
     (otherwise you may get insane deviation from the expected baseline)
    """
    current_struct_el = find_structure_element(y)
    rmsnoise = morphological_noise(y)

    opening_modified = skm.opening(y, np.ones(current_struct_el))
    opening_modified = np.minimum(
        opening_modified, 0.5 * (skm.dilation(y, np.ones(current_struct_el)) +
                                 skm.erosion(y, np.ones(current_struct_el))))

    knots_K2 = np.where(np.abs(y - opening_modified) < rmsnoise * 1e-11)[0]
    if patch > 0:
        # correction for end-points of the spectrum and K2 knots:
        #   if the 1st and the last points are not in K2,
        #   then add them with small weights
        if knots_K2[0] != 0:
            knots_K2 = np.insert(knots_K2, 0, 0)
        if knots_K2[-1] != len(x) - 1:
            knots_K2 = np.append(knots_K2, len(x) - 1)

    baseline = csaps(x[knots_K2],
                     y[knots_K2],
                     x,
                     smooth=smoothing_penalty_at_baseline)
    subtractedspectrum = y - baseline
    if display > 1:
        plt.plot(x, subtractedspectrum, 'r', x, y, 'k', x, baseline, 'b')
        plot_annotation = 'opening + p-spline'
        plt.text(0.5,
                 0.92,
                 plot_annotation,
                 horizontalalignment='center',
                 verticalalignment='center',
                 transform=plt.gca().transAxes)
        plt.show()
    return baseline
Example No. 17
def random_baseline(x, number_of_knots=5, smoothing_penalty=0.1, display=0):
    # a: define the knots
    knots = np.rint(np.linspace(0, len(x) - 1,
                                num=number_of_knots)).astype(int)
    y_for_knots = 2 * (np.random.random(number_of_knots) - 0.5)
    # b: compute spline
    random_baseline = csaps(x[knots], y_for_knots, x, smooth=smoothing_penalty)
    # c: #@Test&Debug:
    if display > 0:
        plt.plot(x, random_baseline, 'k--')
        plt.plot(x[knots],
                 random_baseline[knots],
                 'o',
                 mfc='none',
                 ms=6,
                 mec='red')
        plt.show()
    return random_baseline
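For context, a hedged call sketch for the helper above (the wavenumber grid is made up; numpy, matplotlib and csaps are assumed to be imported as in the surrounding examples):

x = np.linspace(400.0, 1800.0, 700)  # hypothetical wavenumber grid
baseline = random_baseline(x, number_of_knots=7, smoothing_penalty=0.1, display=0)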
Example No. 18
    def fit(self, x, y):
        """Fit smoothing spline.

        Parameters
        ----------
        x: array-like
            Training data, input array.
        y: array-like
            Target values

        Returns
        -------
        self: returns an instance of self.

        """
        self.x = x
        self.y = y
        self.model = csaps(x, y, smooth=self.smooth)
        return self
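The snippet stores the fitted spline in self.model, and csaps(x, y) returns a spline object that is evaluated by calling it. A matching predict method, not shown in the original, might plausibly look like this sketch:

    def predict(self, xi):
        """Evaluate the fitted smoothing spline at new sites xi (hypothetical counterpart to fit)."""
        return self.model(xi)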
Example No. 19
def run(model, train_iter, kafka_consumer, kafka_producer, live_plot=None):

    opt = keras.optimizers.Adam(learning_rate=CFG.train.LR)
    crit = keras.losses.MSE

    while (True):

        with kafka_consumer.lock:
            hist, i, time, msg_uuid = kafka_consumer.get_current()

            if i < train_iter + SEQ_LEN + PRED_LEN:
                tensor_in, _max, _min = normalize_input(hist[0:i])

            else:
                tensor_in, _max, _min = normalize_input(
                    hist[i - (train_iter + SEQ_LEN + PRED_LEN):i])

            train(model, tensor_in, SEQ_LEN, PRED_LEN, opt, crit)

            in_len = tensor_in.shape[0]
            for j in range(PRED_LEN):
                model(tensor_in[:,
                                -(SEQ_LEN + PRED_LEN - j):-(PRED_LEN - j), :])

            if _max - _min != 0:
                pred = model(tensor_in[:,
                                       -SEQ_LEN:, :])[0] * (_max - _min) + _min
            else:
                pred = model(tensor_in[:, -SEQ_LEN:, :])[0] + _min

        spline_smooth = csaps(range(SEQ_LEN, SEQ_LEN + PRED_LEN),
                              pred,
                              range(SEQ_LEN, SEQ_LEN + PRED_LEN),
                              smooth=0.8)

        kafka_producer.send_predictions(spline_smooth, time, msg_uuid)

        if live_plot is not None:
            live_plot.update(hist[i - SEQ_LEN:i], spline_smooth)

        wait_for_new(kafka_consumer, i)
Example No. 20
def csaps_1d(in_x, in_t, value, uncertainty, out_x):
    final_sig = []

    for time_ind in range(len(in_t)):
        excluded_inds = np.isnan(value[time_ind, :])
        y = value[time_ind, ~excluded_inds]
        x = in_x[time_ind, ~excluded_inds]
        err = uncertainty[time_ind, ~excluded_inds]

        inds = np.argsort(x)
        x = x[inds]
        y = y[inds]

        try:
            # Evaluate the smoothing spline on the requested output grid
            final_sig.append(csaps(x, y, out_x, smooth=0.99995))
        except Exception:
            # Fall back to zeros when the fit fails (e.g. too few valid points)
            final_sig.append(np.zeros(len(out_x)))

    final_sig = np.array(final_sig)
    return final_sig
Example No. 21
    def smoothen_absolute_transformations(
        self,
        smooth,
        initial_frames_count,
        initial_frames_weight,
        non_keyframes_weight,
    ):
        x = np.arange(len(self.detection_frames))
        y = np.array([
            detection_frame.transformation
            for detection_frame in self.detection_frames
        ])

        weights = np.ones(len(self.detection_frames))
        weights[[not d.is_keyframe
                 for d in self.detection_frames]] = non_keyframes_weight
        weights[:initial_frames_count] = initial_frames_weight

        yi = csaps.csaps(x, y, x, smooth=smooth, axis=0, weights=weights)
        # yi = scipy.signal.savgol_filter(x=y, window_length=49, polyorder=2, axis=0)

        for detection_frame, smoothed_matrix in zip(self.detection_frames, yi):
            detection_frame.transformation = smoothed_matrix
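This is the only snippet here that combines per-frame weights with axis=0, so each flattened transformation is smoothed along the frame axis while keyframes keep more influence. A small standalone sketch of that pattern with toy data (assumes the same module-style csaps import used above):

import numpy as np
import csaps

frames = np.arange(30)
transforms = np.random.rand(30, 9)  # e.g. one flattened 3x3 transformation per frame
weights = np.ones(30)
weights[::3] = 0.2                  # down-weight every third frame

smoothed = csaps.csaps(frames, transforms, frames, smooth=0.9, axis=0, weights=weights)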
Example No. 22
def run(model, train_iter, kafka):

    opt = optim.AdamW(model.parameters(), lr=CFG.train.LR)
    crit = nn.MSELoss()
    producer = KafkaPredictionProducer(CFG.kafka.out_topic, CFG.kafka.ip,
                                       CFG.kafka.input_interval)

    while (True):

        with kafka.lock:
            hist, i, time, msg_uuid = kafka.get_current()

            if i < train_iter + SEQ_LEN + PRED_LEN:
                tensor_in, _max, _min = normalize_input(hist[0:i])

            else:
                tensor_in, _max, _min = normalize_input(
                    hist[i - (train_iter + SEQ_LEN + PRED_LEN):i])

            train(model, tensor_in, opt, crit, SEQ_LEN, PRED_LEN)

            model.eval()
            in_len = tensor_in.shape[0]
            with torch.no_grad():

                for j in range(PRED_LEN - 1):
                    model(tensor_in[in_len - (SEQ_LEN + PRED_LEN - j):in_len])
                pred = model(tensor_in[in_len - SEQ_LEN:in_len])[0] * (
                    _max - _min) + _min

        spline_smooth = csaps(range(SEQ_LEN, SEQ_LEN + PRED_LEN),
                              pred,
                              range(SEQ_LEN, SEQ_LEN + PRED_LEN),
                              smooth=0.8)

        producer.send_predictions(spline_smooth, time, msg_uuid)
        wait_for_new(kafka, i)
Example No. 23
def retargeting_otimization_3D(joints,
                               trans,
                               source_betas,
                               source_model_type,
                               motion_thetas,
                               all_restrictions,
                               smoothing=0.8):

    end_effector = [20, 21, 7, 8]

    config = flags.FLAGS
    pose_mean, pose_covariance = MuVs_util.load_initial_param()

    if source_model_type == 1:
        model_path = config.smpl_model_path_m
    elif source_model_type == 2:
        model_path = config.smpl_model_path_f
    else:
        model_path = config.smpl_model_path

    print("IK to inicialization")

    window = 40

    for i in range(0, joints.shape[0], window):
        print("Doing ", i)

        g_2, betas_in, pose_new, trans_new, joints_new, error, loss_3d, loss_sim, loss_prior, prior_pose, loss = buid_graph_body_3D(
            trans[i:np.minimum(i + window, joints.shape[0])],
            motion_thetas[i:np.minimum(i + window, joints.shape[0])],
            joints[i:np.minimum(i + window, joints.shape[0])], pose_mean,
            pose_covariance, model_path, all_restrictions, i,
            np.minimum(i + window, joints.shape[0] - 1))

        with tf.Session(config=config_tf, graph=g_2) as sess:
            sess.run(tf.global_variables_initializer())
            for s in range(300):
                pose_resposta = sess.run(
                    [
                        pose_new, trans_new, joints_new, error, loss_3d,
                        loss_sim, loss_prior, loss
                    ],
                    feed_dict={
                        betas_in:
                        np.reshape(
                            np.array([
                                source_betas for i in range((
                                    joints[i:np.
                                           minimum(i +
                                                   window, joints.shape[0])]
                                ).shape[0])
                            ]), (-1, 10)),
                        prior_pose:
                        1.0e-4
                    })
            #print (str(pose_resposta[4]) + " " + str(pose_resposta[5]) + " " + str(pose_resposta[6]) + " " + str(pose_resposta[7]))

            trans[i:np.minimum(i + window, joints.shape[0])] = pose_resposta[1]
            motion_thetas[i:np.minimum(i + window, joints.shape[0]
                                       )] = pose_resposta[0]
            joints[i:np.minimum(i +
                                window, joints.shape[0])] = pose_resposta[2]

        sess.close()

    time = np.arange(joints.shape[0])

    for joint in end_effector:
        w = np.ones_like(time) * 1.0
        for t in time:
            for restri in all_restrictions:
                if restri.my_type == 0:
                    if restri.joint == joint:
                        if t > restri.start and t < restri.end:
                            w[t] = 0.5
                        if t == restri.start or t == restri.end:
                            w[t] = 50.0
                        if len(restri.restriction_point) == 0 or (
                                t in restri.restriction_point):
                            w[t] = 100.0

        #pdb.set_trace()
        joints_sm = np.transpose(
            csaps(time,
                  np.transpose(joints[:, joint, :]),
                  time,
                  weights=w,
                  smooth=smoothing))

        for i, wi in enumerate(w):
            if wi < 1.0:
                joints[i, joint, :] = joints_sm[i, :]

    print("IK adjustment")

    all_restrictions_now = [
        Restriction(my_type=0, start=0, end=joints.shape[0] + 1, joint=7),
        Restriction(my_type=0, start=0, end=joints.shape[0] + 1, joint=8),
        Restriction(my_type=0, start=0, end=joints.shape[0] + 1, joint=20),
        Restriction(my_type=0, start=0, end=joints.shape[0] + 1, joint=21)
    ]

    for i in range(0, joints.shape[0], window):
        print("Doing ", i)

        g_2, betas_in, pose_new, trans_new, joints_new, error, loss_3d, loss_sim, loss_prior, prior_pose, loss = buid_graph_body_3D(
            trans[i:np.minimum(i + window, joints.shape[0])],
            motion_thetas[i:np.minimum(i + window, joints.shape[0])],
            joints[i:np.minimum(i + window, joints.shape[0])], pose_mean,
            pose_covariance, model_path, all_restrictions_now, i,
            np.minimum(i + window, joints.shape[0] - 1))

        with tf.Session(config=config_tf, graph=g_2) as sess:
            sess.run(tf.global_variables_initializer())
            for s in range(300):
                pose_resposta = sess.run(
                    [
                        pose_new, trans_new, joints_new, error, loss_3d,
                        loss_sim, loss_prior, loss
                    ],
                    feed_dict={
                        betas_in:
                        np.reshape(
                            np.array([
                                source_betas for i in range((
                                    joints[i:np.
                                           minimum(i +
                                                   window, joints.shape[0])]
                                ).shape[0])
                            ]), (-1, 10)),
                        prior_pose:
                        1.0e-4
                    })
            #print (str(pose_resposta[4]) + " " + str(pose_resposta[5]) + " " + str(pose_resposta[6]) + " " + str(pose_resposta[7]))

            trans[i:np.minimum(i + window, joints.shape[0])] = pose_resposta[1]
            motion_thetas[i:np.minimum(i + window, joints.shape[0]
                                       )] = pose_resposta[0]
            joints[i:np.minimum(i +
                                window, joints.shape[0])] = pose_resposta[2]

        sess.close()

    return trans, motion_thetas
Example No. 24
def test_shortcut_ndgrid_smooth_output(surface, smooth, cls):
    x, y = surface

    output = csaps(x, y, x, smooth=smooth)
    assert isinstance(output, cls)
Example No. 25
        c[1::2] = blinks["blink_offset"]
        print(c)

        # remove blinks
        x_clean = x
        y_clean = y
        delete_range = np.array([])
        for i in range(int(c.shape[0] / 2)):
            delete_range = np.append(delete_range, np.linspace(c[2 * i]-1, c[2 * i + 1]-1, int(c[2 * i + 1] - c[2 * i] + 1)))
        delete_range = delete_range.astype(int)

        x_clean = np.delete(x_clean, delete_range)
        y_clean = np.delete(y_clean, delete_range)

        # use cubic spline
        spline = csaps(x_clean, y_clean)
        xi1 = np.linspace(x[0], x[-1], n)
        yi1 = spline(xi1)
        mean = np.mean(yi1)
        std = np.std(yi1)

        pupilDiameters.append(mean)
        pupilDiameters.append(std)
        print(pupilDiameters)
        writer.writerow(pupilDiameters)

        f, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 7))
        ax1.plot(x, y, 'o')
        for i in range(int(c.shape[0] / 2)):
            ax1.axvspan(c[2 * i], c[2 * i + 1], color='red', alpha=0.5)
Example No. 26
Omega = cal_Omega(xi)
for lambd in lambdas:
    # S = N@np.linalg.inv(N.T@N + lambd*Omega)@N.T
    yh, S = smooth_spline(Xd, Yd, lambd)
    se.append(np.sqrt((S @ S.T).diagonal()))
    Yh.append(yh)
    dof.append(np.trace(S))
    print('Degree of Freedom trace(S) = {}'.format(np.trace(S)))

# csaps implementation
from csaps import csaps

smoothes = 1 / (1 + lambdas)
Yh_csaps = []
for smooth in smoothes:
    Yh_csaps.append(csaps(Xd, Yd, xi, smooth=smooth))

# plot
nrow = len(lambdas)
fig, axes = plt.subplots(nrow, 1, figsize=(5, 5 * nrow))
for i, ax in enumerate(axes):
    ax.plot(X, Y, color='C0', lw=2, label='true')
    ax.plot(Xd,
            Yd,
            linestyle='none',
            marker='o',
            markersize=6,
            markerfacecolor='w',
            markeredgecolor='k')
    ax.plot(Xd, Yh[i], color='C1', lw=2, label='my')
    ax.plot(Xd,
Example No. 27
    pdf = PdfPages(opts.fig_fnam)

nb = 17  # (trans_dN,bsc_minN,fp_offsN,post_sN,fpi_N)x3,fpi_s,fpi_e
output_data = np.full((nb, nobject), np.nan)
if not opts.debug:
    warnings.simplefilter('ignore')
for ii in range(nobject):
    if ii % 1000 == 0:
        sys.stderr.write('{}/{}\n'.format(ii, nobject))
    object_id = object_ids[ii]
    if object_id != ii + 1:
        raise ValueError('Error, object_id={}, ii={}'.format(object_id, ii))
    if inds[ii].size < 1:
        continue
    yi = vh_data[:, ii]  # VH
    yy = csaps(vh_ntim, yi, xx, smooth=opts.smooth)

    # Calculate fishpond index
    yy_max = yy.max()
    yy_min = yy.min()
    yy_thr = max(yy_min + 0.3 * (yy_max - yy_min), -21.0)
    fp_thr = 30.0
    cc = (yy < yy_thr)
    fp_inds = []
    i1 = None
    i2 = None
    flag = False
    for ic in range(cc.size):
        if not flag and cc[ic]:
            i1 = ic
            i2 = None
Example No. 28
from csaps import csaps
from scipy.signal import savgol_filter

#data = np.loadtxt('Resu_ref/Wernert_AIAA2010_7460/beta_BFP.csv', delimiter=',', skiprows=0)
data = np.loadtxt(
    './Resu_ref/Wernert_AIAA2010_7460/Caso_F01_unificated/Forces_proc.txt',
    delimiter=',',
    skiprows=1)
#data = np.loadtxt('./Resu_ref/Wernert_AIAA2010_7460/Caso_F01_unificated/Moments_proc.txt', delimiter=',', skiprows=1)

uniqueValues, indicesList = np.unique(data[:, 0], return_index=True)
data_raw = data[indicesList, :]
i = 15

xs = np.linspace(data_raw[0, 0], data_raw[-1, 0], 5000)
ys = csaps(data_raw[:, 0], data_raw[:, i], data_raw[:, 0], smooth=0.999)
smoothing_result = csaps(data_raw[:, 0], data_raw[:, i], xs)

yhat = savgol_filter(data_raw[:, i], 51, 5)

fig_size = (12, 4)
fig, axs = plt.subplots(1, 1, figsize=fig_size)
fig.canvas.manager.set_window_title('Data')
fig.suptitle('Filtered data')
plt.plot(data_raw[:, 0], data_raw[:, i])
plt.plot(data_raw[:, 0], yhat, color='green')
plt.plot(data_raw[:, 0], ys, color='red')

plt.grid()

plt.show()
Example No. 29
def motion_smooth_spline(Jtr_motion, smoothing):
    from csaps import csaps

    # Set outliers bounds to get outlier joints that are far from median standard dev
    std_bounds = 2
    # let's accept a number max of 30% of outliers -- i.e. max of 8 joints
    rate_outliers = 0.3
    Njoints = 25
    k_mad = 1.48  # conversion constant from robust MAD to a Gaussian std (without outliers)
    Nframes = len(Jtr_motion)

    # smooth joint 3D trajectories
    xjoints = [None] * Njoints
    yjoints = [None] * Njoints

    xjoints_sm = [None] * Njoints
    yjoints_sm = [None] * Njoints

    time = np.arange(Nframes)

    error_pred = [None] * Njoints
    # first run per joint before outliers removal
    for ii in range(Njoints):
        xjoints[ii] = np.hstack(
            [Jtr_motion[jj][ii, 0] for jj in range(Nframes)])
        yjoints[ii] = np.hstack(
            [Jtr_motion[jj][ii, 1] for jj in range(Nframes)])

        poses = [xjoints[ii], yjoints[ii]]
        poses_sm = csaps(time, poses, time, smooth=smoothing)
        # L1 norm of the (x, y) residual per frame
        error = np.sum(np.absolute(poses_sm - poses), axis=0)
        error_pred[ii] = np.where(np.isnan(error), np.inf, error)

    # voting scheme using robust median and MAD per joint
    outliers = [None] * Njoints
    outliers_cumul = np.zeros(len(time))
    for ii in range(Njoints):
        mediane = np.median(error_pred[ii])
        # std = k_mad * mad, with k_mad = 1.48
        made = mad(error_pred[ii])
        # test if values are afar of 2*std using robust mad
        outliers[ii] = (np.absolute(error_pred[ii] - mediane) >
                        std_bounds * k_mad * made)
        #pdb.set_trace()
        outliers_cumul += outliers[ii].astype(int)

    xjoints = [None] * Njoints
    yjoints = [None] * Njoints

    ## let's accept a number max of 30% of outliers -- i.e. max of 8 joints
    max_outliers = Njoints * rate_outliers
    inlier_poses = (outliers_cumul < max_outliers)
    frame_inliers = time[inlier_poses]
    # spline with inliers per joint after outliers removal
    for ii in range(Njoints):
        xjoints[ii] = np.hstack(
            [Jtr_motion[jj][ii, 0] for jj in frame_inliers])
        yjoints[ii] = np.hstack(
            [Jtr_motion[jj][ii, 1] for jj in frame_inliers])
        poses = [xjoints[ii], yjoints[ii]]
        poses_sm = csaps(frame_inliers, poses, time, smooth=smoothing)
        xjoints_sm[ii] = poses_sm[0, :]
        yjoints_sm[ii] = poses_sm[1, :]

    return [xjoints_sm, yjoints_sm, inlier_poses]