# Shared imports assumed by all of the examples below; the module aliases are
# inferred from how they are used in the code (et refers to the Acconeer
# Exploration Tool client library, acconeer.exptool).
import datetime
import time

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import signal

import acconeer.exptool as et


def main():
    args = et.utils.ExampleArgumentParser().parse_args()
    et.utils.config_logging(args)

    if args.socket_addr:
        client = et.SocketClient(args.socket_addr)
    elif args.spi:
        client = et.SPIClient()
    else:
        port = args.serial_port or et.utils.autodetect_serial_port()
        client = et.UARTClient(port)

    client.squeeze = False

    range_start = 0.2
    range_end = 0.6
    sensor_config = et.EnvelopeServiceConfig()
    sensor_config.sensor = args.sensors
    sensor_config.range_interval = [range_start, range_end]
    sensor_config.profile = sensor_config.Profile.PROFILE_2
    sensor_config.hw_accelerated_average_samples = 20
    sensor_config.downsampling_factor = 2

    session_info = client.setup_session(sensor_config)
    # pg_updater = PGUpdater(sensor_config, None, session_info)
    # pg_process = et.PGProcess(pg_updater)
    # pg_process.start()

    client.start_session()

    # KL divergence settings
    dx = 0.001

    # number of averaged data sets (trials) to acquire
    num = 10

    # number of sensor frames to average, and a counter
    sample = 500
    counter = 0

    df1 = np.loadtxt('test.csv')
    df2 = np.zeros(len(df1))
    b = np.ones(num) / num

    temp_ndarray = np.array([])
    for i in range(num):
        interrupt_handler = et.utils.ExampleInterruptHandler()
        print("Press Ctrl-C to end session")
        # start = time.time()
        while not interrupt_handler.got_signal:
            data_info, data = client.get_next()
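            # Accumulate `sample` frames and average them below; the last 7
            # samples of each frame are dropped, presumably so the frame length
            # matches the reference stored in test.csv.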
            if (counter == 0):
                df2 = np.delete(data[0], np.s_[-7::])
            else:
                df2 = df2 + np.delete(data[0], np.s_[-7::])
            counter += 1
            if (counter > sample):
                df2 = df2 / sample
                counter = 0
                break
            # print("sensor data: "+str(data[0]))
            # print("number of sensor data: "+str(len(data[0])))
        # finish = time.time()

        # Process the previously acquired reference data against the data just acquired
        difference = abs(df1 - df2)  # absolute difference of the amplitudes
        abs_max = max(difference)
        max_sequence = np.argmax(difference)
        location = range_start * 100 + (
            (int(range_end * 100) - int(range_start * 100)) /
            len(df1)) * max_sequence
        print(location)
        exit(1)
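        # NOTE: the exit(1) above is a debug stop; while it is in place, the
        # accumulation and CSV export further down never run.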

        temp_ndarray = np.append(temp_ndarray, abs_max)
        # print(temp_ndarray)
        # temp_list = list(temp_ndarray)
        # temp_list.append()
        # temp_list = np.array(temp_list)
        # print(temp_list)
        # np.savetxt('absvalue_samevalue.csv',temp_ndarray,delimiter=',')
    temp_list = []
    temp_list.append(temp_ndarray)

    # Load the previously stored rows from file
    # (does not work when the CSV file is empty or contains only one row)
    # num must be the same for every stored row
    temp = []
    temp_2dimensional_ndarray = []
    # prev_ndarray = np.loadtxt('absvalue_samevalue.csv',delimiter=',')
    prev_ndarray = np.loadtxt('absvalue_BookAgainstEmptyValue.csv',
                              delimiter=',')
    # prev_ndarray = np.loadtxt('KLDivergence_Sparse.csv',delimiter=",")
    # temp.append(np.array(prev_ndarray))
    # temp_2dimensional_ndarray.append(np.array(temp))
    temp_2dimensional_ndarray.append(np.array(prev_ndarray))
    store_data = np.empty((0, num), int)
    # if(len(temp_2dimensional_ndarray)<= 1):
    #     if(len(temp_2dimensional_ndarray)==0):
    #         store_data = np.append(store_data,np.array(temp_list),axis=0)
    #         np.savetxt('absvalue_BookAgainstEmptyValue.csv',np.array(store_data),delimiter=',')
    #     else:
    #         store_data = np.append(store_data,np.array(temp_2dimensional_ndarray),axis=0)
    #         store_data = np.append(store_data,np.array(temp_list),axis=0)
    #         np.savetxt('absvalue_BookAgainstEmptyValue.csv',np.array(store_data),delimiter=',')
    # else:
    # print(temp_2dimensional_ndarray)
    for data in temp_2dimensional_ndarray:
        store_data = np.append(store_data, np.array(data), axis=0)

    store_data = np.append(store_data, np.array(temp_list), axis=0)
    # np.savetxt('absvalue_samevalue.csv',np.array(store_data),delimiter=',')
    np.savetxt('absvalue_BookAgainstEmptyValue.csv',
               np.array(store_data),
               delimiter=',')
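    # Sketch of an equivalent way to build the matrix: np.vstack stacks the
    # stored rows and the new row in one call, and it also accepts a 1-D
    # previous row, which would sidestep the single-row limitation noted above.
    #     store_data = np.vstack([prev_ndarray, temp_ndarray])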
    # difference = (df1-df2)**2  # squared difference of the amplitudes
    # print(difference)

    # scale so the values sum to 1
    # df2 = df2/np.sum(df2)
    # print(np.sum(df2))

    # data saving section
    # np.savetxt('test.csv',df2)
    # exit(1)
    # df2 = pd.DataFrame(get_data,columns=['sensor_data'])
    # df2.to_csv('test.csv')
    # df1 = np.delete(df1,np.s_[:300])
    # df2 = np.delete(df2,np.s_[:300])
    # df1 = np.convolve(df1,b, mode = 'same')
    # df2 = np.convolve(df2,b, mode = 'same')
    # KL_U2  = KLdivergence(df1,df2)
    # print(KL_U2)
    # KL_U2  = KLdivergence(df2,df1)
    # print(KL_U2)
    # KL_U2  = JSdivergence(df1,df2)
    # KL_U2  = Pearson(df1,df2)

    # default color cycle
    clr = plt.rcParams['axes.prop_cycle'].by_key()['color']

    # create an array from the first argument to the second in steps of the third
    x = np.arange(0, 5000 + 0.25, 0.25)  # placeholder; overwritten below
    # p(x)
    ax = plt.subplot(1, 3, 1)
    # x axis in cm, stepped so its length matches df1
    x = np.arange(range_start * 100, range_end * 100,
                  (int(range_end * 100) - int(range_start * 100)) / len(df1))

    # print(len(x))
    plt.plot(x, df1, label='Previous Data')

    # legend
    # plt.legend(loc=1)
    #plt.xticks(np.arange(0,5000+1,500))
    plt.xlabel('$x[cm]$')
    plt.ylabel('Amplitude')
    plt.title('Previous Data')

    # q(x)
    qx = plt.subplot(1, 3, 2)
    plt.plot(x, df2, label='Current Data')

    # legend
    # plt.legend(loc=1)
    #plt.xticks(np.arange(0,5000+1,500))
    plt.xlabel('$x[cm]$')
    plt.ylabel('Amplitude')
    plt.title('Current Data')

    qx = plt.subplot(1, 3, 3)
    plt.plot(x, difference, label='Absolute difference')

    # plt.legend(loc=1)
    #plt.xticks(np.arange(0,5000+1,500))
    plt.xlabel('$x[cm]$')
    plt.ylabel('Amplitude')
    plt.title('Absolute value of difference')

    plt.tight_layout()
    # plt.show()

    print("Disconnecting...")
    # pg_process.close()
    client.disconnect()
Example #2
def main():
    args = et.utils.ExampleArgumentParser().parse_args()
    et.utils.config_logging(args)

    if args.socket_addr:
        client = et.SocketClient(args.socket_addr)
    elif args.spi:
        client = et.SPIClient()
    else:
        port = args.serial_port or et.utils.autodetect_serial_port()
        client = et.UARTClient(port)

    client.squeeze = False

    sensor_config = et.EnvelopeServiceConfig()
    sensor_config.sensor = args.sensors
    range_start = 0.2
    range_end = 1.0
    sensor_config.range_interval = [range_start, range_end]
    sensor_config.profile = sensor_config.Profile.PROFILE_2
    sensor_config.hw_accelerated_average_samples = 20
    sensor_config.downsampling_factor = 2

    session_info = client.setup_session(sensor_config)
    # print("session_info: "+str(session_info))

    # pg_updater = PGUpdater(sensor_config, None, session_info)
    # pg_process = et.PGProcess(pg_updater)
    # pg_process.start()

    client.start_session()

    # KL divergence settings
    dx = 0.001

    # moving-average window size
    num = 500
    # number of sensor frames to average, and a counter
    # sample = 2000
    sample = 5000
    counter = 0
    b = np.ones(num) / num
    # load the CSV file saved in advance
    # df1 = pd.read_csv("test.csv",usecols=[1])
    df1 = np.loadtxt('test.csv')
    maxid = signal.argrelmax(df1, order=10)
    print(maxid)
    standard = max(df1) / 2
    target_peak = []
    for i in maxid[0]:
        if (standard < df1[i]):
            target_peak.append(i)

    print(target_peak)
    plt.plot(range(0, len(df1)), df1)
    plt.show()
    exit(1)
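    # Note: the exit(1) above is a debug stop, so the acquisition below does not
    # run. As a rough sketch, the same thresholded peak selection could be done
    # in one call with scipy.signal.find_peaks (SciPy >= 1.1):
    #     peaks, _ = signal.find_peaks(df1, height=max(df1) / 2)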
    # df2 = np.zeros(len(df1))

    interrupt_handler = et.utils.ExampleInterruptHandler()
    print("Press Ctrl-C to end session")
    while not interrupt_handler.got_signal:
        data_info, data = client.get_next()
        if (counter == 0):
            df2 = data[0]
        else:
            df2 = df2 + data[0]
        counter += 1
        if (counter > sample):
            df2 = df2 / sample
            break

    # data saving section
    max_val = df2[np.argmax(df2)]
    min_val = df2[np.argmin(df2)]
    print(max_val)
    print(min_val)
    print((max_val + min_val) / 2)
    np.savetxt('test.csv', df2)
    exit(1)
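    # The exit above makes this run a pure reference capture: it stores the
    # averaged envelope in test.csv and stops. The peak-alignment comparison
    # below only runs once these two lines are commented out.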
    # print((int(range_end*100) - int(range_start*100))/len(df1))

    # print(df2)
    # print(np.argmax(df2))
    # print(df2[np.argmax(df2)])

    # find the extent of the peak (largest maximum) in the previous data
    df1_max_order = np.argmax(df1)
    df1_max_value = df1[np.argmax(df1)]

    df1_start_loc = df1_max_order - 1
    while (df1_start_loc > 0):
        if (df1[df1_start_loc] <= df1[df1_start_loc - 1]):
            break
        df1_start_loc = df1_start_loc - 1

    df1_finish_loc = df1_max_order + 1
    while (df1_finish_loc < len(df1) - 1):  # -1: the body reads df1[df1_finish_loc + 1]
        if (df1[df1_finish_loc] <= df1[df1_finish_loc + 1]):
            break
        df1_finish_loc = df1_finish_loc + 1

    # find the extent of the peak (largest maximum) in the current data
    df2_max_order = np.argmax(df2)
    df2_max_value = df2[np.argmax(df2)]
    df2_start_loc = df2_max_order - 1

    while (df2_start_loc > 0):
        if (df2[df2_start_loc] <= df2[df2_start_loc - 1]):
            break
        df2_start_loc = df2_start_loc - 1

    df2_finish_loc = df2_max_order + 1
    while (df2_finish_loc < len(df2) - 1):  # -1: the body reads df2[df2_finish_loc + 1]
        if (df2[df2_finish_loc] <= df2[df2_finish_loc + 1]):
            break
        df2_finish_loc = df2_finish_loc + 1

    # align the peaks
    # df1_copy = df1.copy()
    # df2_copy = df2.copy()
    df1_diff_peak_start = df1_max_order - df1_start_loc
    df1_diff_peak_finish = df1_finish_loc - df1_max_order
    df2_diff_peak_start = df2_max_order - df2_start_loc
    df2_diff_peak_finish = df2_finish_loc - df2_max_order

    # adjust the region to the left of the peak
    if (df2_diff_peak_start > df1_diff_peak_start):
        # diff = df2_diff_peak_start-df1_diff_peak_start  # negated so it can be used as a slice offset
        start_offset = df1_diff_peak_start
    else:
        start_offset = df2_diff_peak_start

    # adjust the region to the right of the peak
    if (df2_diff_peak_finish > df1_diff_peak_finish):
        finish_offset = df1_diff_peak_finish
    else:
        finish_offset = df2_diff_peak_finish

    # trim both arrays so the peaks line up
    df1_copy = np.copy(df1[df1_max_order - start_offset:df1_max_order +
                           finish_offset])
    df2_copy = np.copy(df2[df2_max_order - start_offset:df2_max_order +
                           finish_offset])
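    # At this point df1_copy and df2_copy have the same length
    # (start_offset + finish_offset) and both peaks sit at index start_offset,
    # so the point-wise comparisons below are aligned sample by sample.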

    # after peak position adjustment
    # ax = plt.subplot(1,2,1)
    # ax = plt.subplot(1,3,1)
    ax = plt.subplot(2, 2, 1)

    plt.plot(range(0, len(df1_copy)), df1_copy, label='Previous', color='b')
    plt.plot(range(0, len(df2_copy)), df2_copy, label='Current', color='r')

    plt.xlabel('Data Number')
    plt.ylabel('Amplitude')
    plt.title('After Adjustment of Peak Position')
    plt.legend(loc='best')

    # compute Euclidean distance and cosine similarity
    euclidean_distance = Euclidean_Distance(df1_copy, df2_copy)
    cosine_similarity = Cosine_Similarity(df1_copy, df2_copy)
    point_distance = np.sqrt((df1_copy - df2_copy)**2)
    peak_point = np.argmax(df1_copy)

    print("各点におけるユークリッド距離の総和: " + str(euclidean_distance))
    # print("コサイン類似度: "+str(cosine_similarity))
    print("ピーク地点におけるユークリッド距離: " +
          str(np.sqrt((df1_copy[peak_point] - df2_copy[peak_point])**2)))
    # print("点ごとの距離: "+str(point_distance))

    # ax = plt.subplot(1,2,2)
    # ax = plt.subplot(1,3,2)
    ax = plt.subplot(2, 2, 2)
    value = np.sqrt(np.square(df1_copy - df2_copy))

    plt.plot(range(0, len(value)),
             value,
             label='Euclidean_Distance',
             color='r')

    plt.xlabel('Data Number')
    plt.ylabel('Euclidean Distance')
    plt.title('Euclidean Distance')

    df1_slope = []
    df2_slope = []
    for i in range(len(df1_copy) - 1):
        df1_slope.append(df1_copy[i + 1] - df1_copy[i])
        df2_slope.append(df2_copy[i + 1] - df2_copy[i])

    # ax = plt.subplot(1,3,3)
    ax = plt.subplot(2, 2, 3)
    plt.plot(range(0, len(df1_slope)), df1_slope, label='Previous', color='b')
    plt.plot(range(0, len(df2_slope)), df2_slope, label='Current', color='r')

    plt.xlabel('Data Number')
    plt.ylabel('Slope')
    plt.legend()
    plt.title('Slope')

    ax = plt.subplot(2, 2, 4)
    slope_diff = np.sqrt(np.square(np.array(df1_slope) - np.array(df2_slope)))
    plt.plot(range(0, len(slope_diff)),
             slope_diff,
             label='Previous',
             color='b')
    print("傾きの差のユークリッド距離: " + str(slope_diff[np.argmax(slope_diff)]))

    plt.xlabel('Data Number')
    plt.ylabel('Slope Difference')
    plt.title('Slope Difference')

    dir_name = "/Users/sepa/Desktop/60GHzレーダーの実験/Euclidean_Distance/実験環境変更/空箱同士"
    now = datetime.datetime.fromtimestamp(time.time())
    file_name = dir_name + now.strftime("%Y_%m_%d_%H_%M_%S") + ".png"

    plt.tight_layout()
    plt.savefig(file_name)
    plt.show()

    print("Disconnecting...")
    # pg_process.close()
    client.disconnect()
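
# The Euclidean_Distance and Cosine_Similarity helpers are defined elsewhere in
# the original script. A minimal sketch consistent with how they are called
# above (two equal-length 1-D arrays in, a scalar out) could look like the
# following; the exact definitions used by the author may differ.
def Euclidean_Distance(p, q):
    # Sum of per-sample distances between the two envelopes, matching the
    # "sum of point-wise Euclidean distances" print-out above.
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    return np.sum(np.sqrt((p - q) ** 2))


def Cosine_Similarity(p, q):
    # cosine of the angle between the two vectors (1.0 means identical shape)
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    return np.dot(p, q) / (np.linalg.norm(p) * np.linalg.norm(q))
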
Example #3
def main():
    args = et.utils.ExampleArgumentParser().parse_args()
    et.utils.config_logging(args)

    if args.socket_addr:
        client = et.SocketClient(args.socket_addr)
    elif args.spi:
        client = et.SPIClient()
    else:
        port = args.serial_port or et.utils.autodetect_serial_port()
        client = et.UARTClient(port)

    client.squeeze = False

    range_start = 0.2
    range_end = 1.0
    sensor_config = et.EnvelopeServiceConfig()
    sensor_config.sensor = args.sensors
    sensor_config.range_interval = [range_start, range_end]
    sensor_config.profile = sensor_config.Profile.PROFILE_2
    sensor_config.hw_accelerated_average_samples = 20
    sensor_config.downsampling_factor = 2

    session_info = client.setup_session(sensor_config)
    # pg_updater = PGUpdater(sensor_config, None, session_info)
    # pg_process = et.PGProcess(pg_updater)
    # pg_process.start()

    client.start_session()

    # KL divergence settings
    dx = 0.001

    # moving-average window size
    num = 500

    # number of sensor frames to average, and a counter
    sample = 300
    counter = 0
    b = np.ones(num) / num
    # load the CSV file saved in advance
    # df1 = pd.read_csv("test.csv",usecols=[1])
    df1 = np.loadtxt('test.csv')
    df2 = np.zeros(len(df1))

    interrupt_handler = et.utils.ExampleInterruptHandler()
    print("Press Ctrl-C to end session")
    start = time.time()
    while not interrupt_handler.got_signal:
        data_info, data = client.get_next()
        if (counter == 0):
            df2 = np.delete(data[0], np.s_[-7::])
        else:
            df2 = df2 + np.delete(data[0], np.s_[-7::])
        counter += 1
        if (counter > sample):
            df2 = df2 / sample
            break
        # print("sensor data: "+str(data[0]))
        # print("number of sensor data: "+str(len(data[0])))
    finish = time.time()
    print("処理時間: " + str(finish - start))

    # scale so the values sum to 1
    df2 = df2 / np.sum(df2)
    print(np.sum(df2))

    # normalization
    # df2 = (df2-df2.min())/(df2.max() - df2.min())
    # df2[np.argmin(df2)] = df2[np.argmin(df2)] + 0.00001
    # df1[np.argmin(df1)] = df1[np.argmin(df1)] + 0.00001

    # data saving section
    # np.savetxt('test.csv',df2)
    # exit(1)
    # df2 = pd.DataFrame(get_data,columns=['sensor_data'])
    # df2.to_csv('test.csv')
    # df1 = np.delete(df1,np.s_[:300])
    # df2 = np.delete(df2,np.s_[:300])
    # df1 = np.convolve(df1,b, mode = 'same')
    # df2 = np.convolve(df2,b, mode = 'same')
    KL_U2 = KLdivergence(df1, df2)
    print(KL_U2)
    KL_U2 = KLdivergence(df2, df1)
    print(KL_U2)
    # KL_U2  = JSdivergence(df1,df2)
    # KL_U2  = Pearson(df1,df2)

    # default color cycle
    clr = plt.rcParams['axes.prop_cycle'].by_key()['color']

    # create an array from the first argument to the second in steps of the third
    x = np.arange(0, 5000 + 0.25, 0.25)  # placeholder; overwritten below

    # p(x)
    ax = plt.subplot(1, 2, 1)
    # x axis in cm, stepped so its length matches df1
    x = np.arange(range_start * 100, range_end * 100,
                  (int(range_end * 100) - int(range_start * 100)) / len(df1))
    # print(len(x))
    plt.plot(x, df1, label='$p(x)$')

    # legend
    plt.legend(loc=1, prop={'size': 13})
    #plt.xticks(np.arange(0,5000+1,500))
    plt.xlabel('$x[cm]$')

    # q(x)
    qx = plt.subplot(1, 2, 2)
    plt.plot(x, df2, label='$q(x)$')

    # legend
    plt.legend(loc=1, prop={'size': 13})
    #plt.xticks(np.arange(0,5000+1,500))
    plt.xlabel('$x[cm]$')

    ax.set_title('$KL(p||q)=%.16f$' % KL_U2, fontsize=20)
    print(KL_U2)

    plt.tight_layout()
    plt.show()
    print("Disconnecting...")
    # pg_process.close()
    client.disconnect()
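
# KLdivergence is defined elsewhere in the original script. A minimal sketch
# matching how it is used above (two equal-length 1-D arrays treated as
# discrete distributions, returning a scalar) might be the following; the
# small eps term is an assumption to guard against log(0) and division by zero.
def KLdivergence(p, q, eps=1e-12):
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    # renormalize so both sum to 1 before computing D(p || q)
    p = p / np.sum(p)
    q = q / np.sum(q)
    return np.sum(p * np.log((p + eps) / (q + eps)))
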
def main():
    args = et.utils.ExampleArgumentParser().parse_args()
    et.utils.config_logging(args)

    if args.socket_addr:
        client = et.SocketClient(args.socket_addr)
    elif args.spi:
        client = et.SPIClient()
    else:
        port = args.serial_port or et.utils.autodetect_serial_port()
        client = et.UARTClient(port)

    client.squeeze = False

    range_start = 0.2
    range_end = 0.6
    sensor_config = et.EnvelopeServiceConfig()
    sensor_config.sensor = args.sensors
    sensor_config.range_interval = [range_start, range_end]
    sensor_config.profile = sensor_config.Profile.PROFILE_2
    sensor_config.hw_accelerated_average_samples = 20
    sensor_config.downsampling_factor = 2

    session_info = client.setup_session(sensor_config)
    # pg_updater = PGUpdater(sensor_config, None, session_info)
    # pg_process = et.PGProcess(pg_updater)
    # pg_process.start()

    client.start_session()

    # KL divergence settings
    dx = 0.001

    # moving-average window size
    num = 500

    # number of sensor frames to average, and a counter
    sample = 500
    counter = 0
    b = np.ones(num) / num
    # load the CSV file saved in advance
    # df1 = pd.read_csv("test.csv",usecols=[1])
    df1 = np.loadtxt('test.csv')
    # df2 = np.zeros(len(df1))

    interrupt_handler = et.utils.ExampleInterruptHandler()
    print("Press Ctrl-C to end session")
    while not interrupt_handler.got_signal:
        data_info, data = client.get_next()
        if (counter == 0):
            df2 = data[0]
        else:
            df2 = df2 + data[0]
        counter += 1
        if (counter > sample):
            df2 = df2 / sample
            break

    # data saving section
    # np.savetxt('test.csv',df2)
    # exit(1)

    # print(df2)
    # print(np.argmax(df2))
    # print(df2[np.argmax(df2)])

    # find the extent of the peak (largest maximum) in the previous data
    df1_max_order = np.argmax(df1)
    df1_max_value = df1[np.argmax(df1)]

    df1_start_loc = df1_max_order - 1
    while (df1_start_loc > 0):
        if (df1[df1_start_loc] <= df1[df1_start_loc - 1]):
            break
        df1_start_loc = df1_start_loc - 1

    df1_finish_loc = df1_max_order + 1
    while (df1_finish_loc < len(df1) - 1):  # -1: the body reads df1[df1_finish_loc + 1]
        if (df1[df1_finish_loc] <= df1[df1_finish_loc + 1]):
            break
        df1_finish_loc = df1_finish_loc + 1

    # find the extent of the peak (largest maximum) in the current data
    df2_max_order = np.argmax(df2)
    df2_max_value = df2[np.argmax(df2)]
    df2_start_loc = df2_max_order - 1

    while (df2_start_loc > 0):
        if (df2[df2_start_loc] <= df2[df2_start_loc - 1]):
            break
        df2_start_loc = df2_start_loc - 1

    df2_finish_loc = df2_max_order + 1
    while (df2_finish_loc < len(df2) - 1):  # -1: the body reads df2[df2_finish_loc + 1]
        if (df2[df2_finish_loc] <= df2[df2_finish_loc + 1]):
            break
        df2_finish_loc = df2_finish_loc + 1

    # align the peaks
    # df1_copy = df1.copy()
    # df2_copy = df2.copy()
    df1_diff_peak_start = df1_max_order - df1_start_loc
    df1_diff_peak_finish = df1_finish_loc - df1_max_order
    df2_diff_peak_start = df2_max_order - df2_start_loc
    df2_diff_peak_finish = df2_finish_loc - df2_max_order

    # adjust the region to the left of the peak
    if (df2_diff_peak_start > df1_diff_peak_start):
        # diff = df2_diff_peak_start-df1_diff_peak_start  # negated so it can be used as a slice offset
        start_offset = df1_diff_peak_start
    else:
        start_offset = df2_diff_peak_start

    # adjust the region to the right of the peak
    if (df2_diff_peak_finish > df1_diff_peak_finish):
        finish_offset = df1_diff_peak_finish
    else:
        finish_offset = df2_diff_peak_finish

    # trim both arrays so the peaks line up
    df1_copy = np.copy(df1[df1_max_order - start_offset:df1_max_order +
                           finish_offset])
    df2_copy = np.copy(df2[df2_max_order - start_offset:df2_max_order +
                           finish_offset])
    # df1_copy = np.delete(df1_copy,np.s_[df1_max_order-start_offset:df1_max_order+finish_offset])
    # df2_copy = np.delete(df2_copy,np.s_[df2_max_order-start_offset:df2_max_order+finish_offset])
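    # df1_copy and df2_copy now have equal length with their peaks at index
    # start_offset, so they can be overlaid directly in the plots below.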

    # before peak position adjustment
    ax = plt.subplot(1, 2, 1)
    interval = (int(range_end * 100) - int(range_start * 100)) / len(df1)

    df1_x = np.arange(range_start * 100 + df1_start_loc * interval,
                      range_start * 100 + df1_finish_loc * interval - interval,
                      interval)
    plt.plot(df1_x,
             df1[df1_start_loc:df1_finish_loc - 1],
             label='Previous',
             color='b')

    df2_x = np.arange(range_start * 100 + df2_start_loc * interval,
                      range_start * 100 + df2_finish_loc * interval - interval,
                      interval)
    plt.plot(df2_x,
             df2[df2_start_loc:df2_finish_loc - 1],
             label='Current',
             color='r')

    # plt.xlabel('Data Number')
    plt.xlabel('Distance(cm)')
    plt.ylabel('Amplitude')
    plt.title('Before Adjustment of Peak Position')
    plt.legend(loc=1)

    # after peak position adjustment
    ax = plt.subplot(1, 2, 2)
    print(start_offset)
    print(finish_offset)

    plt.plot(range(0, len(df1_copy)), df1_copy, label='Previous', color='b')
    plt.plot(range(0, len(df2_copy)), df2_copy, label='Current', color='r')
    # plt.plot(range(df1_max_order-start_offset,df1_max_order+finish_offset),df1[df1_max_order-start_offset:df1_max_order+finish_offset],label='Previous Data',color='b')
    # plt.plot(range(df2_max_order-start_offset,df2_max_order+finish_offset),df2_copy,label='Current Data',color='r')

    plt.xlabel('Data Number')
    plt.ylabel('Amplitude')
    plt.title('After Adjustment of Peak Position')

    plt.tight_layout()
    plt.legend(loc=1)
    plt.show()

    print("Disconnecting...")
    # pg_process.close()
    client.disconnect()
def main():
    args = et.utils.ExampleArgumentParser().parse_args()
    et.utils.config_logging(args)

    if args.socket_addr:
        client = et.SocketClient(args.socket_addr)
    elif args.spi:
        client = et.SPIClient()
    else:
        port = args.serial_port or et.utils.autodetect_serial_port()
        client = et.UARTClient(port)

    client.squeeze = False

    range_start = 0.2
    range_end = 0.6
    sensor_config = et.EnvelopeServiceConfig()
    sensor_config.sensor = args.sensors
    sensor_config.range_interval = [range_start, range_end]
    sensor_config.profile = sensor_config.Profile.PROFILE_2
    sensor_config.hw_accelerated_average_samples = 20
    sensor_config.downsampling_factor = 2

    session_info = client.setup_session(sensor_config)
    # pg_updater = PGUpdater(sensor_config, None, session_info)
    # pg_process = et.PGProcess(pg_updater)
    # pg_process.start()

    client.start_session()

    # KL divergence settings
    dx = 0.001

    # number of trials
    num = 1

    # number of sensor frames to average, and a counter
    sample = 500
    counter = 0

    # filename = "absvalue_Book2EachOther.csv"
    # filename = "absvalue_Book1AgainstBook2.csv"
    # filename = "absvalue_NoBookAgainstBook2.csv"
    # filename = "absvalue_Book1EachOther.csv"
    filename = "absvalue_NoBookEachOther.csv"

    df1 = np.loadtxt('test.csv')
    df2 = np.zeros(len(df1))
    b = np.ones(num) / num

    storage = []
    temp_ndarray = np.array([])
    for i in range(num):
        interrupt_handler = et.utils.ExampleInterruptHandler()
        print("Press Ctrl-C to end session")
        # start = time.time()
        while not interrupt_handler.got_signal:
            data_info, data = client.get_next()
            if (counter == 0):
                df2 = np.delete(data[0], np.s_[-7::])
            else:
                df2 = df2 + np.delete(data[0], np.s_[-7::])
            counter += 1
            if (counter > sample):
                df2 = df2 / sample
                counter = 0
                break
            # print("sensor data: "+str(data[0]))
            # print("number of sensor data: "+str(len(data[0])))
        # finish = time.time()

        # Process the previously acquired reference data against the data just acquired
        # print(temp_ndarray)
        # temp_list = list(temp_ndarray)
        # temp_list.append()
        # temp_list = np.array(temp_list)
        # print(temp_list)
        # np.savetxt('absvalue_samevalue.csv',temp_ndarray,delimiter=',')
    difference = abs(df1 - df2)  # absolute difference of the amplitudes
    i = 0
    dataset = []
    while (i < len(df1) and i < len(df2)):
        dataset_temp = []
        dataset_temp.append(df1[i])
        dataset_temp.append(df2[i])
        dataset.append(dataset_temp)
        i += 1
    df_corr = pd.DataFrame(dataset, columns=['Previous', 'Current'])

    print("相関係数: " + str(df_corr.corr().iat[0, 1]))

    # scale so the values sum to 1
    # df2 = df2/np.sum(df2)
    # print(np.sum(df2))

    # data saving section
    # np.savetxt('test.csv',df2)
    # exit(1)
    # df2 = pd.DataFrame(get_data,columns=['sensor_data'])
    # df2.to_csv('test.csv')
    # df1 = np.delete(df1,np.s_[:300])
    # df2 = np.delete(df2,np.s_[:300])
    # df1 = np.convolve(df1,b, mode = 'same')
    # df2 = np.convolve(df2,b, mode = 'same')
    # KL_U2  = KLdivergence(df1,df2)
    # print(KL_U2)
    # KL_U2  = KLdivergence(df2,df1)
    # print(KL_U2)
    # KL_U2  = JSdivergence(df1,df2)
    # KL_U2  = Pearson(df1,df2)

    # default color cycle
    clr = plt.rcParams['axes.prop_cycle'].by_key()['color']

    # create an array from the first argument to the second in steps of the third
    x = np.arange(0, 5000 + 0.25, 0.25)  # placeholder; overwritten below
    # p(x)
    ax = plt.subplot(1, 3, 1)
    # x axis in cm, stepped so its length matches df1
    x = np.arange(range_start * 100, range_end * 100,
                  (int(range_end * 100) - int(range_start * 100)) / len(df1))

    # print(len(x))
    plt.plot(x, df1, label='Previous Data')

    # legend
    # plt.legend(loc=1)
    #plt.xticks(np.arange(0,5000+1,500))
    plt.xlabel('$x[cm]$')
    plt.ylabel('Amplitude')
    plt.title('Previous Data')

    # q(x)
    qx = plt.subplot(1, 3, 2)
    plt.plot(x, df2, label='Current Data')

    # legend
    # plt.legend(loc=1)
    #plt.xticks(np.arange(0,5000+1,500))
    plt.xlabel('$x[cm]$')
    plt.ylabel('Amplitude')
    plt.title('Current Data')

    qx = plt.subplot(1, 3, 3)
    plt.plot(x, difference, label='Absolute difference')

    # plt.legend(loc=1)
    #plt.xticks(np.arange(0,5000+1,500))
    plt.xlabel('$x[cm]$')
    plt.ylabel('Amplitude')
    plt.title('Absolute value of difference')

    plt.tight_layout()
    plt.show()

    print("Disconnecting...")
    # pg_process.close()
    client.disconnect()