Example #1
def forward_test():
    try:
        f = forward.forward_gpu()
        coef = forward.filter_coefficient()
        geo = forward.geoelectric_model()
        data = forward.forward_data()

        geo_path = [
            '../test_data/forward_model1.json',
            '../test_data/forward_model2.json',
            '../test_data/forward_model3.json'
        ]

        geos = []
        responses = []
        for i in range(len(geo_path)):
            geos.append(forward.geoelectric_model())
            responses.append(forward.forward_data())

        coef.load_cos_coef('../test_data/cos_xs.txt')
        coef.load_hkl_coef('../test_data/hankel1.txt')
        data.generate_time_stamp_by_count(-5, -2, 40)

        f.load_general_params(10, 100, 50)
        f.load_filter_coef(coef)
        f.load_time_stamp(data)

        for i in range(len(geo_path)):
            geos[i].load_from_file(geo_path[i])

            f.load_geo_model(geos[i])
            f.forward()

            responses[i] = f.get_result_magnetic()
            responses[i].name = geos[i].name

        fig = draw_resistivity(*geos)
        fig.show()

        fig = draw_forward_result(*responses)
        fig.show()

        # print(loss(responses[0], responses[1]))

        # print(loss(responses[1], responses[2]))
        # fig.show()

        # fig = draw_forward_result(m)
        #
        # fig.show()

    except Exception as e:
        traceback.print_exc()
        log.error(repr(e))
    finally:
        log.debug('forward_test finished')
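
Every example on this page repeats the same solver setup: build forward.forward_gpu(), load the cosine and Hankel filter coefficients, generate time stamps, and set the general parameters (10, 100, 50). Below is a minimal sketch of how that shared boilerplate could be factored into a helper; the function name, signature and default paths are illustrative assumptions, not part of the original code:

import forward

def make_forward_solver(cos_path='../test_data/cos_xs.txt',
                        hkl_path='../test_data/hankel1.txt',
                        t_start=-5, t_end=-2, t_count=40):
    # Hypothetical helper: builds the GPU solver and loads everything the
    # examples share (filter coefficients, time stamps, general parameters
    # 10/100/50). A geoelectric model still has to be loaded with
    # f.load_geo_model(...) before calling f.forward().
    f = forward.forward_gpu()

    coef = forward.filter_coefficient()
    coef.load_cos_coef(cos_path)
    coef.load_hkl_coef(hkl_path)

    stamps = forward.forward_data()
    stamps.generate_time_stamp_by_count(t_start, t_end, t_count)

    f.load_general_params(10, 100, 50)
    f.load_filter_coef(coef)
    f.load_time_stamp(stamps)
    return f
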
Example #2
def forward_test():
    try:
        f = forward.forward_gpu()
        coef = forward.filter_coefficient()
        geo = forward.geoelectric_model()
        data = forward.forward_data()
        m_data = forward.forward_data()

        coef.load_cos_coef('../test_data/cos_xs.txt')
        coef.load_hkl_coef('../test_data/hankel1.txt')
        geo.load_from_file('../test_data/forward_model1.json')
        data.generate_time_stamp_by_count(-5, -2, 20)

        f.load_time_stamp(data)
        f.load_geo_model(geo)
        f.load_general_params(10, 100, 50)
        f.load_filter_coef(coef)

        f.forward()

        data = f.get_result_magnetic()
        data.name = 'CUDA'
        m_data.resize(data.count)
        m_data.time = data.time
        m_data.idx = data.idx
        m_data.name = 'Matlab'

        m_data.set_item_s('response', [
            1.127565, 0.998018, 0.894319, 0.805530, 0.723312, 0.642532,
            0.561468, 0.481032, 0.403363, 0.330769, 0.265176, 0.207871,
            0.159425, 0.119744, 0.088197, 0.063800, 0.045399, 0.031824,
            0.022005, 0.015023
        ])

        fig = draw_resistivity(geo)
        fig.show()

        fig = draw_forward_result(data, m_data)
        fig.show()

        print('loss:')
        print(loss(m_data, data))

    except Exception as e:
        traceback.print_exc()
        log.error(repr(e))
    finally:
        log.debug('forward_test finished')
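
Examples #2 and #9 compare two responses through a loss helper that is not shown on this page. Here is a minimal sketch of what such a misfit function might look like, assuming the 'response' channel can be read by indexing as in Example #9; the relative-RMS formula is an assumption, not the original implementation:

import numpy as np

def loss(reference, candidate):
    # Hypothetical misfit: relative RMS error between the 'response'
    # channels of two forward_data objects.
    ref = np.asarray(reference['response'], dtype=float)
    cand = np.asarray(candidate['response'], dtype=float)
    return float(np.sqrt(np.mean(((cand - ref) / ref) ** 2)))
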
Example #3
def forward_test():
    try:
        f = forward.forward_gpu()
        coef = forward.filter_coefficient()
        geo = forward.geoelectric_model()
        data = forward.forward_data()

        coef.load_cos_coef('../test_data/cos_xs.txt')
        coef.load_hkl_coef('../test_data/hankel1.txt')
        geo.load_from_file('../test_data/forward_model1.json')
        data.generate_time_stamp_by_count(-5, -2, 20)

        f.load_time_stamp(data)
        f.load_geo_model(geo)
        f.load_general_params(10, 100, 50)
        f.load_filter_coef(coef)

        f.forward()

        data = f.get_result_magnetic()
        data.name = 'Model'

        fig = draw_resistivity(geo)
        fig.show()

        fig = draw_forward_result(data)
        fig.show()


    except Exception as e:
        traceback.print_exc()
        log.error(repr(e))
    finally:
        log.debug('forward_test finished')
Example #4
def forward_test():
    try:
        # Forward modelling objects
        f = forward.forward_gpu()
        coef = forward.filter_coefficient()
        geo = forward.geoelectric_model()
        geo2 = forward.geoelectric_model()
        iso = forward.isometric_model()
        data = forward.forward_data()

        # Load the various input data
        coef.load_cos_coef('../test_data/cos_xs.txt')
        coef.load_hkl_coef('../test_data/hankel1.txt')
        geo.load_from_file('../test_data/test_geo_model.json')
        geo2.load_from_file('../test_data/test_geo_model2.json')
        iso.load_from_file('../test_data/test_iso_model.json')
        data.generate_time_stamp_by_count(-5, -2, 20)

        # Test drawing the geoelectric models
        fig = draw_resistivity(geo,
                               geo2,
                               forward.iso_to_geo(iso),
                               last_height=300)
        fig.show()

        # Feed the data into the forward solver
        f.load_general_params(10, 100, 50)
        f.load_filter_coef(coef)
        f.load_geo_model(geo)
        f.load_time_stamp(data)

        # Run the forward modelling
        f.forward()

        # Fetch the results
        m = f.get_result_late_m()
        e = f.get_result_late_e()

        m.name = 'late_m'
        e.name = 'late_e'

        # Plot the results
        fig = draw_forward_result(m, e)
        fig.show()

        add_noise(m, 0.1)
        fig = draw_forward_result(m)

        fig.show()

    except Exception as e:
        traceback.print_exc()
        log.error(repr(e))
    finally:
        log.debug('forward_test finished')
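
Example #4 perturbs the late-time response with add_noise(m, 0.1) before re-plotting it. Below is a minimal sketch of such a helper, assuming multiplicative Gaussian noise on the 'response' channel and the set_item_s setter seen in Example #2; both the noise model and the helper body are assumptions:

import numpy as np

def add_noise(data, level, seed=None):
    # Hypothetical helper: scale every sample of the 'response' channel
    # by (1 + N(0, level)) to mimic multiplicative measurement noise.
    rng = np.random.default_rng(seed)
    response = np.asarray(data['response'], dtype=float)
    noisy = response * (1.0 + rng.normal(0.0, level, size=response.shape))
    data.set_item_s('response', noisy.tolist())
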
Example #5
def forward_test():
    try:
        f = forward.forward_gpu()
        coef = forward.filter_coefficient()
        geo = forward.geoelectric_model()
        data = forward.forward_data()

        coef.load_cos_coef('../test_data/cos_xs.txt')
        coef.load_hkl_coef('../test_data/hankel1.txt')
        geo.load_from_file('../test_data/forward_model1.json')
        data.generate_time_stamp_by_count(-5, 0, 100)

        f.load_time_stamp(data)
        f.load_geo_model(geo)
        f.load_general_params(10, 100, 50)
        f.load_filter_coef(coef)

        f.forward()

        counts = [40, 60, 80, 100]
        layers = [5, 6, 8, 10, 12]

        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1, xlabel='layer count', ylabel='time/s')
        for c in counts:
            data.generate_time_stamp_by_count(-5, 0, c)
            f.load_time_stamp(data)

            t = []

            for l in layers:
                geo.resize(l)
                res = npy.ones([l]) * 100
                hei = npy.ones([l]) * 50
                geo.set_item_s('resistivity', list(res))
                geo.set_item_s('height', list(hei))

                f.load_geo_model(geo)
                s = time.perf_counter()  # time.clock() was removed in Python 3.8
                f.forward()
                e = time.perf_counter()
                t.append(e - s)

            ax.plot(layers, t, label=str(c), marker='+')

        ax.legend()
        fig.savefig('compare')

    except Exception as e:
        traceback.print_exc()
        log.error(repr(e))
    finally:
        log.debug('forward_test finished')
Example #6
def test_nn_class(model, x_test, y_test):
    """
    NNを学習させる
    Adamと呼ばれるパラメータの最適化手法を使用
    @param model: NNの構造モデルオブジェクト
    @param x_test: テストデータの特徴量
    @param y_test: テストデータの教師信号
    @return: 識別率、ロス、識別結果のリスト、ロスのリスト、識別結果の確信度のリスト
    """
    # Number of test samples
    test_sample_size = len(x_test)
    sum_loss = 0
    sum_accuracy = 0
    
    # Predicted class, per-sample loss and confidence scores
    result_class_list = []
    result_loss_list = []
    result_class_power_list = []

    for i in range(test_sample_size):
        x_batch = x_test[i:i+1]
        y_batch = y_test[i:i+1]
        # Forward pass to compute loss and accuracy
        loss, acc, output = forward_data(model, x_batch, y_batch, train=False)

        # Store the results
        result_class_list.append(np.argmax(output.data))
        result_loss_list.append(float(cuda.to_cpu(loss.data)))
        result_class_power_list.append(output.data)
        sum_loss += float(cuda.to_cpu(loss.data))
        sum_accuracy += float(cuda.to_cpu(acc.data))
    
    loss = sum_loss / test_sample_size
    accuracy = sum_accuracy / test_sample_size
    
    return accuracy, loss, result_class_list, result_loss_list, result_class_power_list
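
Examples #6 to #8 delegate the actual forward pass to forward_data(model, x_batch, y_batch, train=...), which is not shown on this page. Here is a minimal sketch of what it might look like for the old Chainer API these examples use; the two-layer classifier with links l1/l2 is purely illustrative:

from chainer import Variable, functions as F

def forward_data(model, x_batch, y_batch, train=True):
    # Hypothetical forward pass for a two-layer classifier returning
    # (loss, accuracy, output), as the training/test loops expect.
    # In a real model the `train` flag would toggle dropout/batch norm;
    # it is unused in this sketch.
    x = Variable(x_batch)
    t = Variable(y_batch)
    h = F.relu(model.l1(x))
    y = model.l2(h)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t), y
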
Example #7
def train_nn_class(model,
                   x_train,
                   y_train,
                   batchsize,
                   epoch_num,
                   test_flag=False,
                   x_test=None,
                   y_test=None,
                   print_flag=False):
    """
    NNを学習させる
    Adamと呼ばれるパラメータの最適化手法を使用
    @param model: NNの構造モデルオブジェクト
    @param x_train: トレーニングデータの特徴量
    @param y_train: トレーニングデータの教師信号
    @param batchsize: 確率的勾配降下法で学習させる際の1回分のバッチサイズ
    @param epoch_num: エポック数(1データセットの学習繰り返し数) 
    @keyword test_flag: テストデータの識別率を学習と同時並行して出力 
    """
    #
    opts = optimizers.Adam()
    opts.setup(model)
    #optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)

    if print_flag:
        pace = 10  # report results every `pace` epochs
    if test_flag:
        pace = 10  # report results every `pace` epochs
        test_sample_size = len(x_test)

    # Number of training samples
    sample_num = len(x_train)

    # Training loop
    # Accumulated accuracy and loss
    sum_accuracy = 0
    sum_loss = 0
    for epoch in range(1, epoch_num + 1):
        # Shuffle the sample order
        perm = np.random.permutation(
            sample_num)  # unlike shuffle, permutation returns a new array

        # Accuracy and loss for this epoch
        sum_accuracy_on_epoch = 0
        sum_loss_on_epoch = 0

        # Train on the data one mini-batch at a time
        # (with batchsize 1 this iterates once per sample)
        for i in range(0, sample_num, batchsize):
            x_batch = x_train[perm[i:i + batchsize]]
            y_batch = y_train[perm[i:i + batchsize]]

            # Reset the gradients
            opts.zero_grads()

            # Forward pass to compute loss and accuracy
            loss, acc, output = forward_data(model,
                                             x_batch,
                                             y_batch,
                                             train=True)

            # Backpropagate to compute the gradients
            loss.backward()

            # Update the weights using the gradients
            opts.update()

            # Accumulate loss and accuracy
            sum_loss_on_epoch += float(cuda.to_cpu(loss.data))
            sum_accuracy_on_epoch += float(cuda.to_cpu(acc.data))  # TODO: confirm with Tamura

        # Accumulate per-epoch accuracy and loss
        accuracy_on_epoch = sum_accuracy_on_epoch / (sample_num / batchsize)
        loss_on_epoch = sum_loss_on_epoch / (sample_num / batchsize)
        sum_accuracy += accuracy_on_epoch
        sum_loss += loss_on_epoch

        if print_flag:
            if epoch % pace == 0:
                print('epoch', epoch)
                print('train now: accuracy={}, loss={}'.format(
                    accuracy_on_epoch, loss_on_epoch))
                # print('train mean: loss={}, accuracy={}'.format(sum_loss / epoch, sum_accuracy / epoch))

        # Report test-set loss and accuracy to check generalisation #########################
        if test_flag:
            if epoch % pace == 0:
                # Report test-set loss and accuracy
                accuracy, loss, result_class_list, result_loss_list, result_class_power_list = test_nn_class(
                    model, x_test, y_test)
                print('test: accuracy={}, loss={}'.format(accuracy, loss))


#                test_sum_accuracy = 0
#                test_sum_loss     = 0
#
#                acc_t=np.zeros(((test_sample_size/batchsize), 5))
#                for i in xrange(0, test_sample_size, batchsize):
#                    x_batch = x_test[i:i+batchsize]
#                    y_batch = y_test[i:i+batchsize]
#
#                    # Forward pass to compute loss and accuracy
#                    loss, acc, output= forward_data(model, x_batch, y_batch, train=False)
#                    acc_t[i][0]=loss.data
#                    acc_t[i][1]=acc.data
#                    acc_t[i][2]=y_batch # teacher signal
#                    acc_t[i][3]=np.max(output.data)#value of max
#                    acc_t[i][4]=np.argmax(output.data)# actual predicted class
#
#                    test_sum_loss += float(cuda.to_cpu(loss.data))
#                    test_sum_accuracy += float(cuda.to_cpu(acc.data))
#
#                    print 'test: accuracy={}, loss={}'.format(test_sum_accuracy / (test_sample_size / batchsize), test_sum_loss / (test_sample_size / batchsize))

    return opts
Example #8
def train_nn_class(model, x_train, y_train, batchsize, epoch_num, test_flag = False, x_test = None, y_test = None, print_flag = False):
    """
    NNを学習させる
    Adamと呼ばれるパラメータの最適化手法を使用
    @param model: NNの構造モデルオブジェクト
    @param x_train: トレーニングデータの特徴量
    @param y_train: トレーニングデータの教師信号
    @param batchsize: 確率的勾配降下法で学習させる際の1回分のバッチサイズ
    @param epoch_num: エポック数(1データセットの学習繰り返し数) 
    @keyword test_flag: テストデータの識別率を学習と同時並行して出力 
    """
    # 
    opts = optimizers.Adam()
    opts.setup(model)
    #optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    
    if print_flag:
        pace = 10  # report results every `pace` epochs
    if test_flag:
        pace = 10  # report results every `pace` epochs
        test_sample_size = len(x_test)
        
    # Number of training samples
    sample_num = len(x_train)
        
    # Training loop
    # Accumulated accuracy and loss
    sum_accuracy = 0
    sum_loss = 0
    for epoch in range(1, epoch_num + 1):
        # Shuffle the sample order
        perm = np.random.permutation(sample_num)  # unlike shuffle, permutation returns a new array
        
        # Accuracy and loss for this epoch
        sum_accuracy_on_epoch = 0
        sum_loss_on_epoch = 0

        # Train on the data one mini-batch at a time
        # (with batchsize 1 this iterates once per sample)
        for i in range(0, sample_num, batchsize):
            x_batch = x_train[perm[i:i+batchsize]]
            y_batch = y_train[perm[i:i+batchsize]]
    
            # Reset the gradients
            opts.zero_grads()
            
            # Forward pass to compute loss and accuracy
            loss, acc, output = forward_data(model, x_batch, y_batch, train=True)
            
            # Backpropagate to compute the gradients
            loss.backward()
            
            # Update the weights using the gradients
            opts.update()
            
            # Accumulate loss and accuracy
            sum_loss_on_epoch += float(cuda.to_cpu(loss.data))
            sum_accuracy_on_epoch += float(cuda.to_cpu(acc.data))  # TODO: confirm with Tamura
        
        # Accumulate per-epoch accuracy and loss
        accuracy_on_epoch = sum_accuracy_on_epoch / (sample_num / batchsize)
        loss_on_epoch = sum_loss_on_epoch / (sample_num / batchsize)
        sum_accuracy += accuracy_on_epoch
        sum_loss += loss_on_epoch
        
        if print_flag:
            if epoch % pace == 0:
                print('epoch', epoch)
                print('train now: accuracy={}, loss={}'.format(accuracy_on_epoch, loss_on_epoch))
                # print('train mean: loss={}, accuracy={}'.format(sum_loss / epoch, sum_accuracy / epoch))

        # Report test-set loss and accuracy to check generalisation #########################
        if test_flag:
            if epoch % pace == 0:
                # Report test-set loss and accuracy
                accuracy, loss, result_class_list, result_loss_list, result_class_power_list = test_nn_class(model, x_test, y_test)
                print('test: accuracy={}, loss={}'.format(accuracy, loss))

                
#                test_sum_accuracy = 0
#                test_sum_loss     = 0
#    
#                acc_t=np.zeros(((test_sample_size/batchsize), 5))
#                for i in xrange(0, test_sample_size, batchsize):
#                    x_batch = x_test[i:i+batchsize]
#                    y_batch = y_test[i:i+batchsize]
#                    
#                    # Forward pass to compute loss and accuracy
#                    loss, acc, output= forward_data(model, x_batch, y_batch, train=False)
#                    acc_t[i][0]=loss.data
#                    acc_t[i][1]=acc.data
#                    acc_t[i][2]=y_batch # teacher signal
#                    acc_t[i][3]=np.max(output.data)#value of max
#                    acc_t[i][4]=np.argmax(output.data)# actual predicted class
#                    
#                    test_sum_loss += float(cuda.to_cpu(loss.data))
#                    test_sum_accuracy += float(cuda.to_cpu(acc.data))
#                    
#                    print 'test: accuracy={}, loss={}'.format(test_sum_accuracy / (test_sample_size / batchsize), test_sum_loss / (test_sample_size / batchsize))

    return opts
Example #9
def inversion():
    f = forward.forward_gpu()

    real_geo_model = forward.geoelectric_model()
    real_geo_model.load_from_file('../test_data/test_geo_model.json')

    coef = forward.filter_coefficient()
    coef.load_cos_coef('../test_data/cos_xs.txt')
    coef.load_hkl_coef('../test_data/hankel1.txt')

    time = forward.forward_data()
    time.generate_time_stamp_by_count(-5, -2, 20)

    f.load_general_params(10, 100, 50)
    f.load_filter_coef(coef)
    f.load_geo_model(real_geo_model)
    f.load_time_stamp(time)

    f.forward()

    real_response = f.get_result_magnetic()
    real_response.name = real_geo_model.name

    # helper.add_noise(m, 0.05)
    real_response_m = real_response['response']

    inversion_geo_model = forward.isometric_model()
    inversion_geo_model.resize(20)
    inversion_geo_model.height = 10
    inversion_geo_model.name = 'inversion'

    inv = Inversion(inversion_geo_model, f)
    inv.set_initial_geo_model(npy.ones([inversion_geo_model.count]) * 50.0)
    inv.set_target_magnetic(real_response_m)

    for i in range(500):
        log.info('iteration %d ' % i)

        inv.rand_grad()
        inv.update_model()
        inv.update_step()

        log.info('iteration %d, loss = %f' % (i, inv.loss()))
        if i % 10 == 0:
            inversion_geo_model.set_item_s('resistivity',
                                           inv.geo_model.tolist())
            inversion_geo_model.name = 'Inversion'
            f.load_geo_model(forward.iso_to_geo(inversion_geo_model))

            f.forward()
            inversion_response = f.get_result_magnetic()
            inversion_response.name = 'Inversion'

            fig = helper.draw_resistivity(
                real_geo_model,
                forward.iso_to_geo(inversion_geo_model),
                last_height=240)
            fig.show()

            fig = helper.draw_forward_result(real_response, inversion_response)
            fig.show()

            print('loss = %f' % helper.loss(real_response, inversion_response))
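
The fixed 500-iteration loop above can also be wrapped so that it stops once the loss plateaus. Here is a sketch using only the Inversion methods already called in Example #9 (rand_grad, update_model, update_step, loss); the stopping criterion and tolerance are assumptions, not part of the original API:

def run_inversion(inv, max_iter=500, tol=1e-4):
    # Drive the same perturbation/update steps as above, but stop early
    # once the loss no longer improves by more than `tol`.
    previous = float('inf')
    for i in range(max_iter):
        inv.rand_grad()
        inv.update_model()
        inv.update_step()

        current = inv.loss()
        log.info('iteration %d, loss = %f' % (i, current))
        if previous - current < tol:
            break
        previous = current
    return inv.geo_model
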