Example #1
        print('Prediction:\n{0}'.format(xparray(pred)))

        # Save the model with pickle
        if epoch == 1 or epoch % args.snapshot == 0:
            model_fn = '%s/%s_epoch_%d.chainermodel' % (
                result_dir, args.model, epoch)
            pickle.dump(model, open(model_fn, 'wb'), -1)

        """
        CNNで各層を可視化
        1,2層の可視化
        """
        if epoch % args.visualize == 0:
            draw_filters(xparray(model.conv11.W), '%s/log_conv11_epoch_%d.jpg' % (result_dir, epoch))
            draw_filters(xparray(model.conv21.W), '%s/log_conv21_epoch_%d.jpg' % (result_dir, epoch))
            draw_filters_sq(xparray(model.conv12.W), '%s/log_conv12_epoch_%d.jpg' % (result_dir, epoch), 16)
            draw_filters_sq(xparray(model.conv22.W), '%s/log_conv22_epoch_%d.jpg' % (result_dir, epoch), 16)

        # Output the learning-curve plot
        draw_loss_curve(log_fn, '%s/loss_accuracy.jpg' % result_dir, result_dir, args.epoch_offset)

        labels = "sf123456789012345678901234567890123456789012345678901234567890"[0:num_labels]

        # Print the confusion matrices
        print("Confusion Matrix for train data:")
        print_confmat(conf_array_train)

        print("Confusion Matrix for test data:")
        print_confmat(conf_array_test)

        # Write the confusion matrix to CSV
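The draw_filters / draw_filters_sq helpers used above are defined elsewhere in the project and are not shown here. As a rough illustration only, here is a minimal draw_filters-style sketch that tiles first-layer convolution weights into an image grid, assuming the weights arrive as a NumPy array of shape (out_ch, in_ch, kh, kw) and matplotlib is available; the project's real helpers may differ:

import numpy as np
import matplotlib
matplotlib.use('Agg')  # render to files, no display needed
import matplotlib.pyplot as plt

def draw_filters(W, filename, cols=8):
    # W is assumed to be a NumPy array of shape (out_ch, in_ch, kh, kw);
    # only the first input channel of each filter is drawn.
    n_filters = W.shape[0]
    rows = int(np.ceil(n_filters / float(cols)))
    fig, axes = plt.subplots(rows, cols, figsize=(cols, rows))
    for idx, ax in enumerate(np.ravel(axes)):
        ax.axis('off')
        if idx < n_filters:
            ax.imshow(W[idx, 0], cmap='gray', interpolation='nearest')
    fig.savefig(filename, bbox_inches='tight')
    plt.close(fig)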
Example #2
    print(result)
    f.write(json.dumps(result))
    f.close()
    overall_accuracy = np.sum([x == y for x, y in zip(result, answer)]).astype(
        np.float32) / len(result)
    logging.info("Overall Word Prediction Accuracy:")
    logging.info(overall_accuracy)

    print_confmat(conf_array_test)
    """
    CNNで各層を可視化
    1,2層の可視化
    """
    draw_filters(xparray(model.conv11.W), '%s/log_conv11.jpg' % result_dir)
    draw_filters(xparray(model.conv21.W), '%s/log_conv21.jpg' % result_dir)
    draw_filters_sq(xparray(model.conv12.W), '%s/log_conv12.jpg' % result_dir,
                    16)
    draw_filters_sq(xparray(model.conv22.W), '%s/log_conv22.jpg' % result_dir,
                    16)
    """
    テストデータのビデオの1部について、各フレームをpool1に通したあとの出力を行う
    """
    if args.pool1_output_index != -1:
        visualize_pool1(
            xp.asarray(test_vis[args.pool1_output_index]).astype(np.float32),
            xp.asarray(test_dep[args.pool1_output_index]).astype(np.float32),
            args.pool1_output_index, result_dir)

    # Write the confusion matrix to CSV
    np.savetxt('%s/confmat_test.csv' % result_dir,
               conf_array_test,
               delimiter=',',
               fmt='%d')
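print_confmat and the code that fills conf_array_test are likewise defined outside this snippet. Below is a minimal sketch of how such a confusion matrix could be built and printed with NumPy; the build_confmat helper name and the assumption that labels are integer class indices are mine, not the project's:

import numpy as np

def build_confmat(answer, result, num_labels):
    # Count (true label, predicted label) pairs into a square matrix.
    confmat = np.zeros((num_labels, num_labels), dtype=np.int32)
    for true_label, pred_label in zip(answer, result):
        confmat[true_label, pred_label] += 1
    return confmat

def print_confmat(confmat):
    # One row per true label; columns are predicted labels.
    for row in confmat:
        print(' '.join('%4d' % count for count in row))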
Example #3
        logging.info(msg)
        print('\n%s' % msg)
        print('Prediction:\n{0}'.format(xparray(pred)))

        # Save a model snapshot at snapshot intervals
        if epoch == 1 or epoch % args.snapshot == 0:
            model_fn = '%s/%s_epoch_%d.chainermodel' % (
                result_dir, args.model, epoch + args.epoch_offset)
            pickle.dump(model, open(model_fn, 'wb'), -1)

        # Visualize the CNN layers
        # (layers 1 and 2)
        if epoch % args.visualize == 0:
            draw_filters(xparray(model.conv11.W), '%s/log_conv11_epoch_%d.jpg' % (result_dir, epoch))
            draw_filters(xparray(model.conv21.W), '%s/log_conv21_epoch_%d.jpg' % (result_dir, epoch))
            draw_filters_sq(xparray(model.conv12.W), '%s/log_conv12_epoch_%d.jpg' % (result_dir, epoch), 16)
            draw_filters_sq(xparray(model.conv22.W), '%s/log_conv22_epoch_%d.jpg' % (result_dir, epoch), 16)

            # Pick an arbitrary test video and use its frames as input
            video_num = int(np.random.random() * len(test_vis))
            s_image_vis = xp.asarray(test_vis[video_num]).astype(np.float32)
            s_image_dep = xp.asarray(test_dep[video_num]).astype(np.float32)
       
            # Output the result after passing through the first pooling layer
            frame_num = int(s_image_vis.shape[0] / 2)
            draw_image(xparray(model.extract_pool11(s_image_vis[frame_num,:,:]).data), '%s/sample_vis_pool1_%d.jpg' % (result_dir, epoch))
            draw_image(xparray(model.extract_pool21(s_image_dep[frame_num,:,:]).data), '%s/sample_dep_pool1_%d.jpg' % (result_dir, epoch))

        # Output the learning-curve plot
        draw_loss_curve(log_fn, '%s/log.jpg' % result_dir)
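draw_loss_curve reads the training log written during the loop above and plots it. A minimal sketch, assuming the log file contains one JSON record per line with epoch, train_loss and test_loss fields; the actual log format used by this project may differ:

import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

def draw_loss_curve(log_fn, out_fn):
    # Plot train/test loss per epoch from a line-delimited JSON log.
    epochs, train_loss, test_loss = [], [], []
    with open(log_fn) as f:
        for line in f:
            rec = json.loads(line)
            epochs.append(rec['epoch'])
            train_loss.append(rec['train_loss'])
            test_loss.append(rec['test_loss'])
    plt.figure()
    plt.plot(epochs, train_loss, label='train loss')
    plt.plot(epochs, test_loss, label='test loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.savefig(out_fn)
    plt.close()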
Example #4
    answer = []
    for i in xrange(len(test_vis)):
        result.append(predict_sign_simple(pred[i], 5, [0, 1]))
        answer.append(predict_sign_simple(test_labels[i][0], 5, [0, 1]))
    print(result)
    f.write(json.dumps(result))
    f.close()
    overall_accuracy = np.sum([x == y for x, y in zip(result, answer)]).astype(np.float32) / len(result)    
    logging.info("Overall Word Prediction Accuracy:")
    logging.info(overall_accuracy)

    print_confmat(conf_array_test)

    """
    CNNで各層を可視化
    1,2層の可視化
    """
    draw_filters(xparray(model.conv11.W), '%s/log_conv11.jpg' % result_dir)
    draw_filters(xparray(model.conv21.W), '%s/log_conv21.jpg' % result_dir)
    draw_filters_sq(xparray(model.conv12.W), '%s/log_conv12.jpg' % result_dir, 16)
    draw_filters_sq(xparray(model.conv22.W), '%s/log_conv22.jpg' % result_dir, 16)

    """
    テストデータのビデオの1部について、各フレームをpool1に通したあとの出力を行う
    """
    if args.pool1_output_index != -1:
        visualize_pool1(xp.asarray(test_vis[args.pool1_output_index]).astype(np.float32), xp.asarray(test_dep[args.pool1_output_index]).astype(np.float32), args.pool1_output_index, result_dir)

    # Write the confusion matrix to CSV
    np.savetxt('%s/confmat_test.csv' % result_dir, conf_array_test, delimiter=',', fmt='%d')
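visualize_pool1 is not shown in these snippets either. Judging from the extract_pool11 / extract_pool21 calls in Example #3, it presumably runs every frame of the selected test video through the first pooling layer of the visual and depth streams and saves the feature maps. The sketch below assumes exactly that; it reuses the module-level model, xparray and draw_image helpers from the snippets above, and the output file naming is an assumption:

def visualize_pool1(vis_frames, dep_frames, video_index, result_dir):
    # Pass each frame through pool1 of both streams and save the feature maps.
    for frame_num in range(vis_frames.shape[0]):
        pool_vis = xparray(model.extract_pool11(vis_frames[frame_num, :, :]).data)
        pool_dep = xparray(model.extract_pool21(dep_frames[frame_num, :, :]).data)
        draw_image(pool_vis, '%s/pool1_vis_video%d_frame%d.jpg'
                   % (result_dir, video_index, frame_num))
        draw_image(pool_dep, '%s/pool1_dep_video%d_frame%d.jpg'
                   % (result_dir, video_index, frame_num))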