import argparse
import pathlib

import colorama
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from progressbar import progressbar
from scipy import stats

import rospy
from link_bot_data import base_dataset
from link_bot_data.classifier_dataset import ClassifierDatasetLoader
from link_bot_data.dataset_utils import add_predicted
from link_bot_pycommon.pycommon import print_dict
from moonshine.gpu_config import limit_gpu_mem
from moonshine.moonshine_utils import remove_batch
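
# limit_gpu_mem caps how much GPU memory TensorFlow may allocate for this process
# (the argument appears to be in gigabytes), so several of these scripts can share one GPU.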

limit_gpu_mem(1)


def main():
    colorama.init(autoreset=True)

    plt.style.use("slides")
    np.set_printoptions(suppress=True, linewidth=200, precision=3)
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_dirs', type=pathlib.Path, nargs='+')
    parser.add_argument('--display-type', choices=['just_count', '3d', 'stdev'], default='3d')
    parser.add_argument('--mode', choices=['train', 'val', 'test', 'all'], default='train')
    parser.add_argument('--shuffle', action='store_true')
    parser.add_argument('--save', action='store_true')
    parser.add_argument('--threshold', type=float, default=None)
    parser.add_argument('--seed', type=int, default=1)
import argparse
import gzip
import json
import pathlib
from time import sleep

import colorama
import numpy as np
from colorama import Fore

import rospy
from link_bot_classifiers import classifier_utils
from moonshine.gpu_config import limit_gpu_mem
from moonshine.moonshine_utils import numpify, listify
from state_space_dynamics import dynamics_utils

limit_gpu_mem(7)


def main():
    colorama.init(autoreset=True)
    parser = argparse.ArgumentParser()
    parser.add_argument("results_dir",
                        type=pathlib.Path,
                        help='dir containing *_metrics.json.gz')
    parser.add_argument("plan_idx", type=int)

    args = parser.parse_args()
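
    # A ROS node must be initialized before any rospy communication (topics, services, TF).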

    rospy.init_node("postprocess_result")

    with (args.results_dir / 'metadata.json').open('r') as metadata_file:
#!/usr/bin/env python
import argparse
import logging
import pathlib

import colorama
import tensorflow as tf
from colorama import Fore

import rospy
from arc_utilities.filesystem_utils import mkdir_and_ask
from link_bot_data.classifier_dataset_utils import make_classifier_dataset
from link_bot_pycommon.args import my_formatter
from moonshine.gpu_config import limit_gpu_mem

limit_gpu_mem(6)


def main():
    colorama.init(autoreset=True)
    rospy.init_node("make_classifier_dataset")

    tf.get_logger().setLevel(logging.ERROR)
    parser = argparse.ArgumentParser(formatter_class=my_formatter)
    parser.add_argument('dataset_dir', type=pathlib.Path, help='dataset directory')
    parser.add_argument('labeling_params', type=pathlib.Path)
    parser.add_argument('fwd_model_dir', type=pathlib.Path, help='forward model', nargs="+")
    parser.add_argument('--total-take', type=int, help="will be split up between train/test/val")
    parser.add_argument('--start-at', type=str, help='mode:batch_index, ex train:10')
    parser.add_argument('--stop-at', type=str, help='mode:batch_index, ex train:10')
    parser.add_argument('--batch-size', type=int, help='batch size', default=8)
Example #4
import json

from matplotlib import cm
from tabulate import tabulate

import rospy
from link_bot_data.dynamics_dataset import DynamicsDatasetLoader
from link_bot_data.dataset_utils import batch_tf_dataset
from link_bot_pycommon.args import my_formatter
from link_bot_pycommon.get_scenario import get_scenario
from link_bot_pycommon.metric_utils import row_stats, dict_to_pvalue_table
from link_bot_pycommon.pycommon import paths_from_json
from merrrt_visualization.rviz_animation_controller import RvizAnimationController
from moonshine.gpu_config import limit_gpu_mem
from moonshine.moonshine_utils import listify, numpify, remove_batch
from state_space_dynamics import dynamics_utils

limit_gpu_mem(8.5)


def load_dataset_and_models(args):
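    """Load each model named in the comparison file and build a batched dynamics dataset.

    args.comparison is expected to be a JSON file mapping model names to entries whose
    'model_dir' field gives the checkpoint directory for that dynamics model.
    """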
    comparison_info = json.load(args.comparison.open("r"))
    models = {}
    for name, model_info in comparison_info.items():
        model_dir = paths_from_json(model_info['model_dir'])
        model, _ = dynamics_utils.load_generic_model(model_dir)
        models[name] = model

    dataset = DynamicsDatasetLoader(args.dataset_dirs)
    tf_dataset = dataset.get_datasets(mode=args.mode,
                                      shard=args.shard,
                                      take=args.take)
    tf_dataset = batch_tf_dataset(tf_dataset, 1)
#!/usr/bin/env python

import numpy as np

import rospy
import tf2_ros
from link_bot_pycommon import grid_utils
from link_bot_pycommon.grid_utils import environment_to_occupancy_msg
from moonshine.get_local_environment import get_local_env_and_origin_3d_tf
from moonshine.gpu_config import limit_gpu_mem
from mps_shape_completion_msgs.msg import OccupancyStamped

limit_gpu_mem(1.0)
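
# Occupancy-grid parameters: voxel resolution (likely meters) and the full / local
# environment extents in voxels (rows x cols x channels).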

res = [0.01]
full_h_rows = 100
full_w_cols = 100
full_c_channels = 50
local_h_rows = 50
local_w_cols = 50
local_c_channels = 50
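
# A single query point and an all-free full environment (the leading dimension is the
# batch); the commented-out lines below would carve box-shaped obstacles into the grid.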

center_point = np.array([[-.25, -.25, 0.25]], np.float32)

full_env = np.zeros([1, full_h_rows, full_w_cols, full_c_channels],
                    dtype=np.float32)
full_env_origin = np.array([[full_h_rows / 2, full_w_cols / 2, 0]],
                           dtype=np.float32)
# full_env[:, 0:20, 0:20, 0:20] = 1.0
# full_env[:, 50:91, 60:91, 20:50] = 1.0
from unittest import TestCase

import tensorflow as tf

from moonshine.classifier_losses_and_metrics import reconverging_weighted_binary_classification_sequence_loss_function, \
    negative_weighted_binary_classification_sequence_loss_function
from moonshine.gpu_config import limit_gpu_mem
from moonshine.tests.testing_utils import assert_close_tf

limit_gpu_mem(0.1)


class Test(TestCase):
    def test_reconverging_weighted_binary_classification_sequence_loss_function_correct(
            self):
        data = {
            'is_close': tf.constant([[1, 1, 1], [1, 1, 0], [1, 0, 1]],
                                    tf.float32)
        }
        pred = {
            'logits':
            tf.constant([[[100], [100]], [[100], [-100]], [[-100], [100]]],
                        tf.float32),
            'mask':
            tf.constant(
                [[True, True, True], [True, True, True], [True, True, True]],
                tf.bool),
        }
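        # Every saturating logit (+/-100) agrees with the corresponding is_close label,
        # so the weighted loss should come out to (approximately) zero.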
        expected_loss = tf.constant([0.0])
        out_loss = reconverging_weighted_binary_classification_sequence_loss_function(
            data, pred)
Example #7
from time import time

import tensorflow as tf
from colorama import Fore

import rospy
import state_space_dynamics
from link_bot_data.dynamics_dataset import DynamicsDatasetLoader
from link_bot_data.dataset_utils import batch_tf_dataset
from merrrt_visualization.rviz_animation_controller import RvizAnimationController
from moonshine.gpu_config import limit_gpu_mem
from moonshine.moonshine_utils import remove_batch, numpify
from my_cfm.cfm import CFM
from shape_completion_training.model.filepath_tools import load_trial
from shape_completion_training.model_runner import ModelRunner
from state_space_dynamics import train_test

limit_gpu_mem(8)


def train_main(args):
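    """Fine-tune an existing CFM checkpoint as an 'observer'.

    The original trial is copied into a new '<trial>-observer-<timestamp>' directory,
    the encoder is frozen, and the CFM loss is replaced by the observation-feature loss.
    """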
    dataset_dirs = args.dataset_dirs
    checkpoint = args.checkpoint
    epochs = args.epochs
    trial_path, params = load_trial(checkpoint.parent.absolute())
    now = str(time())
    trial_path = trial_path.parent / (trial_path.name + '-observer-' + now)
    trial_path.mkdir(parents=True)
    batch_size = params['batch_size']
    params['encoder_trainable'] = False
    params['use_observation_feature_loss'] = True
    params['use_cfm_loss'] = False
    out_hparams_filename = trial_path / 'params.json'