Example #1
# Assumed imports for this excerpt; parse_input, visgraph, astar and
# pathlength come from the surrounding project.
from argparse import ArgumentParser


def main():
    argp = ArgumentParser()
    argp.add_argument("-v", "--view", action="store_true")
    argp.add_argument("-n", "--no-visgraph", action="store_true")
    argv = argp.parse_args()

    objs, start, end = parse_input()
    G = visgraph(objs, start, end)
    path = astar(G, start, end)

    if argv.view:
        import visualization
        visualization.show(objs, path, adj={} if argv.no_visgraph else G)

    print(pathlength(path))
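pathlength is not shown in this excerpt; a plausible stand-in, assuming a path is a list of coordinate tuples joined by straight Euclidean segments:

import math


def pathlength(path):
    # Total Euclidean length of consecutive path segments (hypothetical helper;
    # the real pathlength comes from the surrounding project).
    return sum(math.dist(p, q) for p, q in zip(path, path[1:]))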
Example #2
# Assumed imports for this excerpt.
import numpy as np

import visualization


def test_show_matrix():
    w = np.random.random((8, 10))
    title = " "
    visualization.show(w, title)
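visualization is a project-local module; a minimal stand-in for the visualization.show call above, assuming it just renders a matrix with matplotlib (the signature is a guess, not the project's API):

import matplotlib.pyplot as plt


def show(w, title=""):
    # Hypothetical stand-in: render a 2-D matrix as an image with a title.
    plt.imshow(w)
    plt.title(title)
    plt.colorbar()
    plt.show()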
Example #3
# NOTE: the original excerpt begins mid-function; the imports, distance
# helper and function head below are a reconstructed sketch inferred from the
# names in the surviving body. Only the code from the goal check onward is
# original.
from queue import PriorityQueue
from sys import stderr


def d2(a, b):
    # Assumed Euclidean distance helper (the project's own d2 is not shown)
    return sum((ai - bi) ** 2 for ai, bi in zip(a, b)) ** 0.5


def astar(adj, start, end):
    parents = {}
    pathlen = {start: 0}
    heuristic = {start: d2(start, end)}
    explored = {start}
    boundary = PriorityQueue()
    boundary.put((heuristic[start], start))

    while not boundary.empty():
        _, current = boundary.get()

        # Goal reached: rebuild the path by walking parent links to the start
        if current == end:
            path = [current]
            while current in parents:
                current = parents[current]
                path.append(current)
            path.reverse()
            return path

        # Add each neighbor
        for neighbor in adj[current]:
            new_pathlen = pathlen[current] + d2(current, neighbor)
            # get()'s default keeps the test true for unseen neighbors
            if new_pathlen < pathlen.get(neighbor, new_pathlen + 1):
                parents[neighbor] = current
                pathlen[neighbor] = new_pathlen
                heuristic[neighbor] = h = new_pathlen + d2(neighbor, end)

                if neighbor not in explored:
                    boundary.put((h, neighbor))
                explored.add(neighbor)

    print("No path found", file=stderr)
    return []


if __name__ == "__main__":
    # Sample obstacles and path
    from sample import objs, start, end
    from visgraph import visgraph

    adj = visgraph(objs, start, end)

    apath = astar(adj, start, end)

    import visualization
    visualization.show(objs, apath, adj=adj)
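The __main__ block depends on the project's sample and visgraph modules; the astar sketch above can also be smoke-tested on a hand-built adjacency dict (hypothetical data):

# Four corners of a unit square, connected along the edges
square = {
    (0, 0): [(0, 1), (1, 0)],
    (0, 1): [(0, 0), (1, 1)],
    (1, 0): [(0, 0), (1, 1)],
    (1, 1): [(0, 1), (1, 0)],
}
print(astar(square, (0, 0), (1, 1)))  # a shortest path, e.g. [(0, 0), (0, 1), (1, 1)]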
Example #4
# (truncated context from the original excerpt)
#                 points.append((6, (x, y, z + .15)))
#
# Assumed imports; run_km's module is hypothetical, visualization is the
# project's plotting helper.
import math
import signal
import sys

import visualization
from kinematics import run_km  # hypothetical home of run_km

# Target coordinates from the command line
x = float(sys.argv[1])
y = float(sys.argv[2])
z = float(sys.argv[3])
# example: 0, 0, 0.438

# Later on: accept the initial joint angles (the first 6 inputs) plus the 3
# target coordinates to move to.
# SIGALRM handler used to abort kinematics runs that exceed a time limit
def handler(signum, frame):
    raise Exception("end of time")

# for i in range(0, 20):
#     for j in range(0, 20):
#         for k in range(0, 20):
#             signal.signal(signal.SIGALRM, handler)
#             signal.alarm(20)
#             try:
#                 angles = run_km(i/10, j/10, k/10)
#                 print(i/10, j/10, k/10, "OK")
#             except Exception as exc:
#                 print(i/10, j/10, k/10, "timed out")



# Approach the target in four equal steps along the line to (x, y, z)
for i in range(1, 5):
    angles = run_km(x / 4 * i, y / 4 * i, z / 4 * i)
    print(110 + i)
    visualization.show(math.radians(angles[1]), math.radians(angles[0]),
                       math.radians(angles[3]), math.radians(angles[2]),
                       x, y, z)
visualization.show_plot()
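The commented-out sweep bounds each run_km call with SIGALRM; the same timeout pattern as a self-contained sketch (Unix-only; cancelling the alarm in finally is an addition the commented code omits):

import signal


def timeout_handler(signum, frame):
    raise Exception("end of time")


def run_with_timeout(fn, seconds, *args):
    # Arm SIGALRM before the call and always disarm it afterwards.
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(seconds)
    try:
        return fn(*args)
    finally:
        signal.alarm(0)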
Example #5
# Assumed imports for this excerpt; AttributeDict is a project helper whose
# import is not shown in the original.
import logging
import os
import traceback

import numpy as np
import tensorflow as tf
import yaml

import configuration
import hdf5_utils
import input_pipeline
import math_utils
import models
import tf_utils

logger = logging.getLogger(__name__)


def run(args):
    # Read config file
    topic_cmdline_mappings = {"tensorflow": "tf"}
    topics = ["tensorflow", "io", "training", "data"]
    cfg = configuration.get_config_from_cmdline(args, topics,
                                                topic_cmdline_mappings)
    if args.config is not None:
        with open(args.config, "r") as config_file:
            tmp_cfg = yaml.safe_load(config_file)
            configuration.update_config_from_other(cfg, tmp_cfg)

    # Read model config
    if "model" in cfg:
        model_config = cfg["model"]
    elif args.model_config is not None:
        model_config = {}
    else:
        logger.fatal(
            "ERROR: Model configuration must be in general config file or provided in extra config file."
        )
        import sys
        sys.exit(1)

    if args.model_config is not None:
        with open(args.model_config, "r") as config_file:
            tmp_model_config = yaml.safe_load(config_file)
            configuration.update_config_from_other(model_config,
                                                   tmp_model_config)

    cfg = AttributeDict.convert_deep(cfg)
    model_config = AttributeDict.convert_deep(model_config)

    if args.hdf5_data_stats_path is not None:
        logger.info("Loading data stats from HDF5 file")
        data_stats_dict = hdf5_utils.read_hdf5_file_to_numpy_dict(
            args.hdf5_data_stats_path)
    else:
        data_stats_dict = None
    if args.use_train_data or data_stats_dict is None:
        logger.info("Creating train dataflow")
        train_dataflow = input_pipeline.InputAndTargetDataFlow(
            cfg.data.train_path,
            cfg.data,
            shuffle_lmdb=args.shuffle,
            override_data_stats=data_stats_dict,
            verbose=True)
        data_stats_dict = train_dataflow.get_data_stats()
        if args.use_train_data:
            dataflow = train_dataflow
    if not args.use_train_data:
        assert cfg.data.test_path is not None, "Test data path has to be specified if not using train data"
        logger.info("Creating test dataflow")
        dataflow = input_pipeline.InputAndTargetDataFlow(
            cfg.data.test_path,
            cfg.data,
            shuffle_lmdb=args.shuffle,
            override_data_stats=data_stats_dict,
            verbose=True)

    logger.info("# samples in dataset: {}".format(dataflow.size()))

    logger.info("Input and target shapes:")
    dataflow.reset_state()
    first_sample = next(dataflow.get_data())
    tensor_shapes = [tensor.shape for tensor in first_sample]
    tensor_dtypes = [tensor.dtype for tensor in first_sample]
    logger.info("  Shape of input: {}".format(first_sample[0].shape))
    logger.info("  Type of input: {}".format(first_sample[0].dtype))
    logger.info("  Shape of target: {}".format(first_sample[1].shape))
    logger.info("  Type of target: {}".format(first_sample[1].dtype))

    # Create tensorflow session
    logger.info("Creating tensorflow session")
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=cfg.tensorflow.gpu_memory_fraction)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)
    tf_config.intra_op_parallelism_threads = cfg.tensorflow.intra_op_parallelism
    tf_config.inter_op_parallelism_threads = cfg.tensorflow.inter_op_parallelism
    tf_config.log_device_placement = cfg.tensorflow.log_device_placement
    # NOTE: in the original excerpt the session was opened in a `with` block
    # that closed before the evaluation below used it; opening it explicitly
    # (it is closed in the `finally` clause at the end) fixes that.
    sess = tf.Session(config=tf_config)
    coord = tf.train.Coordinator()

    # def signal_handler(signal, frame):
    #     sess.close()
    #
    # batch_size = 1
    #
    # sample_stats = dataflow.get_sample_stats()
    # pipeline = input_pipeline.TFDataFlowPipeline(
    #     dataflow.get_batch_dataflow(), tensor_shapes, tensor_dtypes, sess, coord, batch_size,
    #     cfg.tensorflow, is_training=False, sample_stats=sample_stats, is_batch_dataflow=True)
    #
    # Create model
    #
    # with tf.device("/gpu:0"):
    #     with tf.variable_scope("model"):
    #         model = models.Model(model_config,
    #                              pipeline.tensors_batch[0],
    #                              pipeline.tensors[1],
    #                              is_training=False,
    #                              verbose=args.verbose)

    input_placeholder = tf.placeholder(dtype=tensor_dtypes[0],
                                       shape=(1,) + tensor_shapes[0],
                                       name="Input")
    target_placeholder = tf.placeholder(dtype=tensor_dtypes[1],
                                        shape=(1,) + tensor_shapes[1],
                                        name="Target")
    gpu_device_name = tf_utils.gpu_device_name()
    print(tf_utils.get_available_cpu_ids(),
          tf_utils.get_available_cpu_names())
    print(tf_utils.get_available_gpu_ids(),
          tf_utils.get_available_gpu_names())
    print(gpu_device_name)
    with tf.device(gpu_device_name):
        with tf.variable_scope("model"):
            model = models.Model(model_config,
                                 input_placeholder,
                                 target_placeholder,
                                 is_training=False,
                                 verbose=args.verbose)

    try:
        saver = tf.train.Saver(model.global_variables)

        if args.check_numerics:
            # Run numeric checks on all model checkpoints
            if args.checkpoint is None:
                ckpt = tf.train.get_checkpoint_state(args.model_dir)
                checkpoint_paths = ckpt.all_model_checkpoint_paths
            else:
                checkpoint_path = os.path.join(args.model_dir, args.checkpoint)
                checkpoint_paths = [checkpoint_path]
            for checkpoint_path in checkpoint_paths:
                if args.verbose:
                    logger.info(
                        "Checking numerics on model checkpoint {}".format(
                            checkpoint_path))
                saver.restore(sess, checkpoint_path)
                for var in model.variables:
                    if args.verbose:
                        logger.info("  Checking tensor {}".format(var.name))
                    sess.run(
                        tf.check_numerics(
                            var, "Numeric check for tensor {} failed".format(
                                var.name)))
            return

        # Restore model
        if args.checkpoint is None:
            logger.info("Reading latest checkpoint from {}".format(
                args.model_dir))
            ckpt = tf.train.get_checkpoint_state(args.model_dir)
            if ckpt is None:
                raise IOError("No previous checkpoint found at {}".format(
                    args.model_dir))
            else:
                logger.info('Found previous checkpoint... restoring')
                checkpoint_path = ckpt.model_checkpoint_path
            saver.restore(sess, checkpoint_path)
        else:
            checkpoint_path = os.path.join(args.model_dir, args.checkpoint)
        if checkpoint_path is not None:
            logger.info("Trying to restore model from checkpoint {}".format(
                checkpoint_path))
            saver.restore(sess, checkpoint_path)

        # custom_threads = []
        # pipeline.start()
        # custom_threads.extend(pipeline.threads)
        # # Start data provider threads
        # custom_threads.extend(tf.train.start_queue_runners(sess=sess))

        sess.graph.finalize()

        # Running statistics
        stats = None

        denorm_target_list = []
        denorm_output_list = []

        logger.info("Starting evaluating")
        for i, (input, target) in enumerate(dataflow.get_data()):
            if args.verbose:
                logger.info("  sample # {}".format(i))

            if stats is None:
                stats = AttributeDict()
                stats.output = math_utils.SinglePassStatistics(target.shape)
                stats.target = math_utils.SinglePassStatistics(target.shape)
                stats.diff = math_utils.SinglePassStatistics(target.shape)
                stats.squared_diff = math_utils.SinglePassStatistics(
                    target.shape)
                stats.loss = math_utils.SinglePassStatistics(target.shape)

            denorm_target = dataflow.input_and_target_retriever.denormalize_target(
                target)

            input_batch = input[np.newaxis, ...]
            target_batch = target[np.newaxis, ...]
            loss_v, loss_min_v, loss_max_v, output_batch = sess.run(
                [model.loss, model.loss_min, model.loss_max, model.output],
                feed_dict={
                    input_placeholder: input_batch,
                    target_placeholder: target_batch
                })
            output = output_batch[0, ...]
            denorm_output = dataflow.input_and_target_retriever.denormalize_target(
                output)
            diff = denorm_output - denorm_target
            squared_diff = np.square(diff)
            if args.verbose:
                logger.info("Output={}, Target={}, Diff={}, Diff^2={}".format(
                    denorm_output, denorm_target, diff, squared_diff))
                logger.info("  loss: {}, min loss: {}, max loss: {}".format(
                    loss_v, loss_min_v, loss_max_v))
            # Debug aid: pause briefly on unusually large errors
            if diff > 80:
                import time
                time.sleep(5)
            # Update stats
            stats.output.add_value(denorm_output)
            stats.target.add_value(denorm_target)
            stats.diff.add_value(diff)
            stats.squared_diff.add_value(squared_diff)
            stats.loss.add_value(loss_v)

            denorm_output_list.append(denorm_output)
            denorm_target_list.append(denorm_target)

            if i % 100 == 0 and i > 0:
                logger.info("-----------")
                logger.info("Statistics after {} samples:".format(i + 1))
                for key in stats:
                    logger.info(
                        "  {:s}: mean={:.4f}, stddev={:.4f}, min={:.4f}, max={:.4f}"
                        .format(key, stats[key].mean[0], stats[key].stddev[0],
                                float(stats[key].min), float(stats[key].max)))
                logger.info("-----------")

                import scipy.stats
                correlation, pvalue = scipy.stats.pearsonr(
                    np.array(denorm_target_list), np.array(denorm_output_list))
                logger.info("Pearson correlation: {} [p={}]".format(
                    correlation, pvalue))
                correlation, pvalue = scipy.stats.spearmanr(
                    np.array(denorm_target_list), np.array(denorm_output_list))
                logger.info("Spearman correlation: {} [p={}]".format(
                    correlation, pvalue))
                obj = {
                    "a": np.array(denorm_target_list),
                    "b": np.array(denorm_output_list)
                }
                np.savez("spearman.npz", **obj)
                hdf5_utils.write_numpy_dict_to_hdf5_file("spearman.hdf5", obj)

            if args.visualize:
                import visualization
                fig = 1
                fig = visualization.plot_grid(input[..., 2],
                                              input[..., 3],
                                              title_prefix="input",
                                              show=False,
                                              fig_offset=fig)
                # fig = visualization.plot_grid(record.in_grid_3d[..., 6], record.in_grid_3d[..., 7], title_prefix="in_grid_3d", show=False, fig_offset=fig)
                visualization.show(stop=True)

    except Exception as exc:
        logger.info("Exception in evaluation loop: {}".format(exc))
        traceback.print_exc()
        coord.request_stop(exc)
        raise exc
    finally:
        logger.info("Requesting stop")
        coord.request_stop()
        # pipeline.stop()
        # coord.join(custom_threads, stop_grace_period_secs=(2 * cfg.io.timeout))
        sess.close()
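The running statistics rely on math_utils.SinglePassStatistics; a minimal sketch of that interface (add_value, mean, stddev, min, max) built on Welford's single-pass algorithm, assuming the project class behaves similarly:

import numpy as np


class SinglePassStatistics(object):
    # Welford's online algorithm: running mean/variance in one pass.
    def __init__(self, shape):
        self.n = 0
        self._mean = np.zeros(shape)
        self._m2 = np.zeros(shape)
        self.min = np.full(shape, np.inf)
        self.max = np.full(shape, -np.inf)

    def add_value(self, x):
        x = np.asarray(x, dtype=np.float64)
        self.n += 1
        delta = x - self._mean
        self._mean += delta / self.n
        self._m2 += delta * (x - self._mean)
        self.min = np.minimum(self.min, x)
        self.max = np.maximum(self.max, x)

    @property
    def mean(self):
        return self._mean

    @property
    def stddev(self):
        # Sample standard deviation; zero until two values have been seen
        return np.sqrt(self._m2 / max(self.n - 1, 1))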
Example #6
# Assumed imports for this excerpt (Python 2, given xrange); Timer,
# TimingMeasurer, DummyTimingMeasurer, VisitedPoses and query_octomap are
# project helpers whose modules are not shown in the original.
import os

import numpy as np
import yaml

import data_record
import env_factory
import file_helpers


def run(args):
    timer = Timer()
    timer2 = Timer()
    wait_until_pose_set = args.wait_until_pose_set
    # The simulation loop below tests `measure_timing`, so bind it here
    measure_timing = args.measure_timing

    if args.measure_timing:
        timing_measurer = TimingMeasurer()
    else:
        timing_measurer = DummyTimingMeasurer()

    output_path = args.output_path
    if not os.path.isdir(output_path):
        os.makedirs(output_path)
    filename_template = os.path.join(output_path,
                                     file_helpers.DEFAULT_HDF5_TEMPLATE)

    records_per_file = args.records_per_file
    num_files = args.num_files
    num_records = num_files * records_per_file
    reset_interval = args.reset_interval
    reset_score_threshold = args.reset_score_threshold
    check_written_records = args.check_written_records

    dataset_kwargs = {}
    if args.compression_level >= 0:
        dataset_kwargs.update({
            "compression": "gzip",
            "compression_opts": args.compression_level
        })

    epsilon = args.epsilon

    obs_levels = [
        int(x.strip()) for x in args.obs_levels.strip("[]").split(",")
    ]
    obs_sizes = [int(x.strip()) for x in args.obs_sizes.strip("[]").split(",")]
    obs_sizes = [obs_sizes] * len(obs_levels)
    print("obs_levels={}".format(obs_levels))
    print("obs_sizes={}".format(obs_sizes))

    client_id = args.client_id
    with open(args.environment_config, "r") as fin:  # open() instead of Python 2 file()
        environment_config = yaml.safe_load(fin)
    environment = env_factory.create_environment_from_config(
        environment_config, client_id)

    intrinsics = environment.get_engine().get_intrinsics()
    result = environment.get_mapper().perform_info()
    map_resolution = result.resolution
    axis_mode = environment_config["collect_data"]["axis_mode"]
    forward_factor = environment_config["collect_data"]["forward_factor"]
    downsample_to_grid = environment_config["collect_data"][
        "downsample_to_grid"]
    print("map_resolution={}".format(map_resolution))
    print("axis_mode={}".format(axis_mode))
    print("forward_factor={}".format(forward_factor))
    print("downsample_to_grid={}".format(downsample_to_grid))

    if args.manual:
        environment.get_engine().enable_input()

        import time
        environment.reset(keep_pose=True)
        while True:
            current_pose = environment.get_pose()

            rgb_image, depth_image, normal_image = \
                environment.get_engine().get_rgb_depth_normal_images()
            rgb_image = np.asarray(rgb_image, dtype=np.float32)
            depth_image = np.asarray(depth_image, dtype=np.float32)
            normal_image = np.asarray(normal_image, dtype=np.float32)

            if args.visualize:
                if depth_image.shape[0] == 1:
                    import matplotlib.pyplot as plt
                    fig = plt.figure(1)
                    plt.clf()
                    plt.step(np.arange(depth_image.shape[1]),
                             depth_image[0, ...])
                    plt.title("Depth image")
                    fig.canvas.draw()
                    plt.show(block=False)
                else:
                    import cv2
                    cv2.imshow("depth_image",
                               depth_image / np.max(depth_image))
                    cv2.waitKey(50)

                    import matplotlib.pyplot as plt
                    fig = plt.figure(1)
                    plt.clf()
                    i = int(depth_image.shape[0] / 2)  # middle row
                    plt.step(np.arange(depth_image.shape[1]),
                             depth_image[i, ...])
                    plt.title("Depth image")
                    fig.canvas.draw()
                    plt.show(block=False)

            # Query octomap
            print("current_pose: {}".format(current_pose.orientation_rpy()))
            in_grid_3ds = query_octomap(environment,
                                        current_pose,
                                        obs_levels,
                                        obs_sizes,
                                        map_resolution,
                                        axis_mode=axis_mode,
                                        forward_factor=forward_factor)
            if args.visualize:
                fig = 1
                import visualization
                visualization.clear_figure(fig)
                visualization.plot_grid(in_grid_3ds[..., 0],
                                        in_grid_3ds[..., 1],
                                        title_prefix="input",
                                        show=False,
                                        fig_offset=fig)
                visualization.show(stop=True)

            result = environment.get_mapper().perform_insert_depth_map_rpy(
                current_pose.location(),
                current_pose.orientation_rpy(),
                depth_image,
                intrinsics,
                downsample_to_grid=downsample_to_grid,
                simulate=False)

            time.sleep(0.5)
        return

    # environment.get_engine().test()
    environment.get_engine().disable_input()

    next_file_num = 0
    records = []
    # environment.reset()
    environment.get_mapper().perform_load_surface_voxels()
    prev_action = np.random.randint(0, environment.get_num_of_actions())
    reset_env = False
    for i in xrange(num_records):
        if next_file_num >= num_files:
            break

        print("Record #{}".format(i))
        current_pose = environment.get_pose()

        result = environment.get_mapper().perform_info()
        score = result.score
        normalized_score = result.normalized_score
        prob_score = result.probabilistic_score
        normalized_prob_score = result.normalized_probabilistic_score
        scores = np.array(
            [score, normalized_score, prob_score, normalized_prob_score])

        print("  scores: {}".format(scores))

        if reset_env or \
                (i % reset_interval == 0) \
                or normalized_prob_score >= reset_score_threshold:
            print("Resetting environment")
            reset_env = False
            environment.reset()
            visited_poses = VisitedPoses(
                3 + 4, np.concatenate([np.ones((3, )), 10. * np.ones((4, ))]))

        visited_poses.increase_visited_pose(current_pose)

        if measure_timing:
            timer2.restart()

        # Simulate effect of actions and compute depth maps and rewards
        prob_rewards = np.zeros((environment.get_num_of_actions(), ))
        visit_counts = np.zeros((environment.get_num_of_actions(), ))
        collision_flags = np.zeros((environment.get_num_of_actions(), ))
        for action in xrange(environment.get_num_of_actions()):
            with timing_measurer.measurement("simulate_collision"):
                colliding = environment.is_action_colliding(
                    current_pose, action)
            if colliding:
                print("Action {} would collide".format(action))
                collision_flags[action] = 1
                continue
            with timing_measurer.measurement("simulate_action"):
                new_pose = environment.simulate_action_on_pose(
                    current_pose, action)
            with timing_measurer.measurement("simulate_set_pose"):
                environment.set_pose(new_pose,
                                     wait_until_set=wait_until_pose_set)
            # point_cloud = environment._get_depth_point_cloud(new_pose)
            # result = environment.get_mapper().perform_insert_point_cloud_rpy(
            #     new_pose.location(), new_pose.orientation_rpy(), point_cloud, simulate=True)
            with timing_measurer.measurement("simulate_get_depth_image"):
                depth_image = environment.get_engine().get_depth_image()
            with timing_measurer.measurement("simulate_insert_depth_image"):
                result = environment.get_mapper().perform_insert_depth_map_rpy(
                    new_pose.location(),
                    new_pose.orientation_rpy(),
                    depth_image,
                    intrinsics,
                    downsample_to_grid=downsample_to_grid,
                    simulate=True)
            prob_reward = result.probabilistic_reward

            prob_rewards[action] = prob_reward
            assert (prob_reward >= 0)

            visit_count = visited_poses.get_visit_count(new_pose)
            visit_counts[action] = visit_count

        with timing_measurer.measurement("select_action"):
            print("Possible rewards: {}".format(prob_rewards))

            visit_counts = np.array(visit_counts, dtype=np.float32)
            visit_weights = 1. / (visit_counts + 1)
            adjusted_rewards = prob_rewards * visit_weights
            adjusted_rewards[collision_flags > 0] = -np.finfo(np.float32).max
            print("Adjusted expected rewards:", adjusted_rewards)

            # Reset if every action would lead to a collision
            if np.all(collision_flags > 0):
                reset_env = True
                continue

            # Perform epsilon-greedy action.
            if np.random.rand() < epsilon:
                valid_action_indices = np.arange(
                    environment.get_num_of_actions())
                valid_action_indices = valid_action_indices[collision_flags ==
                                                            0]
                assert (len(valid_action_indices) > 0)
                action = np.random.choice(valid_action_indices)
            else:
                max_prob_reward = np.max(adjusted_rewards)
                actions = np.arange(environment.get_num_of_actions())[
                    adjusted_rewards == max_prob_reward]
                if len(actions) == 1:
                    action = actions[0]
                else:
                    # If there is not a single best action, redo the previous one if it is one of the best.
                    if prev_action in actions:
                        action = prev_action
                    else:
                        action = np.random.choice(actions)
            # print("Selected action: {}".format(action))

            if np.all(collision_flags[action] > 0):
                reset_env = True
                continue

        with timing_measurer.measurement("simulate_action"):
            new_pose = environment.simulate_action_on_pose(
                current_pose, action)
        with timing_measurer.measurement("set_pose"):
            environment.set_pose(new_pose, wait_until_set=wait_until_pose_set)

        # Get current scores
        with timing_measurer.measurement("get_info"):
            result = environment.get_mapper().perform_info()
            scores = np.asarray([
                result.score, result.normalized_score,
                result.probabilistic_score,
                result.normalized_probabilistic_score
            ],
                                dtype=np.float32)

        with timing_measurer.measurement("simulate_rgb_depth_normal_images"):
            rgb_image, depth_image, normal_image = \
                environment.get_engine().get_rgb_depth_normal_images()
            rgb_image = np.asarray(rgb_image, dtype=np.float32)
            depth_image = np.asarray(depth_image, dtype=np.float32)
            normal_image = np.asarray(normal_image, dtype=np.float32)

        if args.visualize:
            if depth_image.shape[0] == 1:
                import matplotlib.pyplot as plt
                fig = plt.figure(1)
                plt.clf()
                # plt.plot(np.arange(depth_image.shape[1]), depth_image[0, ...])
                plt.step(np.arange(depth_image.shape[1]), depth_image[0, ...])
                plt.title("Depth image")
                fig.canvas.draw()
                plt.show(block=False)
            else:
                import cv2
                cv2.imshow("depth_image", depth_image / np.max(depth_image))
                cv2.waitKey(50)

        # Query octomap
        with timing_measurer.measurement("query_octomap"):
            in_grid_3ds = query_octomap(environment,
                                        new_pose,
                                        obs_levels,
                                        obs_sizes,
                                        map_resolution,
                                        axis_mode=axis_mode,
                                        forward_factor=forward_factor)
        if args.visualize:
            fig = 1
            import visualization
            fig = visualization.plot_grid(in_grid_3ds[..., 2],
                                          in_grid_3ds[..., 3],
                                          title_prefix="input_1",
                                          show=False,
                                          fig_offset=fig)
            visualization.clear_figure(fig)
            visualization.plot_grid(in_grid_3ds[..., 4],
                                    in_grid_3ds[..., 5],
                                    title_prefix="input_2",
                                    show=False,
                                    fig_offset=fig)
            visualization.show(stop=True)

        # sim_result = environment.get_mapper().perform_insert_depth_map_rpy(
        #     new_pose.location(), new_pose.orientation_rpy(),
        #     depth_image, intrinsics, downsample_to_grid=downsample_to_grid, simulate=True)
        with timing_measurer.measurement("insert_depth_image"):
            result = environment.get_mapper().perform_insert_depth_map_rpy(
                new_pose.location(),
                new_pose.orientation_rpy(),
                depth_image,
                intrinsics,
                downsample_to_grid=downsample_to_grid,
                simulate=False)
        # print("result diff:", sim_result.probabilistic_reward - result.probabilistic_reward)
        # assert(sim_result.probabilistic_reward - result.probabilistic_reward == 0)

        # Query octomap
        with timing_measurer.measurement("query_octomap"):
            out_grid_3ds = query_octomap(environment,
                                         new_pose,
                                         obs_levels,
                                         obs_sizes,
                                         map_resolution,
                                         axis_mode=axis_mode,
                                         forward_factor=forward_factor)

        print("Selected action={}, probabilistic reward={}".format(
            action, result.probabilistic_reward))
        print("Grid differences:", [
            np.sum(out_grid_3ds[..., i] - in_grid_3ds[..., i])
            for i in xrange(in_grid_3ds.shape[-1])
        ])

        # Keep record for saving later
        rewards = np.asarray([
            result.reward, result.normalized_reward,
            result.probabilistic_reward, result.normalized_probabilistic_reward
        ],
                             dtype=np.float32)
        # scores = np.array([result.score, result.normalized_score,
        #                    result.probabilistic_score, result.normalized_probabilistic_score])
        record = data_record.RecordV4(intrinsics, map_resolution, axis_mode,
                                      forward_factor, obs_levels, in_grid_3ds,
                                      out_grid_3ds, rewards, scores, rgb_image,
                                      depth_image, normal_image)

        prev_action = action

        records.append(record)

        timing_measurer.print_times()

        if len(records) % records_per_file == 0:
            # filename, next_file_num = get_next_output_tf_filename(next_file_num)
            filename, next_file_num = file_helpers.get_next_output_hdf5_filename(
                next_file_num, template=filename_template)
            print("Writing records to file {}".format(filename))
            # write_tf_records(filename, records)
            if not args.dry_run:
                data_record.write_hdf5_records_v4(
                    filename, records, dataset_kwargs=dataset_kwargs)
                if check_written_records:
                    print("Reading records from file {}".format(filename))
                    records_read = data_record.read_hdf5_records_v4_as_list(
                        filename)
                    for record, record_read in zip(records, records_read):
                        assert (np.all(
                            record.intrinsics == record_read.intrinsics))
                        assert (record.map_resolution ==
                                record_read.map_resolution)
                        assert (record.axis_mode == record_read.axis_mode)
                        assert (record.forward_factor ==
                                record_read.forward_factor)
                        assert (np.all(
                            record.obs_levels == record_read.obs_levels))
                        for in_grid_3d, in_grid_3d_read in zip(
                                record.in_grid_3d, record_read.in_grid_3d):
                            assert (np.all(in_grid_3d == in_grid_3d_read))
                        for out_grid_3d, out_grid_3d_read in zip(
                                record.out_grid_3d, record_read.out_grid_3d):
                            assert (np.all(out_grid_3d == out_grid_3d_read))
                        assert (np.all(record.rewards == record_read.rewards))
                        assert (np.all(record.scores == record_read.scores))
                        assert (np.all(
                            record.rgb_image == record_read.rgb_image))
                        assert (np.all(
                            record.normal_image == record_read.normal_image))
                        assert (np.all(
                            record.depth_image == record_read.depth_image))
            records = []
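Both this example and Example #8 pick actions epsilon-greedily over collision-masked, visit-weighted rewards; the core of that selection as a self-contained sketch (function name and inputs are hypothetical):

import numpy as np


def select_action(prob_rewards, visit_counts, collision_flags, prev_action, epsilon):
    # Down-weight often-visited poses and mask colliding actions outright.
    visit_weights = 1. / (np.asarray(visit_counts, dtype=np.float32) + 1)
    adjusted = np.asarray(prob_rewards) * visit_weights
    adjusted[np.asarray(collision_flags) > 0] = -np.finfo(np.float32).max

    if np.random.rand() < epsilon:
        # Explore: any non-colliding action
        valid = np.arange(len(adjusted))[np.asarray(collision_flags) == 0]
        return np.random.choice(valid)
    # Exploit: among the best actions, prefer repeating the previous one
    best = np.arange(len(adjusted))[adjusted == np.max(adjusted)]
    return prev_action if prev_action in best else np.random.choice(best)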
Example #7
# Assumed import for this excerpt; generate_excel is a project helper.
import visualization


def main(num):
    generate_excel(num)
    fig = visualization.Figure(num)
    visualization.show()
Example #8
# Assumed imports for this excerpt; TimeMeter, DummyTimeMeter, VisitedPoses
# and query_octomap are project helpers whose modules are not shown.
import os
import uuid

import numpy as np
import yaml

import data_record
import env_factory
import file_helpers
import hdf5_utils


def run(args):
    wait_until_pose_set = args.wait_until_pose_set
    measure_timing = args.measure_timing
    if measure_timing:
        time_meter = TimeMeter()
    else:
        time_meter = DummyTimeMeter()

    output_path = args.output_path
    if not os.path.isdir(output_path):
        os.makedirs(output_path)
    filename_template = os.path.join(output_path, file_helpers.DEFAULT_HDF5_TEMPLATE)

    samples_per_file = args.samples_per_file
    num_files = args.num_files
    num_samples = num_files * samples_per_file
    reset_interval = args.reset_interval
    reset_score_threshold = args.reset_score_threshold
    check_written_samples = args.check_written_samples

    dataset_kwargs = {}
    if args.compression:
        dataset_kwargs.update({"compression": args.compression})
        if args.compression_level >= 0:
            dataset_kwargs.update({"compression_opts": args.compression_level})

    epsilon = args.epsilon

    obs_levels = [int(x.strip()) for x in args.obs_levels.strip("[]").split(",")]
    obs_sizes = [int(x.strip()) for x in args.obs_sizes.strip("[]").split(",")]
    print("obs_levels={}".format(obs_levels))
    print("obs_sizes={}".format(obs_sizes))

    client_id = args.client_id
    with open(args.environment_config, "r") as fin:
        environment_config = yaml.safe_load(fin)
    environment = env_factory.create_environment_from_config(environment_config, client_id)

    intrinsics = environment.get_engine().get_intrinsics()
    result = environment.get_mapper().perform_info()
    map_resolution = result.resolution
    axis_mode = environment_config["collect_data"]["axis_mode"]
    forward_factor = environment_config["collect_data"]["forward_factor"]
    downsample_to_grid = environment_config["collect_data"]["downsample_to_grid"]
    print("map_resolution={}".format(map_resolution))
    print("axis_mode={}".format(axis_mode))
    print("forward_factor={}".format(forward_factor))
    print("downsample_to_grid={}".format(downsample_to_grid))

    if args.manual:
        environment.get_engine().enable_input()

        import time
        environment.reset(keep_pose=True)
        while True:
            current_pose = environment.get_pose()

            rgb_image, depth_image, normal_image = environment.get_engine().get_rgb_depth_normal_images(use_trackball=True)
            rgb_image = np.asarray(rgb_image, dtype=np.float32)
            depth_image = np.asarray(depth_image, dtype=np.float32)
            normal_image = np.asarray(normal_image, dtype=np.float32)

            if args.visualize:
                if depth_image.shape[0] == 1:
                    import matplotlib.pyplot as plt
                    fig = plt.figure(1)
                    plt.clf()
                    plt.step(np.arange(depth_image.shape[1]), depth_image[0, ...])
                    plt.title("Depth image")
                    fig.canvas.draw()
                    plt.show(block=False)
                else:
                    import cv2
                    cv2.imshow("depth_image", depth_image / np.max(depth_image))
                    cv2.waitKey(50)

                    import matplotlib.pyplot as plt
                    fig = plt.figure(1)
                    plt.clf()
                    i = depth_image.shape[0] / 2
                    print(i)
                    i = int(i)
                    plt.step(np.arange(depth_image.shape[1]), depth_image[i, ...])
                    plt.title("Depth image")
                    fig.canvas.draw()
                    plt.show(block=False)

            # Query octomap
            print("current_pose: {}".format(current_pose.orientation_rpy()))
            in_grid_3ds = query_octomap(environment, current_pose, obs_levels, obs_sizes,
                                        map_resolution, axis_mode=axis_mode, forward_factor=forward_factor)
            in_grid_3ds = np.asarray(in_grid_3ds, dtype=np.float32)
            if args.visualize:
                fig = 1
                import visualization
                visualization.clear_figure(fig)
                visualization.plot_grid(in_grid_3ds[..., 0], in_grid_3ds[..., 1], title_prefix="input", show=False, fig_offset=fig)
                visualization.show(stop=True)

            environment.get_mapper().perform_insert_depth_map_rpy(
                current_pose.location(), current_pose.orientation_rpy(),
                depth_image, intrinsics, downsample_to_grid=downsample_to_grid, simulate=False)

            time.sleep(0.5)
        return

    # environment.get_engine().test()
    environment.get_engine().disable_input()

    def read_samples_from_file(filename):
        stacked_samples, attr_dict = hdf5_utils.read_hdf5_file_to_numpy_dict(filename, read_attributes=True)
        samples = []
        for key in stacked_samples:
            for i in range(len(stacked_samples[key])):
                if len(samples) <= i:
                    samples.append({})
                samples[i][key] = stacked_samples[key][i, ...]
        return samples, attr_dict

    def write_samples_to_next_file(samples, attr_dict, next_file_num):
        filename, next_file_num = file_helpers.get_next_output_hdf5_filename(
            next_file_num, template=filename_template)
        print("Writing samples to file {}".format(filename))
        if not args.dry_run:
            data_record.write_samples_to_hdf5_file(filename, samples, attr_dict, **dataset_kwargs)
            if check_written_samples:
                print("Reading samples from file {}".format(filename))
                samples_read, attr_dict_read = read_samples_from_file(filename)
                assert(len(samples) == len(samples_read))
                for i in range(len(samples)):
                    for key in samples[i]:
                        assert(np.all(samples[i][key] == samples_read[i][key]))
                for key in attr_dict:
                    assert(np.all(attr_dict[key] == attr_dict_read[key]))
        return next_file_num

    next_file_num = 0
    samples = []
    attr_dict = None
    prev_action = np.random.randint(0, environment.get_num_of_actions())
    normalized_prob_score = 0
    # Make sure we reset the environment at the start
    reset_env = True
    total_steps = -1
    while True:
        total_steps += 1
        print(next_file_num, num_files, total_steps, num_samples)
        if next_file_num >= num_files and total_steps >= num_samples:
            break

        i = total_steps

        # Get current normalized prob score to check termination
        tmp_result = environment.get_mapper().perform_info()
        normalized_prob_score = tmp_result.normalized_probabilistic_score

        if not args.keep_episodes_together and len(samples) >= samples_per_file:
            next_file_num = write_samples_to_next_file(samples, attr_dict, next_file_num)
            samples = []
            attr_dict = None

        if reset_env or \
                (i % reset_interval == 0) \
                or normalized_prob_score >= reset_score_threshold:
            print("Resetting environment")
            if len(samples) >= samples_per_file:
                print("Writing {} recorded samples to disk".format(len(samples)))
                next_file_num = write_samples_to_next_file(samples, attr_dict, next_file_num)
                samples = []
                attr_dict = None
            sample_id = 0
            reset_env = False
            environment.reset()
            visited_poses = VisitedPoses(3 + 4, np.concatenate([np.ones((3,)), 10. * np.ones((4,))]))
            episode_uuid = uuid.uuid1()
            episode_id = np.frombuffer(episode_uuid.bytes, dtype=np.uint8)  # frombuffer: fromstring is deprecated

            if args.collect_center_grid_of_previous_pose:
                center_grid_of_previous_pose = None

        print("Total step #{}, episode step #{}, # of samples {}".format(i, sample_id, len(samples)))
        current_pose = environment.get_pose()

        # Get current scores
        with time_meter.measure("get_info"):
            result = environment.get_mapper().perform_info()
            scores = np.asarray([result.score, result.normalized_score,
                                 result.probabilistic_score,
                                 result.normalized_probabilistic_score],
                                dtype=np.float32)

        print("  scores: {}".format(scores))

        visited_poses.increase_visited_pose(current_pose)

        # Simulate effect of actions and compute depth maps and rewards
        prob_rewards = np.zeros((environment.get_num_of_actions(),))
        visit_counts = np.zeros((environment.get_num_of_actions(),))
        collision_flags = np.zeros((environment.get_num_of_actions(),))
        if not args.collect_only_selected_action:
            in_grid_3ds_array = [None] * environment.get_num_of_actions()
            out_grid_3ds_array = [None] * environment.get_num_of_actions()
            result_array = [None] * environment.get_num_of_actions()
            rgb_images = [None] * environment.get_num_of_actions()
            depth_images = [None] * environment.get_num_of_actions()
            normal_images = [None] * environment.get_num_of_actions()
        for action in range(environment.get_num_of_actions()):
            with time_meter.measure("simulate_collision"):
                colliding = environment.is_action_colliding(current_pose, action)
            if colliding:
                print("Action {} would collide".format(action))
                collision_flags[action] = 1
                continue
            new_pose = environment.simulate_action_on_pose(current_pose, action)

            if not args.collect_only_selected_action:
                with time_meter.measure("simulate_query_octomap"):
                    in_grid_3ds_array[action] = query_octomap(
                        environment, new_pose, obs_levels, obs_sizes,
                        map_resolution, axis_mode=axis_mode, forward_factor=forward_factor)
                    in_grid_3ds_array[action] = np.asarray(in_grid_3ds_array[action], dtype=np.float32)

            with time_meter.measure("simulate_set_pose"):
                environment.set_pose(new_pose, wait_until_set=wait_until_pose_set, broadcast=False)
            new_pose_retrieved = environment.get_pose()
            assert np.allclose(new_pose_retrieved.location(), new_pose.location())
            assert np.allclose(new_pose_retrieved.orientation_rpy(), new_pose.orientation_rpy())

            # point_cloud = environment._get_depth_point_cloud(new_pose)
            # result = environment.get_mapper().perform_insert_point_cloud_rpy(
            #     new_pose.location(), new_pose.orientation_rpy(), point_cloud, simulate=True)
            with time_meter.measure("simulate_image_retrieval"):
                if args.collect_only_depth_image or args.collect_only_selected_action or args.collect_no_images:
                    depth_image = environment.get_engine().get_depth_image()
                    depth_image = np.asarray(depth_image, dtype=np.float32)
                else:
                    rgb_image, depth_image, normal_image = environment.get_engine().get_rgb_depth_normal_images()
                    rgb_image = np.asarray(rgb_image, dtype=np.float32)
                    depth_image = np.asarray(depth_image, dtype=np.float32)
                    normal_image = np.asarray(normal_image, dtype=np.float32)
                if not args.collect_only_selected_action:
                    depth_images[action] = depth_image
                    if not args.collect_only_depth_image:
                        rgb_images[action] = rgb_image
                        normal_images[action] = normal_image

            simulate = True
            if not args.collect_only_selected_action and args.collect_output_grid:
                simulate = False
                with time_meter.measure("push_octomap"):
                    environment.get_mapper().perform_push_octomap()
            with time_meter.measure("simulate_insert_depth_image"):
                result = environment.get_mapper().perform_insert_depth_map_rpy(
                    new_pose.location(), new_pose.orientation_rpy(),
                    depth_image, intrinsics, downsample_to_grid=downsample_to_grid, simulate=simulate)

            if not args.collect_only_selected_action:
                result_array[action] = result
            prob_reward = result.probabilistic_reward

            prob_rewards[action] = prob_reward
            assert(prob_reward >= 0)

            visit_count = visited_poses.get_visit_count(new_pose)
            visit_counts[action] = visit_count

            if not args.collect_only_selected_action and args.collect_output_grid:
                with time_meter.measure("simulate_query_octomap"):
                    out_grid_3ds_array[action] = query_octomap(
                        environment, new_pose, obs_levels, obs_sizes,
                        map_resolution, axis_mode=axis_mode, forward_factor=forward_factor)
                    out_grid_3ds_array[action] = np.asarray(out_grid_3ds_array[action], dtype=np.float32)
                with time_meter.measure("pop_octomap"):
                    environment.get_mapper().perform_pop_octomap()

        print("Possible rewards: {}".format(prob_rewards))

        with time_meter.measure("select_action"):
            visit_counts = np.array(visit_counts, dtype=np.float32)
            visit_weights = 1. / (visit_counts + 1)
            adjusted_rewards = prob_rewards * visit_weights
            adjusted_rewards[collision_flags > 0] = - np.finfo(np.float32).max
            print("Adjusted expected rewards:", adjusted_rewards)

            # Reset if every action would lead to a collision
            if np.all(collision_flags > 0):
                reset_env = True
                continue

            # Perform epsilon-greedy action.
            if np.random.rand() < epsilon:
                valid_action_indices = np.arange(environment.get_num_of_actions())
                valid_action_indices = valid_action_indices[collision_flags == 0]
                assert(len(valid_action_indices) > 0)
                selected_action = np.random.choice(valid_action_indices)
            else:
                max_prob_reward = np.max(adjusted_rewards)
                actions = np.arange(environment.get_num_of_actions())[adjusted_rewards == max_prob_reward]
                if len(actions) == 1:
                    selected_action = actions[0]
                else:
                    # If there is not a single best action, redo the previous one if it is one of the best.
                    if prev_action in actions:
                        selected_action = prev_action
                    else:
                        selected_action = np.random.choice(actions)
            # print("Selected action: {}".format(action))

            if np.all(collision_flags[selected_action] > 0):
                reset_env = True
                continue

        new_pose = environment.simulate_action_on_pose(current_pose, selected_action)
        with time_meter.measure("set_pose"):
            environment.set_pose(new_pose, wait_until_set=wait_until_pose_set)

        with time_meter.measure("image_retrieval"):
            if args.collect_only_depth_image:
                depth_image = environment.get_engine().get_depth_image()
                depth_image = np.asarray(depth_image, dtype=np.float32)
            else:
                rgb_image, depth_image, normal_image = environment.get_engine().get_rgb_depth_normal_images()
                rgb_image = np.asarray(rgb_image, dtype=np.float32)
                depth_image = np.asarray(depth_image, dtype=np.float32)
                normal_image = np.asarray(normal_image, dtype=np.float32)

        if args.visualize:
            if depth_image.shape[0] == 1:
                import matplotlib.pyplot as plt
                fig = plt.figure(1)
                plt.clf()
                # plt.plot(np.arange(depth_image.shape[1]), depth_image[0, ...])
                plt.step(np.arange(depth_image.shape[1]), depth_image[0, ...])
                plt.title("Depth image")
                fig.canvas.draw()
                plt.show(block=False)
            else:
                import cv2
                cv2.imshow("depth_image", depth_image / np.max(depth_image))
                cv2.waitKey(50)

        # Query octomap
        if args.collect_only_selected_action:
            with time_meter.measure("query_octomap"):
                in_grid_3ds = query_octomap(environment, new_pose, obs_levels, obs_sizes,
                                            map_resolution, axis_mode=axis_mode, forward_factor=forward_factor)
                in_grid_3ds = np.asarray(in_grid_3ds, dtype=np.float32)

        if args.collect_center_grid_of_previous_pose:
            with time_meter.measure("query_octomap"):
                center_in_grid_3ds = query_octomap(environment, current_pose, obs_levels, obs_sizes,
                                                   map_resolution, axis_mode=axis_mode, forward_factor=0.0)
                center_in_grid_3ds = np.asarray(center_in_grid_3ds, dtype=np.float32)

        if args.visualize:
            fig = 1
            import visualization
            fig = visualization.plot_grid(in_grid_3ds[..., 2], in_grid_3ds[..., 3], title_prefix="input_1", show=False, fig_offset=fig)
            visualization.clear_figure(fig)
            visualization.plot_grid(in_grid_3ds[..., 4], in_grid_3ds[..., 5], title_prefix="input_2", show=False, fig_offset=fig)
            visualization.show(stop=True)

        # sim_result = environment.get_mapper().perform_insert_depth_map_rpy(
        #     new_pose.location(), new_pose.orientation_rpy(),
        #     depth_image, intrinsics, downsample_to_grid=downsample_to_grid, simulate=True)
        with time_meter.measure("insert_depth_image"):
            result = environment.get_mapper().perform_insert_depth_map_rpy(
                new_pose.location(), new_pose.orientation_rpy(),
                depth_image, intrinsics, downsample_to_grid=downsample_to_grid, simulate=False)
        # print("result diff:", sim_result.probabilistic_reward - result.probabilistic_reward)
        # assert(sim_result.probabilistic_reward - result.probabilistic_reward == 0)

        print("Selected action={}, probabilistic reward={}".format(selected_action, result.probabilistic_reward))

        if attr_dict is None:
            attr_dict = {}
            attr_dict["intrinsics"] = intrinsics
            attr_dict["map_resolution"] = map_resolution
            attr_dict["axis_mode"] = axis_mode
            attr_dict["forward_factor"] = forward_factor
            attr_dict["obs_levels"] = obs_levels

        if args.collect_center_grid_of_previous_pose and center_grid_of_previous_pose is None:
            skip_sample = True
        else:
            skip_sample = False

        # Create and keep samples for saving later
        if (not skip_sample) and args.collect_only_selected_action:
            if args.collect_output_grid:
                # Query octomap
                with time_meter.measure("query_octomap"):
                    out_grid_3ds = query_octomap(environment, new_pose, obs_levels, obs_sizes,
                                                 map_resolution, axis_mode=axis_mode, forward_factor=forward_factor)
                    out_grid_3ds = np.asarray(out_grid_3ds, dtype=np.float32)
                # Diagnostic moved here so out_grid_3ds is defined when printed
                print("Grid differences:", [np.sum(out_grid_3ds[..., i] - in_grid_3ds[..., i]) for i in range(in_grid_3ds.shape[-1])])

            rewards = np.asarray([result.reward, result.normalized_reward,
                                  result.probabilistic_reward, result.normalized_probabilistic_reward], dtype=np.float32)
            new_scores = np.asarray([result.score, result.normalized_score,
                                     result.probabilistic_score, result.normalized_probabilistic_score], dtype=np.float32)

            sample = {"in_grid_3ds": in_grid_3ds,
                      "rewards": rewards,
                      "scores": scores,
                      "new_scores": new_scores,
                      "episode_id": episode_id,
                      "sample_id": np.array(sample_id, dtype=np.int32),
                      "selected_action": np.array(True, dtype=np.int8),
                      "action_index": np.array(selected_action, dtype=np.int8)}
            if not args.collect_no_images:
                sample["depth_image"] = depth_image
            if args.collect_output_grid:
                sample["out_grid_3ds"] = out_grid_3ds
            if not args.collect_only_depth_image:
                sample["rgb_image"] = rgb_image
                sample["normal_image"] = normal_image
            if args.collect_center_grid_of_previous_pose:
                sample["center_in_grid_3ds"] = center_in_grid_3ds
            samples.append(sample)
        elif not skip_sample:
            for action in range(environment.get_num_of_actions()):
                result = result_array[action]
                if result is None:
                    continue
                rewards = np.asarray([result.reward, result.normalized_reward,
                                    result.probabilistic_reward, result.normalized_probabilistic_reward], dtype=np.float32)
                new_scores = np.asarray([result.score, result.normalized_score,
                                   result.probabilistic_score, result.normalized_probabilistic_score], dtype=np.float32)
                in_grid_3ds = in_grid_3ds_array[action]
                assert(in_grid_3ds is not None)
                sample = {"in_grid_3ds": in_grid_3ds,
                          "rewards": rewards,
                          "scores": scores,
                          "new_scores": new_scores,
                          "episode_id": episode_id,
                          "sample_id": np.array(sample_id, dtype=np.int32),
                          "selected_action": np.array(action == selected_action, dtype=np.int8),
                          "action_index": np.array(selected_action, dtype=np.int8)}
                if not args.collect_no_images:
                    assert(depth_images[action] is not None)
                    sample["depth_image"] = depth_images[action]
                if args.collect_output_grid:
                    assert(out_grid_3ds_array[action] is not None)
                    sample["out_grid_3ds"] = out_grid_3ds_array[action]
                if not args.collect_only_depth_image:
                    assert(rgb_images[action] is not None)
                    assert(normal_images[action] is not None)
                    sample["rgb_image"] = rgb_image
                    sample["normal_image"] = normal_image
                if args.collect_center_grid_of_previous_pose:
                    sample["center_in_grid_3ds"] = center_in_grid_3ds
                samples.append(sample)

        if args.collect_center_grid_of_previous_pose:
            # Assumed update (not present in the excerpt): remember this
            # pose's center grid so later iterations stop skipping samples
            center_grid_of_previous_pose = center_in_grid_3ds

        sample_id += 1
        prev_action = selected_action

        time_meter.print_times()

    if len(samples) > 0:
        write_samples_to_next_file(samples, attr_dict, next_file_num)
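write_samples_to_next_file verifies its output by reading every sample back and comparing; a self-contained sketch of the same round-trip check with plain h5py, assuming one stacked dataset per sample key:

import h5py
import numpy as np


def write_and_check(filename, samples):
    # Stack each per-sample array along a new first axis and write one
    # dataset per key, then read back and verify bit-for-bit equality.
    stacked = {key: np.stack([s[key] for s in samples]) for key in samples[0]}
    with h5py.File(filename, "w") as f:
        for key, arr in stacked.items():
            f.create_dataset(key, data=arr, compression="gzip")
    with h5py.File(filename, "r") as f:
        for key, arr in stacked.items():
            assert np.all(f[key][...] == arr)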
Example #9
# Assumed imports for this excerpt; Lstm, ConfusionMatrix, make_dataset, grad,
# plot_confusion_matrix, plot_to_image and show are project helpers.
import datetime
import logging
import os
import random
from time import time

import tensorflow as tf


def train(unit, dropout, learning_rate, num_epoch, tuning=True):

    num_epoch = int(num_epoch)
    log_dir = './results/'

    # Load dataset
    path = os.getcwd()
    train_file = path + '/hapt_tfrecords/hapt_train.tfrecords'
    val_file = path + '/hapt_tfrecords/hapt_val.tfrecords'
    test_file = path + '/hapt_tfrecords/hapt_test.tfrecords'

    train_dataset = make_dataset(train_file, overlap=True)
    val_dataset = make_dataset(val_file)
    test_dataset = make_dataset(test_file)
    class_names = [
        'WALKING', 'WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS', 'SITTING',
        'STANDING', 'LAYING', 'STAND_TO_SIT', 'SIT_TO_STAND', 'SIT_TO_LIE',
        'LIE_TO_SIT', 'STAND_TO_LIE', 'LIE_TO_STAND'
    ]

    # Pick a random test sample whose prediction will be visualized later.
    len_test = len(list(test_dataset))
    show_index = random.randint(10, len_test)

    # Model
    model = Lstm(unit=unit, drop_out=dropout)

    # set optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

    # set Metrics
    train_accuracy = tf.keras.metrics.CategoricalAccuracy()
    val_accuracy = tf.keras.metrics.Accuracy()
    val_con_mat = ConfusionMatrix(num_class=13)
    test_accuracy = tf.keras.metrics.Accuracy()
    test_con_mat = ConfusionMatrix(num_class=13)

    # Save Checkpoint
    if not tuning:
        ckpt = tf.train.Checkpoint(step=tf.Variable(1),
                                   optimizer=optimizer,
                                   net=model)
        manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=5)

    # Set up summary writers
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tb_log_dir = log_dir + current_time
    summary_writer = tf.summary.create_file_writer(tb_log_dir)

    # Restore Checkpoint
    if not tuning:
        ckpt.restore(manager.latest_checkpoint)
        if manager.latest_checkpoint:
            logging.info('Restored from {}'.format(manager.latest_checkpoint))
        else:
            logging.info('Initializing from scratch.')

    # Calculate losses, update the network and the metrics.
    # NOTE: `sample_weight` is read from the enclosing scope; the training
    # loop below rebinds it before each call.
    @tf.function
    def train_step(inputs, label):
        # Optimize the model
        loss_value, grads = grad(model, inputs, label)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_pred = model(inputs, training=True)
        train_pred = tf.squeeze(train_pred)
        label = tf.squeeze(label)
        train_accuracy.update_state(label,
                                    train_pred,
                                    sample_weight=sample_weight)

    for epoch in range(num_epoch):
        begin = time()

        # Training loop
        for exp_num, index, label, train_inputs in train_dataset:
            train_inputs = tf.expand_dims(train_inputs, axis=0)
            # One-hot coding is applied.
            label = label - 1
            sample_weight = tf.cast(tf.math.not_equal(label, -1), tf.int64)
            label = tf.expand_dims(tf.one_hot(label, depth=12), axis=0)
            train_step(train_inputs, label)

        for exp_num, index, label, val_inputs in val_dataset:
            val_inputs = tf.expand_dims(val_inputs, axis=0)
            sample_weight = tf.cast(
                tf.math.not_equal(label, tf.constant(0, dtype=tf.int64)),
                tf.int64)
            val_pred = model(val_inputs, training=False)
            val_pred = tf.squeeze(val_pred)
            val_pred = tf.cast(tf.argmax(val_pred, axis=1), dtype=tf.int64) + 1
            val_con_mat.update_state(label,
                                     val_pred,
                                     sample_weight=sample_weight)
            val_accuracy.update_state(label,
                                      val_pred,
                                      sample_weight=sample_weight)
        # Log the confusion matrix as an image summary
        cm_valid = val_con_mat.result()
        figure = plot_confusion_matrix(cm_valid, class_names=class_names)
        cm_valid_image = plot_to_image(figure)

        with summary_writer.as_default():
            tf.summary.scalar('Train Accuracy',
                              train_accuracy.result(),
                              step=epoch)
            tf.summary.scalar('Valid Accuracy',
                              val_accuracy.result(),
                              step=epoch)
            tf.summary.image('Valid ConfusionMatrix',
                             cm_valid_image,
                             step=epoch)
        end = time()
        logging.info(
            "Epoch {:d} Training Accuracy: {:.3%} Validation Accuracy: {:.3%} Time:{:.5}s"
            .format(epoch + 1, train_accuracy.result(), val_accuracy.result(),
                    (end - begin)))

        train_accuracy.reset_states()
        val_accuracy.reset_states()
        val_con_mat.reset_states()

        if not tuning:
            if int(ckpt.step) % 5 == 0:
                save_path = manager.save()
                logging.info('Saved checkpoint for epoch {}: {}'.format(
                    int(ckpt.step), save_path))
            ckpt.step.assign_add(1)

    i = 0
    for exp_num, index, label, test_inputs in test_dataset:
        test_inputs = tf.expand_dims(test_inputs, axis=0)
        sample_weight = tf.cast(
            tf.math.not_equal(label, tf.constant(0, dtype=tf.int64)), tf.int64)
        test_pred = model(test_inputs, training=False)
        test_pred = tf.cast(tf.argmax(test_pred, axis=2), dtype=tf.int64)
        test_pred = tf.squeeze(test_pred, axis=0) + 1
        test_accuracy.update_state(label,
                                   test_pred,
                                   sample_weight=sample_weight)
        test_con_mat.update_state(label,
                                  test_pred,
                                  sample_weight=sample_weight)
        i += 1

        # visualize the result
        if i == show_index:
            if not tuning:
                visualization_path = path + '/visualization/'
                image_path = visualization_path + current_time + '.png'
                inputs = tf.squeeze(test_inputs)
                show(index, label, inputs, test_pred, image_path)

    # Log the confusion matrix as an image summary
    cm_test = test_con_mat.result()
    figure = plot_confusion_matrix(cm_test, class_names=class_names)
    cm_test_image = plot_to_image(figure)

    with summary_writer.as_default():
        tf.summary.scalar('Test Accuracy', test_accuracy.result(), step=epoch)
        tf.summary.image('Test ConfusionMatrix', cm_test_image, step=epoch)

    logging.info("Trained finished. Final Accuracy in test set: {:.3%}".format(
        test_accuracy.result()))

    return test_accuracy.result()
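The sample_weight masks above exclude padding labels from every metric update; a minimal illustration of that masking with tf.keras.metrics.Accuracy:

import tensorflow as tf

acc = tf.keras.metrics.Accuracy()
labels = tf.constant([3, 0, 5, 5], dtype=tf.int64)      # 0 marks unlabeled steps
preds = tf.constant([3, 1, 5, 4], dtype=tf.int64)
mask = tf.cast(tf.math.not_equal(labels, 0), tf.int64)  # weight 0 drops padding
acc.update_state(labels, preds, sample_weight=mask)
print(float(acc.result()))  # 2 correct of 3 labeled steps -> ~0.667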