def execute_motion(self, joint_configs):
    print('\nSpline path optimized.\n')
    if self._interactive:
        input('\nPress enter in the terminal to execute motion.\n')
    print('\nExecuting motion...\n')
    if self.with_pyrep:
        for config in joint_configs:
            for j, p in zip(self._robot.joints,
                            ivy.to_numpy(config).tolist()):
                j.set_joint_position(p, False)
            time.sleep(0.05)
    elif self._interactive:
        this_dir = os.path.dirname(os.path.realpath(__file__))
        for i in range(11):
            plt.ion()
            plt.imshow(
                mpimg.imread(
                    os.path.join(this_dir, 'msp_no_sim',
                                 'motion_{}.png'.format(i))))
            plt.show()
            plt.pause(0.1)
            plt.ioff()
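For context, a caller hands execute_motion a sequence of joint configurations sampled along the optimized spline. A minimal sketch of producing such a path with plain linear interpolation instead (the helper name, the 6-DOF assumption, and the commented usage lines are hypothetical):

import numpy as np

def linear_joint_path(start_config, goal_config, num_steps=100):
    # interpolate every joint independently from start to goal
    alphas = np.linspace(0., 1., num_steps)[:, None]
    start = np.asarray(start_config, dtype='float32')
    goal = np.asarray(goal_config, dtype='float32')
    return (1. - alphas) * start + alphas * goal

# hypothetical usage with a 6-DOF arm:
# joint_configs = linear_joint_path([0.] * 6, [0.5] * 6)
# robot.execute_motion(joint_configs)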
Example #2
def test_execute_with_gradients(func_n_xs_n_ty_n_te_n_tg, dtype_str, tensor_fn,
                                dev_str, call):
    # smoke test
    func, xs_raw, true_y, true_extra, true_dydxs = func_n_xs_n_ty_n_te_n_tg
    xs = xs_raw.map(lambda x, _: ivy.variable(ivy.array(x)))
    if true_extra is None:
        y, dydxs = ivy.execute_with_gradients(func, xs)
        extra_out = None
    else:
        y, dydxs, extra_out = ivy.execute_with_gradients(func, xs)
    # type test
    assert ivy.is_array(y) or isinstance(y, Number)
    if call is not helpers.np_call:
        assert isinstance(dydxs, dict)
    # cardinality test
    if call is not helpers.mx_call:
        # mxnet cannot slice array down to shape (), it remains fixed at size (1,)
        assert y.shape == true_y.shape
    if call is not helpers.np_call:
        for (g, g_true) in zip(dydxs.values(), true_dydxs.values()):
            assert g.shape == g_true.shape
    # value test
    xs = xs_raw.map(lambda x, _: ivy.variable(ivy.array(x)))
    if true_extra is None:
        y, dydxs = call(ivy.execute_with_gradients, func, xs)
    else:
        y, dydxs, extra_out = call(ivy.execute_with_gradients, func, xs)
    assert np.allclose(y, true_y)
    if true_extra:
        assert np.allclose(extra_out, true_extra)
    if call is helpers.np_call:
        # numpy doesn't support autodiff
        assert dydxs is None
    else:
        for (g, g_true) in zip(dydxs.values(), true_dydxs.values()):
            assert np.allclose(ivy.to_numpy(g), g_true)
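For orientation, the contract this test exercises is that ivy.execute_with_gradients evaluates func on a container of variables and returns the output together with a container of gradients (plus any extra outputs func produces). A minimal sketch against the same older ivy API, assuming an autodiff-capable backend such as torch is installed:

import ivy

ivy.set_framework('torch')  # assumption: torch backend available

xs = ivy.Container({'w': ivy.variable(ivy.array([3.]))})
y, dydxs = ivy.execute_with_gradients(lambda v: ivy.reduce_sum(v.w ** 2), xs)
# expected: y == 9. and the gradient entry for 'w' equals 6. (dy/dw = 2w)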
Example #3
    def test_single(self, dev_str, f, call, array_shape, num_processes):

        if call is helpers.mx_call and num_processes == 2:
            pytest.skip()

        self._init(array_shape, num_processes)

        assert list(self._dataset[0].x.shape) == array_shape
        assert list(self._dataset[4].x.shape) == array_shape
        assert list(self._dataset[8].x.shape) == array_shape

        assert np.allclose(ivy.to_numpy(self._dataset[0].x),
                           ivy.to_numpy(self._x[0]))
        assert np.allclose(ivy.to_numpy(self._dataset[4].x),
                           ivy.to_numpy(self._x[4]))
        assert np.allclose(ivy.to_numpy(self._dataset[8].x),
                           ivy.to_numpy(self._x[8]))

        # close
        self._dataset.close()
        del self._dataset
Example #4
def test_container_to_disk_shuffle_and_from_disk():
    for lib, call in helpers.calls:
        if call in [helpers.tf_graph_call, helpers.mx_graph_call]:
            # container disk saving requires eager execution
            continue
        save_filepath = 'container_on_disk.hdf5'
        dict_in = {
            'a': ivy.array([1, 2, 3], f=lib),
            'b': {
                'c': ivy.array([1, 2, 3], f=lib),
                'd': ivy.array([1, 2, 3], f=lib)
            }
        }
        container = Container(dict_in)

        # saving
        container.to_disk(save_filepath, max_batch_size=3)
        assert os.path.exists(save_filepath)

        # shuffling
        Container.shuffle_h5_file(save_filepath)

        # loading
        container_shuffled = Container.from_disk(save_filepath, lib, slice(3))

        # testing
        data = np.array([1, 2, 3])
        random.seed(0)
        random.shuffle(data)

        assert (ivy.to_numpy(container_shuffled['a'], lib) == data).all()
        assert (ivy.to_numpy(container_shuffled.a, lib) == data).all()
        assert (ivy.to_numpy(container_shuffled['b']['c'], lib) == data).all()
        assert (ivy.to_numpy(container_shuffled.b.c, lib) == data).all()
        assert (ivy.to_numpy(container_shuffled['b']['d'], lib) == data).all()
        assert (ivy.to_numpy(container_shuffled.b.d, lib) == data).all()

        os.remove(save_filepath)
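The value checks above hinge on one property: shuffling the HDF5 file and shuffling a reference array under the same fixed seed must produce the same permutation. That reproducibility can be confirmed in isolation with just the standard library and numpy:

import random
import numpy as np

data_a = np.array([1, 2, 3])
data_b = np.array([1, 2, 3])
random.seed(0)
random.shuffle(data_a)
random.seed(0)
random.shuffle(data_b)
# identical seeds give identical permutations
assert (data_a == data_b).all()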
Example #5
def test_container_to_disk_shuffle_and_from_disk(dev_str, call):
    if call in [helpers.tf_graph_call]:
        # container disk saving requires eager execution
        pytest.skip()
    save_filepath = 'container_on_disk.hdf5'
    dict_in = {
        'a': ivy.array([1, 2, 3]),
        'b': {
            'c': ivy.array([1, 2, 3]),
            'd': ivy.array([1, 2, 3])
        }
    }
    container = Container(dict_in)

    # saving
    container.to_disk(save_filepath, max_batch_size=3)
    assert os.path.exists(save_filepath)

    # shuffling
    Container.shuffle_h5_file(save_filepath)

    # loading
    container_shuffled = Container.from_disk(save_filepath, slice(3))

    # testing
    data = np.array([1, 2, 3])
    random.seed(0)
    random.shuffle(data)

    assert (ivy.to_numpy(container_shuffled['a']) == data).all()
    assert (ivy.to_numpy(container_shuffled.a) == data).all()
    assert (ivy.to_numpy(container_shuffled['b']['c']) == data).all()
    assert (ivy.to_numpy(container_shuffled.b.c) == data).all()
    assert (ivy.to_numpy(container_shuffled['b']['d']) == data).all()
    assert (ivy.to_numpy(container_shuffled.b.d) == data).all()

    os.remove(save_filepath)
Example #6
    def test_slice(self, dev_str, f, call, array_shape, num_processes):

        if call is helpers.mx_call and num_processes == 2:
            pytest.skip()

        self._init(array_shape, num_processes)

        assert len(self._dataset[0:3].x) == 3
        assert list(self._dataset[0:3].x[0].shape) == array_shape
        assert len(self._dataset[3:6].x) == 3
        assert list(self._dataset[3:6].x[0].shape) == array_shape
        assert len(self._dataset[6:9].x) == 3
        assert list(self._dataset[6:9].x[0].shape) == array_shape

        assert np.allclose(ivy.to_numpy(self._dataset[0:3].x[0]),
                           ivy.to_numpy(self._x[0]))
        assert np.allclose(ivy.to_numpy(self._dataset[3:6].x[1]),
                           ivy.to_numpy(self._x[4]))
        assert np.allclose(ivy.to_numpy(self._dataset[6:9].x[2]),
                           ivy.to_numpy(self._x[8]))

        # close
        self._dataset.close()
        del self._dataset
Example #7
    def _get_dataset(self, starting_example, ending_example):
        class ContainerIdxMap:
            def __init__(self,
                         sizes,
                         fpath_template=None,
                         seq_idxs=None,
                         start=None,
                         end=None,
                         max_seq_len=None,
                         conts_to_skip=None,
                         pruned_sizes=None):
                if isinstance(sizes, (tuple, list)):
                    pruned_sizes = ivy.default(pruned_sizes, [
                        SeqDataLoader._compute_seq_len(i, sl, conts_to_skip)
                        for i, sl in enumerate(sizes)
                    ])
                    num_empty = sum([ps == 0 for ps in pruned_sizes])
                    self._raw_sizes = dict(
                        zip(range(start, end + 1 + num_empty),
                            sizes[start:end + 1 + num_empty]))
                    self._pruned_sizes = dict(
                        zip(range(start, end + 1 + num_empty),
                            pruned_sizes[start:end + 1 + num_empty]))
                elif isinstance(sizes, (int, dict)):
                    self._raw_sizes = sizes
                    self._pruned_sizes = ivy.default(pruned_sizes, sizes)
                    if isinstance(self._pruned_sizes, int):
                        pruned_dict = dict()
                        for seq_idx, win_idx in conts_to_skip:
                            if seq_idx not in pruned_dict:
                                pruned_dict[seq_idx] = list()
                            pruned_dict[seq_idx].append(win_idx)
                        pruned_dict = {
                            k: len(set(v))
                            for k, v in pruned_dict.items()
                        }
                        pruned_sizes_dict = {
                            k: self._pruned_sizes - num_pruned
                            for k, num_pruned in pruned_dict.items()
                        }
                        num_empty = sum(
                            [size == 0 for size in pruned_sizes_dict.values()])
                        pruned_sizes = collections.defaultdict(
                            lambda: self._pruned_sizes, pruned_sizes_dict)
                    else:
                        num_empty = sum(
                            [ps == 0 for ps in self._pruned_sizes.values()])
                else:
                    raise Exception(
                        'Invalid type for sizes, expected one of int, dict, tuple or list, '
                        'but found {} of type {}'.format(sizes, type(sizes)))
                self._constant_size = isinstance(self._raw_sizes, int)
                if max_seq_len:
                    self._max_seq_len = max_seq_len
                else:
                    self._max_seq_len = self._pruned_sizes if self._constant_size else max(
                        self._pruned_sizes.values())
                self._fpath_template = fpath_template
                self._conts_to_skip = conts_to_skip
                if seq_idxs:
                    self._seq_idxs = seq_idxs
                else:
                    vals = [
                        v
                        for i, v in enumerate(range(start, end + 1 +
                                                    num_empty))
                        if pruned_sizes[i] > 0
                    ]
                    keys = range(0, min(end - start + 1 + num_empty,
                                        len(vals)))
                    self._seq_idxs = dict(zip(keys, vals))

            def __getitem__(self, slice_obj):
                if isinstance(slice_obj, slice):
                    seq_idxs = collections.OrderedDict([
                        (i, self._seq_idxs[idx]) for i, idx in enumerate(
                            range(slice_obj.start, slice_obj.stop,
                                  ivy.default(slice_obj.step, 1)))
                    ])
                elif isinstance(slice_obj, int):
                    seq_idxs = collections.OrderedDict(
                        {0: self._seq_idxs[slice_obj]})
                else:
                    raise Exception(
                        'Invalid type for slice_obj, expected either slice or int, '
                        'but found {} of type {}'.format(
                            slice_obj, type(slice_obj)))
                if self._constant_size:
                    sizes = self._raw_sizes
                else:
                    sizes = collections.OrderedDict({
                        seq_idx: self._raw_sizes[seq_idx]
                        for seq_idx in seq_idxs.values()
                    })
                return ContainerIdxMap(sizes,
                                       self._fpath_template,
                                       seq_idxs,
                                       max_seq_len=self._max_seq_len,
                                       conts_to_skip=self._conts_to_skip,
                                       pruned_sizes=self._pruned_sizes)

            def __len__(self):
                return len(self._seq_idxs)

            def shuffle(self):
                mapped_idxs = list(self._seq_idxs.values())
                np.random.shuffle(mapped_idxs)
                self._seq_idxs = collections.OrderedDict(
                    zip(self._seq_idxs.keys(), mapped_idxs))

            def to_idxs(self):
                seq_idxs = self._seq_idxs.values()
                sizes = [
                    self._raw_sizes
                    if self._constant_size else self._raw_sizes[seq_idx]
                    for seq_idx in seq_idxs
                ]
                rets = [[(seq_idx, win_idx) for win_idx in range(size)
                         if not SeqDataLoader._skip_cont(
                             seq_idx, win_idx, self._conts_to_skip)]
                        for seq_idx, size in zip(seq_idxs, sizes)]
                return [
                    r + [(None, None)] * (self._max_seq_len - len(r))
                    for r in rets if list(set(r)) != [None]
                ]

            def to_filepaths(self):
                if not ivy.exists(self._fpath_template):
                    raise Exception(
                        'to_filepaths method is not valid if fpath_template has not been specified '
                        'in the constructor.')
                seq_idxs = self._seq_idxs.values()
                sizes = [
                    self._raw_sizes
                    if self._constant_size else self._raw_sizes[seq_idx]
                    for seq_idx in seq_idxs
                ]
                rets = [[
                    self._fpath_template % (seq_idx, win_idx)
                    for win_idx in range(size) if not SeqDataLoader._skip_cont(
                        seq_idx, win_idx, self._conts_to_skip)
                ] for seq_idx, size in zip(seq_idxs, sizes)]
                return [
                    r + [''] * (self._max_seq_len - len(r)) for r in rets
                    if ''.join(r) != ''
                ]

            @property
            def sizes(self):
                return self._pruned_sizes

        # container filepaths
        if self._spec.container_load_mode in ['preload', 'dynamic']:
            fpath_template = os.path.join(
                self._container_data_dir,
                self._spec.dataset_spec.cont_fname_template)
        else:
            fpath_template = None
        container_idx_map = ContainerIdxMap(
            self._spec.dataset_spec.unpruned_sequence_lengths,
            fpath_template,
            start=starting_example,
            end=ending_example,
            conts_to_skip=self._spec.containers_to_skip)

        if self._spec.num_sequences != -1:
            container_idx_map = container_idx_map[0:self._spec.num_sequences]

        # shuffle sequences
        if self._spec.preshuffle_data:
            container_idx_map.shuffle()

        # extract sequence lengths
        if self._fixed_sequence_length:
            self._sequence_lengths =\
                collections.OrderedDict(zip(range(len(container_idx_map)),
                                            [self._spec.dataset_spec.sequence_lengths] * len(container_idx_map)))
            self._windows_per_seq = self._sequence_lengths[
                0] - self._window_size + 1
            # windowing values
            window_idxs_per_seq = ivy.reshape(
                ivy.arange(self._windows_per_seq, 0, 1),
                (self._windows_per_seq, 1))
            gather_idxs_list = list()
            for x in window_idxs_per_seq:
                gather_idxs_list.append(
                    ivy.expand_dims(
                        ivy.arange(x[0] + self._window_size, x[0], 1), 0))
            gather_idxs = ivy.concatenate(gather_idxs_list, 0)
            self._gather_idxs = \
                ivy.to_numpy(ivy.reshape(gather_idxs, (self._windows_per_seq * self._window_size, 1))).tolist()
        else:
            self._sequence_lengths = container_idx_map.sizes

        # maybe pre-load containers
        if self._spec.container_load_mode == 'preload':
            # load containers with vector data and image filepath entries
            container_slices = self._get_containers_w_filepath_img_entries_as_tensor_slices(
                container_idx_map.to_filepaths())
            if self._first_frame_validity_fn is not None:
                container_slices =\
                    self._first_frame_validity_fn(container_slices, [ending_example - starting_example + 1])

            # prune unwanted chains of keys
            if 'unused_key_chains' in self._spec:
                container_slices = self._prune_unused_key_chains(
                    container_slices)

            dataset = Dataset(ivy.Container.list_stack([
                c[0]
                for c in container_slices.unstack(0, container_slices.shape[0])
            ], 0),
                              'base',
                              container_slices.shape[0],
                              numpy_loading=True,
                              cache_size=self._base_cache_size,
                              queue_timeout=self._spec.queue_timeout)
        else:
            if self._spec.container_load_mode == 'dynamic':
                # load containers with filepath entries
                dataset = Dataset(ivy.Container({'fpaths': container_idx_map}),
                                  'base',
                                  len(container_idx_map),
                                  trans_fn=lambda cont: cont.map(
                                      lambda x_, kc: x_.to_filepaths()),
                                  elementwise_query_fn=False,
                                  numpy_loading=True,
                                  cache_size=self._base_cache_size,
                                  queue_timeout=self._spec.queue_timeout)
                dataset = dataset.map('loaded_json', self._load_json_files,
                                      self._num_workers.loaded_json)
                dataset = dataset.map('parsed_json', self._parse_json_strings,
                                      self._num_workers.parsed_json)
            else:
                dataset = Dataset(ivy.Container({'idx_map':
                                                 container_idx_map}),
                                  'base',
                                  len(container_idx_map),
                                  trans_fn=lambda cont: self._spec.
                                  custom_container_load_fn(self, cont),
                                  elementwise_query_fn=False,
                                  numpy_loading=True,
                                  cache_size=self._base_cache_size,
                                  queue_timeout=self._spec.queue_timeout)
            if 'unused_key_chains' in self._spec:
                dataset = dataset.map('keychain_pruned',
                                      self._prune_unused_key_chains,
                                      self._num_workers.keychain_pruned)
            if self._first_frame_validity_fn is not None:
                dataset = dataset.map(
                    'valid_first_frames',
                    lambda x_: self._first_frame_validity_fn(x_, None),
                    self._num_workers.valid_first_frames)
        if not (self._spec.dataset_spec.sequence_lengths == 1
                and self._window_size == 1):
            # ToDo: add other conditionals which make the loading more efficient if only one of the
            #  above two conditions is True
            dataset = dataset.map(
                'windowed', self._group_container_into_windowed_container,
                self._num_workers.windowed)
            dataset = dataset.unbatch(
                'unbatched',
                self._num_workers.unbatched,
                batch_sizes=[
                    max(seq_len, self._window_size) - self._window_size + 1
                    for seq_len in self._sequence_lengths.values()
                    if seq_len > 0
                ])
        if self._spec.shuffle_buffer_size > 0:
            dataset = dataset.shuffle('shuffled',
                                      self._spec.shuffle_buffer_size,
                                      self._num_workers.shuffled)
        dataset = dataset.map('loaded_data',
                              self._load_data_from_filepath_tensors,
                              self._num_workers.loaded_data)
        dataset = dataset.batch('batched', self._batch_size,
                                self._num_workers.batched)
        dataset = dataset.map(
            'from_np',
            lambda cont: cont.map(lambda x_, kc: ivy.array(x_, dev_str='cpu')),
            self._num_workers.from_np,
            numpy_loading=False)
        if ivy.exists(self._spec.post_proc_fn):
            dataset = dataset.map('post_processed', self._spec.post_proc_fn,
                                  self._num_workers.post_processed)
        if self._spec.with_prefetching:
            dataset = dataset.prefetch('prefetch')
        # ToDo: find way to make pre-fetching to GPU actually pre-fetch, ideally using multi-processing.
        #  For example, swapping prefetch and to_gpu ops around would work if to_gpu could accept self._num_workers.
        if self._spec.prefetch_to_devs:
            if isinstance(self._spec.prefetch_to_devs, str):
                dataset = dataset.to_dev('to_dev', self._spec.prefetch_to_devs)
            elif len(self._spec.prefetch_to_devs) == 1:
                dataset = dataset.to_dev('to_dev',
                                         self._spec.prefetch_to_devs[0])
            else:
                dataset = dataset.to_devs('to_devs',
                                          self._spec.prefetch_to_devs)
        return dataset
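Stripped of the domain specifics, _get_dataset builds a linear chain of named, lazily-applied stages: load, prune key chains, window, unbatch, shuffle, load data, batch, convert from numpy, post-process, prefetch, move to device. The staging pattern itself can be illustrated with plain Python generators (a conceptual sketch only; the real Dataset class adds caching, worker counts and queue timeouts at every stage):

def mapped(source, fn):
    # analogue of dataset.map(name, fn, num_workers)
    for item in source:
        yield fn(item)

def batched(source, batch_size):
    # analogue of dataset.batch(name, batch_size, num_workers)
    batch = []
    for item in source:
        batch.append(item)
        if len(batch) == batch_size:
            yield batch
            batch = []

pipeline = batched(mapped(range(10), lambda x: x * 2), batch_size=3)
# [[0, 2, 4], [6, 8, 10], [12, 14, 16]] (18 is dropped: this toy
# version discards the trailing partial batch)
print(list(pipeline))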
Example #8
def test_seq_loader_wo_cont_load(dev_str, f, call, with_prefetching,
                                 shuffle_buffer_size):

    # seed
    f.seed(0)
    np.random.seed(0)

    # config
    batch_size = 3
    window_size = 2
    _seq_lengths = [2, 3, 2, 3, 3, 1]
    _padded_seq_lengths = [max(sl, window_size) for sl in _seq_lengths]
    _seq_wind_sizes = [psl - window_size + 1 for psl in _padded_seq_lengths]

    seq_lengths_train = _seq_lengths[0:3]
    padded_seq_lengths_train = _padded_seq_lengths[0:3]
    seq_wind_sizes_train = _seq_wind_sizes[0:3]
    cum_seq_wind_sizes_train = np.cumsum(seq_wind_sizes_train)

    seq_lengths_valid = _seq_lengths[3:6]
    padded_seq_lengths_valid = _padded_seq_lengths[3:6]
    seq_wind_sizes_valid = _seq_wind_sizes[3:6]
    cum_seq_wind_sizes_valid = np.cumsum(seq_wind_sizes_valid)

    # dataset dir
    current_dir = os.path.dirname(os.path.realpath(__file__))
    ds_dir = os.path.join(current_dir, 'dataset')
    dataset_dirs = DatasetDirs(dataset_dir=ds_dir)

    # custom init function
    def custom_init_fn(self):
        alternative_data_dir = os.path.join(
            self._spec.dataset_spec.dirs.dataset_dir,
            'containers_alternative/')
        actions_fpath = os.path.join(alternative_data_dir, 'actions.json')
        with open(actions_fpath, 'r') as file:
            self._actions_dict = json.loads(file.read())

    # custom load function
    def custom_container_load_fn(self, cont):

        new_cont = ivy.Container()
        all_idxs = cont.idx_map.to_idxs()

        actions_seqs_list = list()

        seq_idxs_seqs_list = list()
        idxs_seqs_list = list()
        lengths_seqs_list = list()

        for seq in all_idxs:

            action_arrays_list = list()

            seq_idx_arrays_list = list()
            idx_arrays_list = list()

            found_end = False
            j = 0
            idx = 0
            last_idx = 0
            seq_idx = seq[0][0]

            for j, (_, idx) in enumerate(seq):
                if not ivy.exists(idx) and not found_end:
                    found_end = True
                    last_idx = j - 1
                if found_end:
                    idx = last_idx

                action_as_list = self._actions_dict[str(seq_idx)][str(idx)]
                action_arrays_list.append(
                    ivy.array(action_as_list, dtype_str='float32')[0])

                seq_idx_arrays_list.append(
                    ivy.array([seq_idx], dtype_str='float32'))
                idx_arrays_list.append(ivy.array([idx], dtype_str='float32'))
            length_arrays_list = [
                ivy.array([last_idx + 1 if found_end else idx + 1],
                          dtype_str='float32')
            ] * (j + 1)

            action_arrays = ivy.concatenate(action_arrays_list, 0)
            actions_seqs_list.append(action_arrays)

            seq_idx_arrays = ivy.concatenate(seq_idx_arrays_list, 0)
            seq_idxs_seqs_list.append(seq_idx_arrays)
            idx_arrays = ivy.concatenate(idx_arrays_list, 0)
            idxs_seqs_list.append(idx_arrays)
            length_arrays = ivy.concatenate(length_arrays_list, 0)
            lengths_seqs_list.append(length_arrays)

        new_cont.actions = actions_seqs_list

        new_cont.seq_info = ivy.Container()
        new_cont.seq_info.seq_idx = seq_idxs_seqs_list
        new_cont.seq_info.idx = idxs_seqs_list
        new_cont.seq_info.length = lengths_seqs_list

        return new_cont

    # data loader specifications
    dataset_spec = DatasetSpec(dataset_dirs,
                               sequence_lengths=[2, 3, 2, 3, 3, 1],
                               cont_fname_template='%06d_%06d.json')
    train_data_loader_spec = SeqDataLoaderSpec(
        dataset_spec,
        batch_size=batch_size,
        window_size=window_size,
        starting_idx=0,
        num_sequences=3,
        array_strs=['array'],
        with_prefetching=with_prefetching,
        container_load_mode='custom',
        shuffle_buffer_size=shuffle_buffer_size,
        preshuffle_data=False,
        custom_init_fn=custom_init_fn,
        custom_container_load_fn=custom_container_load_fn)
    valid_data_loader_spec = SeqDataLoaderSpec(
        dataset_spec,
        batch_size=batch_size,
        window_size=window_size,
        starting_idx=3,
        num_sequences=3,
        array_strs=['array'],
        with_prefetching=with_prefetching,
        container_load_mode='custom',
        shuffle_buffer_size=shuffle_buffer_size,
        preshuffle_data=False,
        custom_init_fn=custom_init_fn,
        custom_container_load_fn=custom_container_load_fn)

    # training data loader
    train_data_loader = SeqDataLoader(train_data_loader_spec)

    # validation data loader
    valid_data_loader = SeqDataLoader(valid_data_loader_spec)

    # testing
    for i in range(5):

        # get training batch
        train_batch = train_data_loader.get_next_batch()

        # test cardinality
        assert train_batch.actions.shape == (3, 2, 6)

        # test values
        window_idxs = [
            i % 4
            for i in list(range(i * batch_size, i * batch_size + batch_size))
        ]
        seq_idxs = [
            np.argmax(wi < cum_seq_wind_sizes_train) for wi in window_idxs
        ]
        seq_lens = [seq_lengths_train[si] for si in seq_idxs]
        padded_seq_lens = [padded_seq_lengths_train[si] for si in seq_idxs]
        unpadded_mask = [
            sl == psl for sl, psl in zip(seq_lens, padded_seq_lens)
        ]
        in_seq_win_idxs = [
            (wi - cum_seq_wind_sizes_train[si - 1]) if si != 0 else wi
            for si, wi in zip(seq_idxs, window_idxs)
        ]
        if shuffle_buffer_size == 0:
            assert np.allclose(
                ivy.to_numpy(train_batch.seq_info.length),
                np.tile(np.array(padded_seq_lens).reshape(-1, 1), (1, 2)))
            assert np.allclose(
                ivy.to_numpy(train_batch.seq_info.idx),
                np.concatenate((np.array(in_seq_win_idxs).reshape(
                    -1, 1), np.array(in_seq_win_idxs).reshape(-1, 1) +
                                np.array(unpadded_mask).reshape(-1, 1)), -1))

        # get validation batch
        valid_batch = valid_data_loader.get_next_batch()

        # test cardinality
        assert valid_batch.actions.shape == (3, 2, 6)

        # test values
        window_idxs = [
            i % 5
            for i in list(range(i * batch_size, i * batch_size + batch_size))
        ]
        seq_idxs = [
            np.argmax(wi < cum_seq_wind_sizes_valid) for wi in window_idxs
        ]
        seq_lens = [seq_lengths_valid[si] for si in seq_idxs]
        padded_seq_lens = [padded_seq_lengths_valid[si] for si in seq_idxs]
        unpadded_mask = [
            sl == psl for sl, psl in zip(seq_lens, padded_seq_lens)
        ]
        in_seq_win_idxs = [
            (wi - cum_seq_wind_sizes_valid[si - 1]) if si != 0 else wi
            for si, wi in zip(seq_idxs, window_idxs)
        ]
        if shuffle_buffer_size == 0:
            assert np.allclose(
                ivy.to_numpy(valid_batch.seq_info.length),
                np.tile(np.array(seq_lens).reshape(-1, 1), (1, 2)))
            assert np.allclose(
                ivy.to_numpy(valid_batch.seq_info.idx),
                np.concatenate((np.array(in_seq_win_idxs).reshape(
                    -1, 1), np.array(in_seq_win_idxs).reshape(-1, 1) +
                                np.array(unpadded_mask).reshape(-1, 1)), -1))

    # delete
    train_data_loader.close()
    del train_data_loader
    valid_data_loader.close()
    del valid_data_loader
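All of the index bookkeeping above reduces to one formula: a sequence of raw length L, padded up to the window size W, contributes max(L, W) - W + 1 windows. A quick self-contained check with the sequence lengths used in this test:

import numpy as np

window_size = 2
seq_lengths = [2, 3, 2, 3, 3, 1]
padded = [max(sl, window_size) for sl in seq_lengths]   # [2, 3, 2, 3, 3, 2]
windows = [p - window_size + 1 for p in padded]         # [1, 2, 1, 2, 2, 1]
# train split (first three sequences) yields 4 windows, hence the `i % 4` above
assert np.cumsum(windows[0:3]).tolist() == [1, 3, 4]
# validation split (last three) yields 5 windows, hence the `i % 5`
assert np.cumsum(windows[3:6]).tolist() == [2, 4, 5]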
Example #9
def test_seq_loader_containers_to_skip(dev_str, f, call, container_load_mode,
                                       array_mode, with_prefetching,
                                       batch_size, containers_to_skip):

    # seed
    f.seed(0)
    np.random.seed(0)

    # dataset dir
    current_dir = os.path.dirname(os.path.realpath(__file__))
    ds_dir = os.path.join(current_dir, 'dataset')
    dataset_dirs = DatasetDirs(dataset_dir=ds_dir,
                               containers_dir=os.path.join(
                                   ds_dir, 'containers'))

    dataset_spec = DatasetSpec(dataset_dirs,
                               sequence_lengths=[2, 3, 2, 3, 3, 1],
                               cont_fname_template='%06d_%06d.json')
    data_loader_spec = SeqDataLoaderSpec(
        dataset_spec,
        batch_size=batch_size,
        window_size=1,
        starting_idx=0,
        container_load_mode=container_load_mode,
        array_mode=array_mode,
        num_sequences=6,
        array_strs=['array'],
        float_strs=['depth'],
        uint8_strs=['rgb'],
        preshuffle_data=False,
        with_prefetching=with_prefetching,
        num_workers=1,
        containers_to_skip=containers_to_skip)

    seq_idx_length_and_idxs = [[0, 2, 0], [0, 2, 1], [1, 3, 0], [1, 3, 1],
                               [1, 3, 2], [2, 2, 0], [2, 2, 1], [3, 3, 0],
                               [3, 3, 1], [3, 3, 2], [4, 3, 0], [4, 3, 1],
                               [4, 3, 2], [5, 1, 0]]

    def _skip(seq_idx_length_and_idx):
        seq_idx_, _, idx_ = seq_idx_length_and_idx
        if (seq_idx_, None) in containers_to_skip or (None, idx_) in containers_to_skip or \
                (seq_idx_, idx_) in containers_to_skip:
            return True
        return False

    seq_idxs = [i[0] for i in seq_idx_length_and_idxs if not _skip(i)]
    lengths = [i[1] for i in seq_idx_length_and_idxs if not _skip(i)]
    idxs = [i[2] for i in seq_idx_length_and_idxs if not _skip(i)]

    # data loader
    data_loader = SeqDataLoader(data_loader_spec)

    # testing
    for i in range(7):

        # get ground truth
        idx = list()
        length = list()
        seq_idx = list()
        for j in range(batch_size):
            idx.append(idxs[(i * batch_size + j) % len(idxs)])
            length.append(lengths[(i * batch_size + j) % len(lengths)])
            seq_idx.append(seq_idxs[(i * batch_size + j) % len(seq_idxs)])

        # get training batch
        batch = data_loader.get_next_batch()

        # test seq_info
        assert np.array_equal(ivy.to_numpy(batch.seq_info.idx),
                              np.expand_dims(np.asarray(idx), -1))
        assert np.array_equal(ivy.to_numpy(batch.seq_info.length),
                              np.expand_dims(np.asarray(length), -1))
        assert np.array_equal(ivy.to_numpy(batch.seq_info.seq_idx),
                              np.expand_dims(np.asarray(seq_idx), -1))

    # delete
    data_loader.close()
    del data_loader
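The _skip predicate above encodes the matching convention for containers_to_skip: (seq_idx, None) skips a whole sequence, (None, idx) skips that window index in every sequence, and (seq_idx, idx) skips one exact container. A self-contained illustration of the rule:

containers_to_skip = [(0, None), (None, 2), (3, 1)]

def skip(seq_idx, idx):
    return ((seq_idx, None) in containers_to_skip
            or (None, idx) in containers_to_skip
            or (seq_idx, idx) in containers_to_skip)

assert skip(0, 5)       # all of sequence 0 is skipped
assert skip(4, 2)       # window 2 is skipped in every sequence
assert skip(3, 1)       # exactly this container is skipped
assert not skip(1, 0)   # everything else is kept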
Example #10
def test_seq_loader(dev_str, f, call, container_load_mode, array_mode,
                    with_prefetching, shuffle_buffer_size):

    # seed
    f.seed(0)
    np.random.seed(0)

    # config
    batch_size = 3
    window_size = 2
    _seq_lengths = [2, 3, 2, 3, 3, 1]
    _padded_seq_lengths = [max(sl, window_size) for sl in _seq_lengths]
    _seq_wind_sizes = [psl - window_size + 1 for psl in _padded_seq_lengths]

    seq_lengths_train = _seq_lengths[0:3]
    padded_seq_lengths_train = _padded_seq_lengths[0:3]
    seq_wind_sizes_train = _seq_wind_sizes[0:3]
    cum_seq_wind_sizes_train = np.cumsum(seq_wind_sizes_train)

    seq_lengths_valid = _seq_lengths[3:6]
    padded_seq_lengths_valid = _padded_seq_lengths[3:6]
    seq_wind_sizes_valid = _seq_wind_sizes[3:6]
    cum_seq_wind_sizes_valid = np.cumsum(seq_wind_sizes_valid)

    # dataset dir
    current_dir = os.path.dirname(os.path.realpath(__file__))
    ds_dir = os.path.join(current_dir, 'dataset')
    dataset_dirs = DatasetDirs(dataset_dir=ds_dir,
                               containers_dir=os.path.join(
                                   ds_dir, 'containers'))

    # data loader specifications
    dataset_spec = DatasetSpec(dataset_dirs,
                               sequence_lengths=[2, 3, 2, 3, 3, 1],
                               cont_fname_template='%06d_%06d.json')
    train_data_loader_spec = SeqDataLoaderSpec(
        dataset_spec,
        batch_size=batch_size,
        window_size=window_size,
        starting_idx=0,
        num_sequences=3,
        container_load_mode=container_load_mode,
        array_mode=array_mode,
        array_strs=['array'],
        float_strs=['depth'],
        uint8_strs=['rgb'],
        with_prefetching=with_prefetching,
        shuffle_buffer_size=shuffle_buffer_size,
        preshuffle_data=False)
    valid_data_loader_spec = SeqDataLoaderSpec(
        dataset_spec,
        batch_size=batch_size,
        window_size=window_size,
        starting_idx=3,
        num_sequences=3,
        container_load_mode=container_load_mode,
        array_mode=array_mode,
        array_strs=['array'],
        float_strs=['depth'],
        uint8_strs=['rgb'],
        with_prefetching=with_prefetching,
        shuffle_buffer_size=shuffle_buffer_size,
        preshuffle_data=False)

    # training data loader
    train_data_loader = SeqDataLoader(train_data_loader_spec)

    # validation data loader
    valid_data_loader = SeqDataLoader(valid_data_loader_spec)

    # testing
    for i in range(5):

        # get training batch
        train_batch = train_data_loader.get_next_batch()

        # test cardinality
        assert train_batch.actions.shape == (3, 2, 6)
        assert train_batch.observations.image.ego.ego_cam_px.rgb.shape == \
            (3, 2, 32, 32, 3)
        assert train_batch.array.data.shape == (3, 2, 3)

        # test values
        window_idxs = [
            i % 4
            for i in list(range(i * batch_size, i * batch_size + batch_size))
        ]
        seq_idxs = [
            np.argmax(wi < cum_seq_wind_sizes_train) for wi in window_idxs
        ]
        seq_lens = [seq_lengths_train[si] for si in seq_idxs]
        padded_seq_lens = [padded_seq_lengths_train[si] for si in seq_idxs]
        unpadded_mask = [
            sl == psl for sl, psl in zip(seq_lens, padded_seq_lens)
        ]
        in_seq_win_idxs = [
            (wi - cum_seq_wind_sizes_train[si - 1]) if si != 0 else wi
            for si, wi in zip(seq_idxs, window_idxs)
        ]
        if shuffle_buffer_size == 0:
            assert np.allclose(
                ivy.to_numpy(train_batch.seq_info.length),
                np.tile(np.array(padded_seq_lens).reshape(-1, 1), (1, 2)))
            assert np.allclose(
                ivy.to_numpy(train_batch.seq_info.idx),
                np.concatenate((np.array(in_seq_win_idxs).reshape(
                    -1, 1), np.array(in_seq_win_idxs).reshape(-1, 1) +
                                np.array(unpadded_mask).reshape(-1, 1)), -1))

        # get validation batch
        valid_batch = valid_data_loader.get_next_batch()

        # test cardinality
        assert valid_batch.actions.shape == (3, 2, 6)
        assert valid_batch.observations.image.ego.ego_cam_px.rgb.shape == \
            (3, 2, 32, 32, 3)
        assert valid_batch.array.data.shape == (3, 2, 3)

        # test values
        window_idxs = [
            i % 5
            for i in list(range(i * batch_size, i * batch_size + batch_size))
        ]
        seq_idxs = [
            np.argmax(wi < cum_seq_wind_sizes_valid) for wi in window_idxs
        ]
        seq_lens = [seq_lengths_valid[si] for si in seq_idxs]
        padded_seq_lens = [padded_seq_lengths_valid[si] for si in seq_idxs]
        unpadded_mask = [
            sl == psl for sl, psl in zip(seq_lens, padded_seq_lens)
        ]
        in_seq_win_idxs = [
            (wi - cum_seq_wind_sizes_valid[si - 1]) if si != 0 else wi
            for si, wi in zip(seq_idxs, window_idxs)
        ]
        if shuffle_buffer_size == 0:
            assert np.allclose(
                ivy.to_numpy(valid_batch.seq_info.length),
                np.tile(np.array(seq_lens).reshape(-1, 1), (1, 2)))
            assert np.allclose(
                ivy.to_numpy(valid_batch.seq_info.idx),
                np.concatenate((np.array(in_seq_win_idxs).reshape(
                    -1, 1), np.array(in_seq_win_idxs).reshape(-1, 1) +
                                np.array(unpadded_mask).reshape(-1, 1)), -1))

    # delete
    train_data_loader.close()
    del train_data_loader
    valid_data_loader.close()
    del valid_data_loader

    # test keychain pruning, no container pre-loading
    data_loader_spec = SeqDataLoaderSpec(
        dataset_spec,
        batch_size=3,
        window_size=2,
        starting_idx=0,
        num_sequences=3,
        container_load_mode=container_load_mode,
        array_mode=array_mode,
        shuffle_buffer_size=shuffle_buffer_size,
        unused_key_chains=['observations/image/ego/ego_cam_px/depth'],
        array_strs=['array'],
        float_strs=['depth'],
        uint8_strs=['rgb'],
        with_prefetching=with_prefetching,
        preshuffle_data=False)
    data_loader = SeqDataLoader(data_loader_spec)

    # get training batch
    batch = data_loader.get_next_batch()

    # test cardinality
    assert batch.actions.shape == (3, 2, 6)
    assert batch.observations.image.ego.ego_cam_px.rgb.shape == \
        (3, 2, 32, 32, 3)
    assert batch.array.data.shape == (3, 2, 3)

    # test removed key chain
    assert 'depth' not in batch.observations.image.ego.ego_cam_px

    # delete
    data_loader.close()
    del data_loader
Example #11
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    with_mxnd = f is ivy.mxnd
    if with_mxnd:
        print('\nMXNet does not support "sum" or "min" reductions for scatter_nd,\n'
              'instead it only supports non-deterministic replacement for duplicates.\n'
              'Depth buffer rendering (requires min) and fusion buffer (requires sum) are therefore unsupported.\n'
              'The rendering in this demo with the MXNet backend exhibits non-deterministic jagged edges as a result.')
    sim = Simulator(interactive, try_use_sim)
    import matplotlib.pyplot as plt
    xyzs = list()
    rgbs = list()
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        for cam in sim.cams:
            depth, rgb = cam.cap()
            xyz = sim.depth_to_xyz(depth, cam.get_inv_ext_mat(), cam.inv_calib_mat, [1024]*2)
            xyzs.append(xyz)
            rgbs.append(rgb)
        xyz = ivy.reshape(ivy.concatenate(xyzs, 1), (-1, 3))
        rgb = ivy.reshape(ivy.concatenate(rgbs, 1), (-1, 3))
        cam_coords = ivy_vision.world_to_cam_coords(ivy_mech.make_coordinates_homogeneous(ivy.expand_dims(xyz, 1)),
                                                    sim.target_cam.get_ext_mat())
        ds_pix_coords = ivy_vision.cam_to_ds_pixel_coords(cam_coords, sim.target_cam.calib_mat)
        depth = ds_pix_coords[..., -1]
        pix_coords = ds_pix_coords[..., 0, 0:2] / depth
        final_image_dims = [512]*2
        feat = ivy.concatenate((depth, rgb), -1)
        rendered_img_no_db, _, _ = ivy_vision.quantize_to_image(
            pix_coords, final_image_dims, feat, ivy.zeros(final_image_dims + [4]), with_db=False)
        with_db = not with_mxnd
        rendered_img_with_db, _, _ = ivy_vision.quantize_to_image(
            pix_coords, final_image_dims, feat, ivy.zeros(final_image_dims + [4]), with_db=with_db)

        a_img = cv2.resize(ivy.to_numpy(rgbs[0]), (256, 256))
        a_img[0:50, 0:50] = np.zeros_like(a_img[0:50, 0:50])
        a_img[5:45, 5:45] = np.ones_like(a_img[5:45, 5:45])
        cv2.putText(a_img, 'a', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        b_img = cv2.resize(ivy.to_numpy(rgbs[1]), (256, 256))
        b_img[0:50, 0:50] = np.zeros_like(b_img[0:50, 0:50])
        b_img[5:45, 5:45] = np.ones_like(b_img[5:45, 5:45])
        cv2.putText(b_img, 'b', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        c_img = cv2.resize(ivy.to_numpy(rgbs[2]), (256, 256))
        c_img[0:50, 0:50] = np.zeros_like(c_img[0:50, 0:50])
        c_img[5:45, 5:45] = np.ones_like(c_img[5:45, 5:45])
        cv2.putText(c_img, 'c', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        target_img = cv2.resize(ivy.to_numpy(sim.target_cam.cap()[1]), (256, 256))
        target_img[0:50, 0:140] = np.zeros_like(target_img[0:50, 0:140])
        target_img[5:45, 5:135] = np.ones_like(target_img[5:45, 5:135])
        cv2.putText(target_img, 'target', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        msg = 'non-deterministic' if with_mxnd else 'no depth buffer'
        width = 360 if with_mxnd else 320
        no_db_img = np.copy(ivy.to_numpy(rendered_img_no_db[..., 3:]))
        no_db_img[0:50, 0:width+5] = np.zeros_like(no_db_img[0:50, 0:width+5])
        no_db_img[5:45, 5:width] = np.ones_like(no_db_img[5:45, 5:width])
        cv2.putText(no_db_img, msg, (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        with_db_img = np.copy(ivy.to_numpy(rendered_img_with_db[..., 3:]))
        with_db_img[0:50, 0:350] = np.zeros_like(with_db_img[0:50, 0:350])
        with_db_img[5:45, 5:345] = np.ones_like(with_db_img[5:45, 5:345])
        cv2.putText(with_db_img, 'with depth buffer', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        raw_imgs = np.concatenate((np.concatenate((a_img, b_img), 1),
                                   np.concatenate((c_img, target_img), 1)), 0)
        to_concat = (raw_imgs, no_db_img) if with_mxnd else (raw_imgs, no_db_img, with_db_img)
        final_img = np.concatenate(to_concat, 1)

        if interactive:
            print('\nClose the image window when you are ready.\n')
            plt.imshow(final_img)
            plt.show()
        xyzs.clear()
        rgbs.clear()
    sim.close()
    unset_framework()
Example #12
def main(interactive=True, try_use_sim=True, f=None):

    # setup framework
    f = choose_random_framework() if f is None else f
    set_framework(f)

    # simulator and drone
    sim = Simulator(interactive, try_use_sim)
    drone = sim.drone

    # ESM
    esm = ivy_mem.ESM()
    esm_mem = esm.empty_memory(1, 1)

    # demo loop
    for _ in range(1000 if interactive and sim.with_pyrep else 100):

        # log iteration
        print('timestep {}'.format(_))

        # acquire image measurements
        depth, rgb = drone.cam.cap()

        # convert to ESM format
        ds_pix_coords = ivy_vision.depth_to_ds_pixel_coords(depth)
        cam_coords = ivy_vision.ds_pixel_to_cam_coords(ds_pix_coords, drone.cam.inv_calib_mat)[..., 0:3]
        img_mean = ivy.concatenate((cam_coords, rgb), -1)

        # acquire pose measurements
        cam_rel_mat = drone.cam.mat_rel_to_drone
        agent_rel_mat = ivy.array(drone.measure_incremental_mat())

        # single esm camera measurement
        esm_cam_meas = ESMCamMeasurement(
            img_mean=img_mean,
            cam_rel_mat=cam_rel_mat
        )

        # total esm observation
        obs = ESMObservation(
            img_meas={'cam0': esm_cam_meas},
            agent_rel_mat=agent_rel_mat)

        esm_mem = esm(obs, esm_mem)

        # update esm visualization
        if not interactive:
            continue
        rgb_img = _add_image_border(
            cv2.resize(ivy.to_numpy(rgb).copy(), (180, 180)))
        rgb_img = _add_title(rgb_img, 25, 75, 2, 'raw rgb', 70)
        depth_img = _add_image_border(cv2.resize(np.clip(
            np.tile(ivy.to_numpy(depth), (1, 1, 3))/3, 0, 1).copy(), (180, 180)))
        depth_img = _add_title(depth_img, 25, 90, 2, 'raw depth', 85)
        raw_img_concatted = np.concatenate((rgb_img, depth_img), 0)
        esm_feat = _add_image_border(np.clip(ivy.to_numpy(esm_mem.mean[0, 0, ..., 3:]), 0, 1).copy())
        esm_feat = _add_title(esm_feat, 25, 80, 2, 'esm rgb', 75)
        esm_depth = _add_image_border(np.clip(np.tile(ivy.to_numpy(esm_mem.mean[0, 0, ..., 2:3])/3,
                                                      (1, 1, 3)), 0, 1).copy())
        esm_depth = _add_title(esm_depth, 25, 95, 2, 'esm depth', 90)
        esm_img_concatted = np.concatenate((esm_feat, esm_depth), 0)
        img_to_show = np.concatenate((raw_img_concatted, esm_img_concatted), 1)
        plt.imshow(img_to_show)
        plt.show(block=False)
        plt.pause(0.001)

    # end of demo
    sim.close()
    unset_framework()
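The measurement preparation above follows a fixed recipe: lift the depth image to depth-scaled pixel coordinates, unproject to camera coordinates with the inverse calibration matrix, then append RGB as per-pixel features. A minimal sketch of just that conversion, assuming the same ivy_vision API on a numpy backend and a toy identity calibration matrix:

import ivy
import ivy_vision

ivy.set_framework('numpy')

depth = ivy.ones((4, 4, 1))  # toy 4x4 depth image
rgb = ivy.ones((4, 4, 3))
inv_calib_mat = ivy.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])

ds_pix_coords = ivy_vision.depth_to_ds_pixel_coords(depth)
cam_coords = ivy_vision.ds_pixel_to_cam_coords(ds_pix_coords,
                                               inv_calib_mat)[..., 0:3]
img_mean = ivy.concatenate((cam_coords, rgb), -1)  # (4, 4, 6) feature image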
Example #13
def main(batch_size=32,
         num_train_steps=31250,
         compile_flag=True,
         num_bits=8,
         seq_len=28,
         ctrl_output_size=100,
         memory_size=128,
         memory_vector_dim=28,
         overfit_flag=False,
         interactive=True,
         f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)

    # train config
    lr = 1e-3 if not overfit_flag else 1e-2
    batch_size = batch_size if not overfit_flag else 1
    num_train_steps = num_train_steps if not overfit_flag else 150
    max_grad_norm = 50

    # logging config
    vis_freq = 250 if not overfit_flag else 1

    # optimizer
    optimizer = ivy.Adam(lr=lr)

    # ntm
    ntm = NTM(input_dim=num_bits + 1,
              output_dim=num_bits,
              ctrl_output_size=ctrl_output_size,
              ctrl_layers=1,
              memory_size=memory_size,
              memory_vector_dim=memory_vector_dim,
              read_head_num=1,
              write_head_num=1)

    # compile loss fn
    total_seq_example = ivy.random_uniform(shape=(batch_size, 2 * seq_len + 1,
                                                  num_bits + 1))
    target_seq_example = total_seq_example[:, 0:seq_len, :-1]
    if compile_flag:
        loss_fn_maybe_compiled = ivy.compile_fn(
            lambda v, ttl_sq, trgt_sq, sq_ln: loss_fn(ntm, v, ttl_sq, trgt_sq,
                                                      sq_ln),
            dynamic=False,
            example_inputs=[
                ntm.v, total_seq_example, target_seq_example, seq_len
            ])
    else:
        loss_fn_maybe_compiled = lambda v, ttl_sq, trgt_sq, sq_ln: loss_fn(
            ntm, v, ttl_sq, trgt_sq, sq_ln)

    # init
    input_seq_m1 = ivy.cast(
        ivy.random_uniform(0., 1., (batch_size, seq_len, num_bits)) > 0.5,
        'float32')
    mw = None
    vw = None

    for i in range(num_train_steps):

        # sequence to copy
        if not overfit_flag:
            input_seq_m1 = ivy.cast(
                ivy.random_uniform(0., 1.,
                                   (batch_size, seq_len, num_bits)) > 0.5,
                'float32')
        target_seq = input_seq_m1
        input_seq = ivy.concatenate(
            (input_seq_m1, ivy.zeros((batch_size, seq_len, 1))), -1)
        eos = ivy.ones((batch_size, 1, num_bits + 1))
        output_seq = ivy.zeros_like(input_seq)
        total_seq = ivy.concatenate((input_seq, eos, output_seq), -2)

        # train step
        loss, pred_vals = train_step(loss_fn_maybe_compiled, optimizer, ntm,
                                     total_seq, target_seq, seq_len, mw, vw,
                                     ivy.array(i + 1,
                                               'float32'), max_grad_norm)

        # log
        print('step: {}, loss: {}'.format(i, ivy.to_numpy(loss).item()))

        # visualize
        if i % vis_freq == 0:
            target_to_vis = (ivy.to_numpy(target_seq[0] * 255)).astype(
                np.uint8)
            target_to_vis = np.transpose(
                cv2.resize(target_to_vis, (560, 160),
                           interpolation=cv2.INTER_NEAREST), (1, 0))

            pred_to_vis = (ivy.to_numpy(pred_vals[0] * 255)).astype(np.uint8)
            pred_to_vis = np.transpose(
                cv2.resize(pred_to_vis, (560, 160),
                           interpolation=cv2.INTER_NEAREST), (1, 0))

            img_to_vis = np.concatenate((pred_to_vis, target_to_vis), 0)
            img_to_vis = cv2.resize(img_to_vis, (1120, 640),
                                    interpolation=cv2.INTER_NEAREST)

            img_to_vis[0:60, -200:] = 0
            img_to_vis[5:55, -195:-5] = 255
            cv2.putText(img_to_vis, 'step {}'.format(i), (935, 42),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

            img_to_vis[0:60, 0:200] = 0
            img_to_vis[5:55, 5:195] = 255
            cv2.putText(img_to_vis, 'prediction', (7, 42),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

            img_to_vis[320:380, 0:130] = 0
            img_to_vis[325:375, 5:125] = 255
            cv2.putText(img_to_vis, 'target', (7, 362),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

            if interactive:
                cv2.imshow('prediction_and_target', img_to_vis)
                if overfit_flag:
                    cv2.waitKey(1)
                else:
                    cv2.waitKey(100)
                    cv2.destroyAllWindows()
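For reference, the copy-task tensor assembled each step above concatenates the flagged input sequence, a row of ones as the end-of-sequence marker, and a zero placeholder the network must fill with its reproduction. A small numpy illustration of the resulting layout:

import numpy as np

batch_size, seq_len, num_bits = 1, 3, 4
input_seq_m1 = (np.random.rand(batch_size, seq_len, num_bits) > 0.5).astype('float32')
input_seq = np.concatenate(
    (input_seq_m1, np.zeros((batch_size, seq_len, 1), 'float32')), -1)
eos = np.ones((batch_size, 1, num_bits + 1), 'float32')
output_seq = np.zeros_like(input_seq)
total_seq = np.concatenate((input_seq, eos, output_seq), -2)
assert total_seq.shape == (batch_size, 2 * seq_len + 1, num_bits + 1)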
Example #14
def set_pos(self, pos):
    return self._pr_obj.set_position(ivy.to_numpy(pos))
Example #15
    def render(self, mode='human'):
        """
        Renders the environment.
        The set of supported modes varies per environment. (And some
        environments do not support rendering at all.) By convention,
        if mode is:

        - human: render to the current display or terminal and
          return nothing. Usually for human consumption.
        - rgb_array: Return a numpy.ndarray with shape (x, y, 3),
          representing RGB values for an x-by-y pixel image, suitable
          for turning into a video.
        - ansi: Return a string (str) or StringIO.StringIO containing a
          terminal-style text representation. The text can include newlines
          and ANSI escape sequences (e.g. for colors).

        :param mode: Render mode, one of [human|rgb_array], default human
        :type mode: str, optional
        :return: Rendered image.
        """
        if self.viewer is None:
            # noinspection PyBroadException
            try:
                from gym.envs.classic_control import rendering
            except Exception:
                if not self._logged_headless_message:
                    print(
                        'Unable to connect to display. Running the Ivy environment in headless mode...'
                    )
                    self._logged_headless_message = True
                return
            self.viewer = rendering.Viewer(500, 500)
            bound = self.num_joints + 0.2
            self.viewer.set_bounds(-bound, bound, -bound, bound)

            # Goal.
            goal_geom = rendering.make_circle(0.2)
            goal_geom.set_color(0.4, 0.6, 1.)
            self.goal_tr = rendering.Transform()
            goal_geom.add_attr(self.goal_tr)
            self.viewer.add_geom(goal_geom)

            # Arm segments and joints.
            l, r, t, b = 0, 1., 0.1, -0.1
            self.segment_trs = []
            for _ in range(self.num_joints):
                # Segment.
                segment_geom = rendering.FilledPolygon([(l, b), (l, t), (r, t),
                                                        (r, b)])
                segment_tr = rendering.Transform()
                self.segment_trs.append(segment_tr)
                segment_geom.add_attr(segment_tr)
                segment_geom.set_color(0., 0., 0.)
                self.viewer.add_geom(segment_geom)

                # Joint.
                joint_geom = rendering.make_circle(0.1)
                joint_geom.set_color(0.5, 0.5, 0.5)
                joint_geom.add_attr(segment_tr)
                self.viewer.add_geom(joint_geom)

            # End effector.
            self.end_geom = rendering.make_circle(0.1)
            self.end_tr = rendering.Transform()
            self.end_geom.add_attr(self.end_tr)
            self.viewer.add_geom(self.end_geom)

        self.goal_tr.set_translation(*ivy.to_numpy(self.goal_xy).tolist())

        x, y = 0., 0.
        for segment_tr, angle in zip(self.segment_trs,
                                     ivy.reshape(self.angles, (-1, 1))):
            segment_tr.set_rotation(ivy.to_numpy(angle)[0])
            segment_tr.set_translation(x, y)
            x = ivy.to_numpy(x + ivy.cos(ivy.expand_dims(angle, 0))[0])[0]
            y = ivy.to_numpy(y + ivy.sin(ivy.expand_dims(angle, 0))[0])[0]
        self.end_tr.set_translation(x, y)
        rew = ivy.to_numpy(self.get_reward())[0]
        self.end_geom.set_color(1 - rew, rew, 0.)

        return self.viewer.render(return_rgb_array=mode == 'rgb_array')
Example #16
def show_full_spline_path(anchor_poses, interpolated_poses, sc, tc, x_label,
                          y_label, title, start_label, target_label,
                          connect_anchors):

    if not INTERACTIVE:
        return

    fig = plt.figure()
    ax = fig.add_subplot(111)

    anchor_poses_trans = ivy.to_numpy(ivy.swapaxes(anchor_poses, 0, 1))
    interpolated_poses_trans = ivy.to_numpy(
        ivy.swapaxes(interpolated_poses, 0, 1))
    colors = [[0.2, 0.2, 0.8], [0.8, 0.2, 0.2], [0.2, 0.8, 0.8],
              [0.8, 0.2, 0.8], [0.8, 0.8, 0.2]]

    if connect_anchors:
        for a_poses in anchor_poses:
            ax.plot(ivy.to_numpy(a_poses[:, 0]).tolist(),
                    ivy.to_numpy(a_poses[:, 1]).tolist(),
                    c=[0., 0., 0.],
                    linestyle='solid',
                    linewidth=3)

    for a_poses, i_poses, col in zip(anchor_poses_trans,
                                     interpolated_poses_trans, colors):

        ax.scatter(ivy.to_numpy(i_poses[..., 0]).tolist(),
                   ivy.to_numpy(i_poses[..., 1]).tolist(),
                   s=15,
                   c=[col])
        ax.scatter(ivy.to_numpy(a_poses[1:4, 0]).tolist(),
                   ivy.to_numpy(a_poses[1:4, 1]).tolist(),
                   s=80,
                   c=[[1., 1., 1.]],
                   edgecolors=[col],
                   linewidths=2)

        ax.scatter(ivy.to_numpy(a_poses[0:1, 0]).tolist(),
                   ivy.to_numpy(a_poses[0:1, 1]).tolist(),
                   s=100,
                   c=[[1.0, 0.6, 0.]])

        ax.scatter(ivy.to_numpy(a_poses[-1:, 0]).tolist(),
                   ivy.to_numpy(a_poses[-1:, 1]).tolist(),
                   s=100,
                   c=[[0.2, 0.8, 0.2]])

    ax.set_xlabel(x_label, size=15)
    ax.set_ylabel(y_label, size=15).set_rotation(0)
    ax.text(sc[0], sc[1], start_label)
    ax.text(tc[0], tc[1], target_label)
    ax.set_title(title)
    ax.axis('equal')
    plt.show()
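A minimal usage sketch with made-up random data (assuming the module-level INTERACTIVE flag is set): judging from the swapaxes calls above, anchor_poses is expected with shape (num_anchors, num_splines, 2) and interpolated_poses with shape (num_steps, num_splines, 2), with at most five splines given the hard-coded colour list.

# hypothetical call; shapes inferred from the function body above
num_anchors, num_steps, num_splines = 5, 100, 2
anchor_poses = ivy.random_uniform(shape=[num_anchors, num_splines, 2])
interpolated_poses = ivy.random_uniform(shape=[num_steps, num_splines, 2])
show_full_spline_path(anchor_poses, interpolated_poses, sc=(0., 0.), tc=(1., 1.),
                      x_label='x', y_label='y', title='spline paths',
                      start_label='start', target_label='target',
                      connect_anchors=True)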
Example #17
def main():

    # LSTM #
    # -----#

    # using the Ivy LSTM memory module, dual stacked, in a PyTorch model

    class TorchModelWithLSTM(torch.nn.Module):
        def __init__(self, channels_in, channels_out):
            torch.nn.Module.__init__(self)
            self._linear = torch.nn.Linear(channels_in, 64)
            self._lstm = ivy_mem.LSTM(64, channels_out, 2, return_state=False)
            self._assign_variables()

        def _assign_variables(self):
            self._lstm.v.map(lambda x, kc: self.register_parameter(
                name=kc, param=torch.nn.Parameter(x)))
            self._lstm.v = self._lstm.v.map(lambda x, kc: self._parameters[kc])

        def forward(self, x):
            x = self._linear(x)
            return self._lstm(x)

    # create model
    in_channels = 32
    out_channels = 8
    ivy.set_framework('torch')
    model = TorchModelWithLSTM(in_channels, out_channels)

    # define inputs
    batch_shape = [1, 2]
    timesteps = 3
    input_shape = batch_shape + [timesteps, in_channels]
    input_seq = torch.rand(input_shape)

    # call model and test output
    output_seq = model(input_seq)
    assert input_seq.shape[:-1] == output_seq.shape[:-1]
    assert input_seq.shape[-1] == in_channels
    assert output_seq.shape[-1] == out_channels

    # define loss function
    target = torch.zeros_like(output_seq)

    def loss_fn():
        pred = model(input_seq)
        return torch.sum((pred - target)**2)

    # define optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

    # train model
    print('\ntraining dummy PyTorch LSTM model...\n')
    for i in range(10):
        optimizer.zero_grad()  # clear gradients accumulated on the previous step
        loss = loss_fn()
        loss.backward()
        optimizer.step()
        print('step {}, loss = {}'.format(i, loss))
    print('\ndummy PyTorch LSTM model trained!\n')
    ivy.unset_framework()

    # NTM #
    # ----#

    # using the Ivy NTM memory module in a TensorFlow model

    class TfModelWithNTM(tf.keras.Model):
        def __init__(self, channels_in, channels_out):
            tf.keras.Model.__init__(self)
            self._linear = tf.keras.layers.Dense(64)
            memory_size = 4
            memory_vector_dim = 1
            self._ntm = ivy_mem.NTM(input_dim=64,
                                    output_dim=channels_out,
                                    ctrl_output_size=channels_out,
                                    ctrl_layers=1,
                                    memory_size=memory_size,
                                    memory_vector_dim=memory_vector_dim,
                                    read_head_num=1,
                                    write_head_num=1)
            self._assign_variables()

        def _assign_variables(self):
            self._ntm.v.map(
                lambda x, kc: self.add_weight(name=kc, shape=x.shape))
            self.set_weights(
                [ivy.to_numpy(v) for k, v in self._ntm.v.to_iterator()])
            self.trainable_weights_dict = dict()
            for weight in self.trainable_weights:
                self.trainable_weights_dict[weight.name] = weight
            self._ntm.v = self._ntm.v.map(
                lambda x, kc: self.trainable_weights_dict[kc + ':0'])

        def call(self, x, **kwargs):
            x = self._linear(x)
            return self._ntm(x)

    # create model
    in_channels = 32
    out_channels = 8
    ivy.set_framework('tensorflow')
    model = TfModelWithNTM(in_channels, out_channels)

    # define inputs
    batch_shape = [1, 2]
    timesteps = 3
    input_shape = batch_shape + [timesteps, in_channels]
    input_seq = tf.random.uniform(input_shape)

    # call model and test output
    output_seq = model(input_seq)
    assert input_seq.shape[:-1] == output_seq.shape[:-1]
    assert input_seq.shape[-1] == in_channels
    assert output_seq.shape[-1] == out_channels

    # define loss function
    target = tf.zeros_like(output_seq)

    def loss_fn():
        pred = model(input_seq)
        return tf.reduce_sum((pred - target)**2)

    # define optimizer
    optimizer = tf.keras.optimizers.Adam(1e-2)

    # train model
    print('\ntraining dummy TensorFlow NTM model...\n')
    for i in range(10):
        with tf.GradientTape() as tape:
            loss = loss_fn()
        grads = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        print('step {}, loss = {}'.format(i, loss))
    print('\ndummy TensorFlow NTM model trained!\n')
    ivy.unset_framework()

    # ESM #
    # ----#

    # using the Ivy ESM memory module in a pure-Ivy model, with a PyTorch backend
    # ToDo: add pre-ESM conv layers to this demo

    class IvyModelWithESM(ivy.Module):
        def __init__(self, channels_in, channels_out):
            self._channels_in = channels_in
            self._esm = ivy_mem.ESM(omni_image_dims=(16, 32))
            self._linear = ivy_mem.Linear(channels_in, channels_out)
            ivy.Module.__init__(self, 'cpu')

        def _forward(self, obs):
            mem = self._esm(obs)
            x = ivy.reshape(mem.mean, (-1, self._channels_in))
            return self._linear(x)

    # create model
    in_channels = 32
    out_channels = 8
    ivy.set_framework('torch')
    model = IvyModelWithESM(in_channels, out_channels)

    # input config
    batch_size = 1
    image_dims = [5, 5]
    num_timesteps = 2
    num_feature_channels = 3

    # create image of pixel co-ordinates
    uniform_pixel_coords = ivy_vision.create_uniform_pixel_coords_image(
        image_dims, [batch_size, num_timesteps])

    # define camera measurement
    depths = ivy.random_uniform(shape=[batch_size, num_timesteps] +
                                image_dims + [1])
    ds_pixel_coords = ivy_vision.depth_to_ds_pixel_coords(depths)
    inv_calib_mats = ivy.random_uniform(
        shape=[batch_size, num_timesteps, 3, 3])
    cam_coords = ivy_vision.ds_pixel_to_cam_coords(ds_pixel_coords,
                                                   inv_calib_mats)[..., 0:3]
    features = ivy.random_uniform(shape=[batch_size, num_timesteps] +
                                  image_dims + [num_feature_channels])
    img_mean = ivy.concatenate((cam_coords, features), -1)
    cam_rel_mat = ivy.identity(4, batch_shape=[batch_size,
                                               num_timesteps])[..., 0:3, :]

    # place these into an ESM camera measurement container
    esm_cam_meas = ESMCamMeasurement(img_mean=img_mean,
                                     cam_rel_mat=cam_rel_mat)

    # define agent pose transformation
    agent_rel_mat = ivy.identity(4, batch_shape=[batch_size,
                                                 num_timesteps])[..., 0:3, :]

    # collect together into an ESM observation container
    esm_obs = ESMObservation(img_meas={'camera_0': esm_cam_meas},
                             agent_rel_mat=agent_rel_mat)

    # call model and test output
    output = model(esm_obs)
    assert output.shape[-1] == out_channels

    # define loss function
    target = ivy.zeros_like(output)

    def loss_fn(v):
        pred = model(esm_obs, v=v)
        return ivy.reduce_mean((pred - target)**2)

    # optimizer
    optimizer = ivy.SGD(lr=1e-4)

    # train model
    print('\ntraining dummy Ivy ESM model...\n')
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, model.v)
        model.v = optimizer.step(model.v, grads)
        print('step {}, loss = {}'.format(i, ivy.to_numpy(loss).item()))
    print('\ndummy Ivy ESM model trained!\n')
    ivy.unset_framework()

    # message
    print('End of Run Through Demo!')
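Distilled from the demos above, the framework-agnostic Ivy training step always follows the same pattern; a minimal sketch, where model is any ivy.Module and model_input, target and num_steps are hypothetical placeholders:

# hedged sketch of the generic Ivy training loop used in the ESM demo above
optimizer = ivy.SGD(lr=1e-4)

def generic_loss_fn(v):
    # forward pass with an explicit variable container, then a scalar loss
    pred = model(model_input, v=v)
    return ivy.reduce_mean((pred - target) ** 2)

for step in range(num_steps):
    # differentiate the loss w.r.t. the variables, then apply the update rule
    loss, grads = ivy.execute_with_gradients(generic_loss_fn, model.v)
    model.v = optimizer.step(model.v, grads)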
Example #18
    def render(self, mode='human'):
        """
        Renders the environment.
        The set of supported modes varies per environment. (And some
        environments do not support rendering at all.) By convention,
        if mode is:

        - human: render to the current display or terminal and
          return nothing. Usually for human consumption.
        - rgb_array: Return a numpy.ndarray with shape (x, y, 3),
          representing RGB values for an x-by-y pixel image, suitable
          for turning into a video.
        - ansi: Return a string (str) or StringIO.StringIO containing a
          terminal-style text representation. The text can include newlines
          and ANSI escape sequences (e.g. for colors).

        :param mode: Render mode, one of [human|rgb_array], default human
        :type mode: str, optional
        :return: Rendered image.
        """
        screen_width = 500
        screen_height = 500
        x_min = -1.2
        x_max = 0.6
        world_width = x_max - x_min
        scale = screen_width / world_width
        car_width = 40
        car_height = 20

        if self.viewer is None:
            # noinspection PyBroadException
            try:
                from gym.envs.classic_control import rendering
            except Exception:
                if not self._logged_headless_message:
                    print('Unable to connect to display. Running the Ivy environment in headless mode...')
                    self._logged_headless_message = True
                return

            self.viewer = rendering.Viewer(screen_width, screen_height)

            # Track.
            xs = ivy.linspace(x_min, x_max, 100)
            ys = self._height(xs)
            xys = list((ivy.to_numpy(xt).item(), ivy.to_numpy(yt).item())
                       for xt, yt in zip((xs - x_min) * scale, ys * scale))
            self.track = rendering.make_polyline(xys)
            self.track.set_linewidth(2)
            self.viewer.add_geom(self.track)

            # Car.
            clearance = 10
            l, r, t, b = -car_width / 2, car_width / 2, car_height, 0
            self.car_geom = rendering.FilledPolygon(
                [(l, b), (l, t), (r, t), (r, b)])
            self.car_geom.add_attr(
                rendering.Transform(translation=(0, clearance)))
            self.car_tr = rendering.Transform()
            self.car_geom.add_attr(self.car_tr)
            self.viewer.add_geom(self.car_geom)

            # Wheels.
            front_wheel = rendering.make_circle(car_height / 2.5)
            front_wheel.set_color(0.5, 0.5, 0.5)
            front_wheel.add_attr(
                rendering.Transform(translation=(car_width / 4, clearance)))
            front_wheel.add_attr(self.car_tr)
            self.viewer.add_geom(front_wheel)
            back_wheel = rendering.make_circle(car_height / 2.5)
            back_wheel.add_attr(
                rendering.Transform(translation=(-car_width / 4, clearance)))
            back_wheel.add_attr(self.car_tr)
            back_wheel.set_color(0.5, 0.5, 0.5)
            self.viewer.add_geom(back_wheel)

            # Flag.
            flag_x = (ivy.to_numpy(self.goal_x)[0] - x_min) * scale
            flag_y1 = ivy.to_numpy(self._height(self.goal_x))[0] * scale
            flag_y2 = flag_y1 + 50
            flagpole = rendering.Line((flag_x, flag_y1), (flag_x, flag_y2))
            self.viewer.add_geom(flagpole)
            flag = rendering.FilledPolygon(
                [(flag_x, flag_y2), (flag_x, flag_y2 - 10),
                 (flag_x + 25, flag_y2 - 5)])
            flag.set_color(0.4, 0.6, 1.)
            self.viewer.add_geom(flag)

        self.car_tr.set_translation(
            (ivy.to_numpy(self.x)[0] - x_min) * scale, ivy.to_numpy(self._height(self.x))[0] * scale)
        self.car_tr.set_rotation(ivy.to_numpy(ivy.cos(3 * self.x))[0])
        rew = ivy.to_numpy(self.get_reward()).item()
        self.car_geom.set_color(1 - rew, rew, 0.)

        return self.viewer.render(return_rgb_array=mode == 'rgb_array')
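As a usage sketch (not from the original source), the rgb_array mode described in the docstring might be used to collect frames for a video, where env is a hypothetical instance of the environment above:

# hypothetical frame capture; render returns an (H, W, 3) array, or None in headless mode
frames = []
for _ in range(100):
    frame = env.render(mode='rgb_array')
    if frame is not None:
        frames.append(frame)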
Example #19
    def render(self, mode='human'):
        """
        Renders the environment.
        The set of supported modes varies per environment. (And some
        environments do not support rendering at all.) By convention,
        if mode is:

        - human: render to the current display or terminal and
          return nothing. Usually for human consumption.
        - rgb_array: Return a numpy.ndarray with shape (x, y, 3),
          representing RGB values for an x-by-y pixel image, suitable
          for turning into a video.
        - ansi: Return a string (str) or StringIO.StringIO containing a
          terminal-style text representation. The text can include newlines
          and ANSI escape sequences (e.g. for colors).

        :param mode: Render mode, one of [human|rgb_array], default human
        :type mode: str, optional
        :return: Rendered image.
        """
        if self.viewer is None:
            # noinspection PyBroadException
            try:
                from gym.envs.classic_control import rendering
            except Exception:
                if not self._logged_headless_message:
                    print(
                        'Unable to connect to display. Running the Ivy environment in headless mode...'
                    )
                    self._logged_headless_message = True
                return
            from pyglet import gl

            class _StarGeom(rendering.Geom):
                def __init__(self, r1, r2, n):
                    super().__init__()
                    self.r1 = r1
                    self.r2 = r2
                    self.n = n

                def render1(self):
                    n = self.n * 2
                    for i in range(0, n, 2):
                        gl.glBegin(gl.GL_TRIANGLES)
                        a0 = 2 * np.pi * i / n
                        a1 = 2 * np.pi * (i + 1) / n
                        a2 = 2 * np.pi * (i - 1) / n
                        gl.glVertex3f(
                            np.cos(a0) * self.r1,
                            np.sin(a0) * self.r1, 0)
                        gl.glVertex3f(
                            np.cos(a1) * self.r2,
                            np.sin(a1) * self.r2, 0)
                        gl.glVertex3f(
                            np.cos(a2) * self.r2,
                            np.sin(a2) * self.r2, 0)
                        gl.glEnd()
                    gl.glBegin(gl.GL_POLYGON)
                    for i in range(0, n, 2):
                        a = 2 * np.pi * (i + 1) / n
                        gl.glVertex3f(
                            np.cos(a) * self.r2,
                            np.sin(a) * self.r2, 0)
                    gl.glEnd()

            class _FishGeom(rendering.Geom):
                def __init__(self):
                    super().__init__()
                    self.color = 0., 0., 0.

                def render1(self):
                    points = [[0.08910714285714288, -0.009017857142857133],
                              [0.13910714285714287, -0.04026785714285712],
                              [0.12285714285714289, 0.07098214285714288],
                              [0.08535714285714285, 0.03348214285714288],
                              [0.10535714285714287, 0.07848214285714286],
                              [0.04910714285714285, 0.13348214285714285],
                              [-0.03589285714285714, 0.11723214285714287],
                              [-0.14964285714285713, 0.08598214285714287],
                              [-0.21714285714285714, 0.023482142857142868],
                              [-0.18589285714285714, -0.004017857142857129],
                              [-0.12714285714285714, -0.11151785714285713],
                              [-0.039642857142857146, -0.15651785714285713],
                              [0.044107142857142845, -0.15651785714285713],
                              [0.12035714285714288, -0.06526785714285713]]
                    gl.glColor3f(*self.color)
                    gl.glBegin(gl.GL_POLYGON)
                    for p0, p1 in points:
                        gl.glVertex3f(p0, -p1, 0)
                    gl.glEnd()
                    points = [[-0.14964285714285713, -0.016517857142857112],
                              [-0.11214285714285714, 0.020982142857142866],
                              [-0.15839285714285714, 0.06973214285714288],
                              [-0.17089285714285712, 0.013482142857142887]]
                    gl.glColor3f(0.5, 0.4, 0.3)
                    gl.glBegin(gl.GL_POLYGON)
                    for p0, p1 in points:
                        gl.glVertex3f(p0, -p1, 0)
                    gl.glEnd()
                    points = []
                    for i in range(20):
                        ang = 2 * np.pi * i / 20
                        points.append((np.cos(ang) * 0.018 - 0.16,
                                       np.sin(ang) * 0.018 - 0.01))
                    gl.glColor3f(0, 0, 0)
                    gl.glBegin(gl.GL_POLYGON)
                    for p0, p1 in points:
                        gl.glVertex3f(p0, p1, 0)
                    gl.glEnd()

                def set_color(self, r, g, b):
                    self.color = r, g, b

            self.viewer = rendering.Viewer(500, 500)
            self.viewer.set_bounds(-1.5, 1.5, -1.5, 1.5)

            # Goal.
            goal_geom = rendering.make_circle(.2)
            self.goal_tr = rendering.Transform()
            goal_geom.add_attr(self.goal_tr)
            goal_geom.set_color(0.4, 0.6, 1.)
            self.viewer.add_geom(goal_geom)

            # Urchins.
            self.urchin_trs = []
            for _ in range(self.num_urchins):
                urchin_geom = _StarGeom(0.2, 0.05, 15)
                urchin_tr = rendering.Transform()
                self.urchin_trs.append(urchin_tr)
                urchin_geom.add_attr(urchin_tr)
                urchin_geom.set_color(0., 0., 0.)
                self.viewer.add_geom(urchin_geom)

            # Fish.
            self.fish_geom = _FishGeom()
            self.fish_tr = rendering.Transform()
            self.fish_geom.add_attr(self.fish_tr)
            self.viewer.add_geom(self.fish_geom)

        self.goal_tr.set_translation(*ivy.to_numpy(self.goal_xy).tolist())
        for urchin_tr, (x, y) in zip(self.urchin_trs,
                                     ivy.reshape(self.urchin_xys,
                                                 (self.num_urchins, 2, 1))):
            urchin_tr.set_translation(ivy.to_numpy(x)[0], ivy.to_numpy(y)[0])
        self.fish_tr.set_translation(
            *ivy.to_numpy(ivy.reshape(self.xy, (2, ))).tolist())
        rew = ivy.to_numpy(self.get_reward())[0]
        self.fish_geom.set_color(1 - rew, rew, 0.)

        return self.viewer.render(return_rgb_array=mode == 'rgb_array')
Example #20
def test_conv3d_transpose(x_n_filters_n_pad_n_outshp_n_res, dtype_str, tensor_fn, dev_str, call):
    if call in [helpers.tf_call, helpers.tf_graph_call] and 'cpu' in dev_str:
        # tf conv3d transpose does not work when CUDA is installed, but array is on CPU
        pytest.skip()
    # smoke test
    if call in [helpers.np_call, helpers.jnp_call, helpers.mx_call]:
        # numpy and jax do not yet support 3d transpose convolutions, and mxnet only supports with CUDNN
        pytest.skip()
    if call in [helpers.mx_call] and 'cpu' in dev_str:
        # mxnet only supports 3d transpose convolutions with CUDNN
        pytest.skip()
    x, filters, padding, output_shape, true_res = x_n_filters_n_pad_n_outshp_n_res
    x = tensor_fn(x, dtype_str, dev_str)
    filters = tensor_fn(filters, dtype_str, dev_str)
    true_res = tensor_fn(true_res, dtype_str, dev_str)
    ret = ivy.conv3d_transpose(x, filters, 1, padding, output_shape)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == true_res.shape
    # value test
    assert np.allclose(call(ivy.conv3d_transpose, x, filters, 1, padding, output_shape), ivy.to_numpy(true_res))
    # compilation test
    helpers.assert_compilable(ivy.conv3d_transpose)
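For orientation, a minimal call sketch; the shapes are assumptions based on the channel-last layout ivy uses elsewhere, not values taken from this test:

# hedged sketch: assumed x layout (batch, depth, height, width, in_channels)
# and assumed filter layout (fd, fh, fw, in_channels, out_channels)
x = ivy.ones((1, 3, 3, 3, 2))
filters = ivy.ones((3, 3, 3, 2, 4))
# with stride 1 and 'SAME' padding the spatial dims are preserved
out = ivy.conv3d_transpose(x, filters, 1, 'SAME', (1, 3, 3, 3, 4))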
Example #21
    def show_voxel_grid(self, voxels, interactive, cuboid_inv_ext_mats=None, cuboid_dims=None):

        if not interactive:
            return

        cuboid_inv_ext_mats = list() if cuboid_inv_ext_mats is None else cuboid_inv_ext_mats
        cuboid_dims = list() if cuboid_dims is None else cuboid_dims

        voxel_grid_data = ivy.to_numpy(voxels[0])
        res = ivy.to_numpy(voxels[2])
        bb_mins = ivy.to_numpy(voxels[3])
        rgb_grid = voxel_grid_data[..., 3:6]
        occupancy_grid = voxel_grid_data[..., -1:]

        boxes = list()
        for x, (x_slice, x_col_slice) in enumerate(zip(occupancy_grid, rgb_grid)):
            for y, (y_slice, y_col_slice) in enumerate(zip(x_slice, x_col_slice)):
                for z, (z_slice, z_col_slice) in enumerate(zip(y_slice, y_col_slice)):
                    if z_slice[0] > 0:
                        box = o3d.geometry.TriangleMesh.create_box(res[0], res[1], res[2])
                        box.vertex_colors = o3d.utility.Vector3dVector(np.ones((8, 3)) * z_col_slice)
                        xtrue = bb_mins[0] + res[0]*x
                        ytrue = bb_mins[1] + res[1]*y
                        ztrue = bb_mins[2] + res[2]*z
                        box.translate(np.array([xtrue, ytrue, ztrue]) - res/2)
                        boxes.append(box)

        all_vertices = np.concatenate([np.asarray(box.vertices) for box in boxes], 0)
        all_vertex_colors = np.concatenate([np.asarray(box.vertex_colors) for box in boxes], 0)
        all_triangles = np.concatenate([np.asarray(box.triangles) + i*8 for i, box in enumerate(boxes)], 0)
        final_mesh = o3d.geometry.TriangleMesh(o3d.utility.Vector3dVector(all_vertices),
                                               o3d.utility.Vector3iVector(all_triangles))
        final_mesh.vertex_colors = o3d.utility.Vector3dVector(all_vertex_colors)

        # add to visualizer
        self._vis.clear_geometries()
        self._vis.add_geometry(o3d.geometry.TriangleMesh.create_coordinate_frame(0.15, [0., 0., 0.]), self._first_pass)
        self._vis.add_geometry(final_mesh, self._first_pass)

        # cuboids
        self._cuboids = list()
        for cuboid_inv_ext_mat, cuboid_dim in zip(cuboid_inv_ext_mats, cuboid_dims):
            cuboid = o3d.geometry.TriangleMesh.create_box(cuboid_dim[0], cuboid_dim[1], cuboid_dim[2])
            cuboid.translate(-cuboid_dim/2)
            cuboid.paint_uniform_color(np.array([[0.], [0.], [0.]]))
            cuboid.transform(cuboid_inv_ext_mat)
            self._cuboids.append(cuboid)
            self._vis.add_geometry(cuboid, self._first_pass)

        # camera matrix
        if not self._cam_pose_initialized:
            cam_params = o3d.camera.PinholeCameraParameters()
            cam_params.extrinsic = self._cam_ext_mat
            cam_params.intrinsic = self._ctr.convert_to_pinhole_camera_parameters().intrinsic
            self._ctr.convert_from_pinhole_camera_parameters(cam_params)
            self._cam_pose_initialized = True

        # update flag
        self._first_pass = False

        # spin visualizer until key-pressed
        self._listen_for_enter_in_thread()
        while not self._pressend_enter:
            self._vis.poll_events()
        self._join_enter_listener_thread()
Example #22
    def set_rot_mat(self, rot_mat):
        inv_ext_mat = np.concatenate(
            (ivy.to_numpy(rot_mat),
             np.reshape(self._pr_obj.get_position(), (3, 1))), -1)
        self._pr_obj.set_matrix(inv_ext_mat.reshape((-1,)).tolist())
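The inv_ext_mat built above is a 3x4 [R|t] pose matrix. Extending it to the full 4x4 homogeneous form is a single row of padding; a generic sketch (not part of the snippet):

# hedged sketch: extend [R|t] (3x4) to a 4x4 homogeneous transform
hom_mat = np.concatenate((inv_ext_mat, np.array([[0., 0., 0., 1.]])), 0)
# a 3d point p then maps through the transform as (hom_mat @ np.append(p, 1.))[:3]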
Example #23
    def test_single_wrapped(self, dev_str, f, call, num_processes):

        if call is helpers.mx_call and num_processes == 2:
            pytest.skip()

        self._init(num_processes)

        assert np.allclose(ivy.to_numpy(self._dataset[3].x),
                           ivy.to_numpy(ivy.array([9, 0, 1])))
        assert np.allclose(ivy.to_numpy(self._dataset[4].x),
                           ivy.to_numpy(ivy.array([2, 3, 4])))
        assert np.allclose(ivy.to_numpy(self._dataset[5].x),
                           ivy.to_numpy(ivy.array([5, 6, 7])))
        assert np.allclose(ivy.to_numpy(self._dataset[6].x),
                           ivy.to_numpy(ivy.array([8, 9, 0])))
        assert np.allclose(ivy.to_numpy(self._dataset[7].x),
                           ivy.to_numpy(ivy.array([1, 2, 3])))
        assert np.allclose(ivy.to_numpy(self._dataset[8].x),
                           ivy.to_numpy(ivy.array([4, 5, 6])))
        assert np.allclose(ivy.to_numpy(self._dataset[9].x),
                           ivy.to_numpy(ivy.array([7, 8, 9])))

        assert np.allclose(ivy.to_numpy(self._dataset[-1].x),
                           ivy.to_numpy(ivy.array([7, 8, 9])))
        assert np.allclose(ivy.to_numpy(self._dataset[-2].x),
                           ivy.to_numpy(ivy.array([4, 5, 6])))
        assert np.allclose(ivy.to_numpy(self._dataset[-3].x),
                           ivy.to_numpy(ivy.array([1, 2, 3])))
        assert np.allclose(ivy.to_numpy(self._dataset[-4].x),
                           ivy.to_numpy(ivy.array([8, 9, 0])))
        assert np.allclose(ivy.to_numpy(self._dataset[-5].x),
                           ivy.to_numpy(ivy.array([5, 6, 7])))

        # close
        self._dataset.close()
        del self._dataset
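The expected values above follow a simple modular wrap: with the underlying data 0..9 grouped in windows of 3, index i maps to elements (3*i + j) % 10 for j in 0..2, and negative indices wrap the same way. A sketch of the assumed indexing rule (not the actual Dataset implementation):

# hedged sketch reproducing the wrap-around indexing the assertions imply
data = list(range(10))
window = 3

def wrapped_item(i):
    return [data[(window * i + j) % len(data)] for j in range(window)]

assert wrapped_item(3) == [9, 0, 1]
assert wrapped_item(-1) == [7, 8, 9]
Example #24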
    def __init__(self, interactive, try_use_sim):
        super().__init__(interactive, try_use_sim)

        # initialize scene
        if self.with_pyrep:
            self._spherical_vision_sensor.remove()
            for i in range(6):
                self._vision_sensors[i].remove()
                self._vision_sensor_bodies[i].remove()
                [ray.remove() for ray in self._vision_sensor_rays[i]]
            self._box.set_position(np.array([0.55, 0, 0.9]))
            self._robot.set_position(np.array([0.85003, -0.024983, 0.77837]))
            self._robot._ik_target.set_position(np.array([0, 0, -1]))
            self._robot.get_tip().set_parent(self._robot._ik_target)
            self._robot.get_tip().set_position(np.array([0, 0, -1]))
            robot_start_config = ivy.array(
                [100., 100., 240., 180., 180., 120.]) * np.pi / 180
            [
                j.set_joint_position(p, False)
                for j, p in zip(self._robot.joints,
                                ivy.to_numpy(robot_start_config).tolist())
            ]
            robot_target_config = ivy.array(
                [260., 100., 220., 0., 180., 45.]) * np.pi / 180
            self._robot_target.set_position(
                np.array([0.85003, -0.024983, 0.77837]))
            self._robot_target._ik_target.set_position(np.array([0, 0, -1]))
            self._robot_target.get_tip().set_parent(
                self._robot_target._ik_target)
            self._robot_target.get_tip().set_position(np.array([0, 0, -1]))
            [
                j.set_joint_position(p, False)
                for j, p in zip(self._robot_target.joints,
                                ivy.to_numpy(robot_target_config).tolist())
            ]
            self._default_camera.set_position(
                np.array([0.094016, -1.2767, 1.7308]))
            self._default_camera.set_orientation(
                np.array([i * np.pi / 180
                          for i in [-121.32, 27.760, -164.18]]))

            input(
                '\nScene initialized.\n\n'
                'The simulator visualizer can be translated and rotated by clicking either the left mouse button or the wheel, '
                'and then dragging the mouse.\n'
                'Scrolling the mouse wheel zooms the view in and out.\n\n'
                'You can click on any object either in the scene or the left hand panel, '
                'then select the box icon with four arrows in the top panel of the simulator, '
                'and then drag the object around dynamically.\n'
                'Starting to drag and then holding ctrl allows you to also drag the object up and down.\n'
                'Clicking the top icon with a box and two rotating arrows similarly allows rotation of the object.\n\n'
                'The joint angles of either the robot or target robot configuration can also be changed.\n'
                'To do this, open the Mico or MicoTarget drop-downs on the left, and click on one of the joints "Mico_jointx", '
                'and then click on the magnifying glass over a box on the left-most panel.\n'
                'In the window that opens, change the value in the field Position [deg], and close the window again.\n\n'
                'Once you have arranged the scene as desired, press enter in the terminal to continue with the demo...\n'
            )

            # primitive scene
            self.setup_primitive_scene()

            # robot configs
            robot_start_config = ivy.array(self._robot.get_joint_positions(),
                                           'float32')
            robot_target_config = ivy.array(
                self._robot_target.get_joint_positions(), 'float32')

            # ivy robot
            self._ivy_manipulator = MicoManipulator(
                ivy_mech.make_transformation_homogeneous(
                    ivy.reshape(ivy.array(self._robot_base.get_matrix()),
                                (3, 4))))

            # spline path
            interpolated_joint_path = ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config, 100),
                (1, 0))
            multi_spline_points = ivy.transpose(
                self._ivy_manipulator.sample_links(interpolated_joint_path),
                (1, 0, 2))
            multi_spline_sdf_vals = ivy.reshape(
                self.sdf(ivy.reshape(multi_spline_points, (-1, 3))),
                (-1, 100, 1))
            self.update_path_visualization(multi_spline_points,
                                           multi_spline_sdf_vals, None)

            # public objects
            self.ivy_manipulator = self._ivy_manipulator
            self.robot_start_config = robot_start_config
            self.robot_target_config = robot_target_config

            # wait for user input
            self._user_prompt(
                '\nInitialized scene with a robot and a target robot configuration to reach.'
                '\nPress enter in the terminal to use method ivy_robot.interpolate_spline_points '
                'to plan a spline path which reaches the target configuration whilst avoiding the objects in the scene...\n'
            )

        else:

            # primitive scene
            self.setup_primitive_scene_no_sim(box_pos=np.array([0.55, 0, 0.9]))

            # ivy robot
            base_inv_ext_mat = ivy.array([[1, 0, 0, 0.84999895],
                                          [0, 1, 0, -0.02500308],
                                          [0, 0, 1, 0.70000124]])
            self.ivy_manipulator = MicoManipulator(
                ivy_mech.make_transformation_homogeneous(base_inv_ext_mat))
            self.robot_start_config = ivy.array(
                [100., 100., 240., 180., 180., 120.]) * np.pi / 180
            self.robot_target_config = ivy.array(
                [260., 100., 220., 0., 180., 45.]) * np.pi / 180

            # message
            print(
                '\nInitialized dummy scene with a robot and a target robot configuration to reach.'
                '\nClose the visualization window to use method ivy_robot.interpolate_spline_points '
                'to plan a spline path which reaches the target configuration whilst avoiding the objects in the scene...\n'
            )

            # plot scene before rotation
            if interactive:
                plt.imshow(
                    mpimg.imread(
                        os.path.join(
                            os.path.dirname(os.path.realpath(__file__)),
                            'msp_no_sim', 'path_0.png')))
                plt.show()

        # message
        print('\nOptimizing spline path...\n')
Example #25
    def test_single(self, dev_str, f, call, array_shape, num_processes):

        if call is helpers.mx_call and num_processes == 2:
            pytest.skip()

        ivy.seed(0)
        np.random.seed(0)
        self._init(array_shape, num_processes)

        assert list(self._dataset[0].x.shape) == array_shape
        assert list(self._dataset[4].x.shape) == array_shape
        assert list(self._dataset[8].x.shape) == array_shape

        # with shuffling enabled, at least one item should differ from the
        # un-shuffled source data
        checks = [
            not np.allclose(ivy.to_numpy(self._dataset[i].x),
                            ivy.to_numpy(self._x[i])) for i in range(9)
        ]
        assert any(checks)

        # close
        self._dataset.close()
        del self._dataset
Example #26
    def _write_scalar_summaries(self, data_loader, network, training_batch,
                                global_step):
        logging.info('step ' + str(self._global_step) + ': cost = ' +
                     str(ivy.to_numpy(self._total_cost)))
Example #27
    def render(self, mode='human'):
        """
        Renders the environment.
        The set of supported modes varies per environment. (And some
        environments do not support rendering at all.) By convention,
        if mode is:

        - human: render to the current display or terminal and
          return nothing. Usually for human consumption.
        - rgb_array: Return a numpy.ndarray with shape (x, y, 3),
          representing RGB values for an x-by-y pixel image, suitable
          for turning into a video.
        - ansi: Return a string (str) or StringIO.StringIO containing a
          terminal-style text representation. The text can include newlines
          and ANSI escape sequences (e.g. for colors).

        :param mode: Render mode, one of [human|rgb_array], default human
        :type mode: str, optional
        :return: Rendered image.
        """
        screen_width = 500
        screen_height = 500
        world_width = 4
        scale = screen_width / world_width
        pole_width = 10.0
        pole_len = scale * (2 * self.pole_length)
        cart_width = 50.0
        cart_height = 30.0
        cart_y = screen_height / 2

        if self.viewer is None:
            # noinspection PyBroadException
            try:
                from gym.envs.classic_control import rendering
            except Exception:
                if not self._logged_headless_message:
                    print(
                        'Unable to connect to display. Running the Ivy environment in headless mode...'
                    )
                    self._logged_headless_message = True
                return

            self.viewer = rendering.Viewer(screen_width, screen_height)

            # Track.
            track = rendering.Line((0., cart_y), (screen_width, cart_y))
            track.set_color(0., 0., 0.)
            self.viewer.add_geom(track)

            # Cart.
            l = -cart_width / 2
            r = cart_width / 2
            t = cart_height / 2
            b = -cart_height / 2
            cart_geom = rendering.FilledPolygon([(l, b), (l, t), (r, t),
                                                 (r, b)])
            self.cart_tr = rendering.Transform()
            cart_geom.add_attr(self.cart_tr)
            cart_geom.set_color(0., 0., 0.)
            self.viewer.add_geom(cart_geom)

            # Pole.
            l = -pole_width / 2
            r = pole_width / 2
            t = pole_len - pole_width / 2
            b = -pole_width / 2
            self.pole_geom = rendering.FilledPolygon([(l, b), (l, t), (r, t),
                                                      (r, b)])
            self.pole_tr = rendering.Transform(translation=(0, 0))
            self.pole_geom.add_attr(self.pole_tr)
            self.pole_geom.add_attr(self.cart_tr)
            self.viewer.add_geom(self.pole_geom)

            # Axle.
            axle_geom = rendering.make_circle(pole_width / 2)
            axle_geom.add_attr(self.pole_tr)
            axle_geom.add_attr(self.cart_tr)
            axle_geom.set_color(0.5, 0.5, 0.5)
            self.viewer.add_geom(axle_geom)

        cart_x = ivy.to_numpy(self.x * scale + screen_width / 2.0)[0]
        self.cart_tr.set_translation(cart_x, cart_y)
        self.pole_tr.set_rotation(-ivy.to_numpy(self.angle)[0])
        rew = ivy.to_numpy(self.get_reward())[0]
        self.pole_geom.set_color(1 - rew, rew, 0.)

        return self.viewer.render(return_rgb_array=mode == 'rgb_array')