def test_container_to_disk_shuffle_and_from_disk():
    for lib, call in helpers.calls:
        if call in [helpers.tf_graph_call, helpers.mx_graph_call]:
            # container disk saving requires eager execution
            continue
        save_filepath = 'container_on_disk.hdf5'
        dict_in = {'a': ivy.array([1, 2, 3], f=lib),
                   'b': {'c': ivy.array([1, 2, 3], f=lib),
                         'd': ivy.array([1, 2, 3], f=lib)}}
        container = Container(dict_in)

        # saving
        container.to_disk(save_filepath, max_batch_size=3)
        assert os.path.exists(save_filepath)

        # shuffling
        Container.shuffle_h5_file(save_filepath)

        # loading
        container_shuffled = Container.from_disk(save_filepath, lib, slice(3))

        # testing
        data = np.array([1, 2, 3])
        random.seed(0)
        random.shuffle(data)
        assert (ivy.to_numpy(container_shuffled['a'], lib) == data).all()
        assert (ivy.to_numpy(container_shuffled.a, lib) == data).all()
        assert (ivy.to_numpy(container_shuffled['b']['c'], lib) == data).all()
        assert (ivy.to_numpy(container_shuffled.b.c, lib) == data).all()
        assert (ivy.to_numpy(container_shuffled['b']['d'], lib) == data).all()
        assert (ivy.to_numpy(container_shuffled.b.d, lib) == data).all()
        os.remove(save_filepath)
def _get_containers_w_filepath_img_entries_as_tensor_slices(self, container_filepaths):
    # noinspection PyUnusedLocal
    all_containers = list()
    logging.info('loading containers into RAM...')
    num_seqs = len(container_filepaths)
    max_seq_len = max(max(len(item) for item in container_filepaths), self._window_size)
    for seq_idx, seq in enumerate(container_filepaths):
        if seq_idx % 10000 == 0:
            logging.info('sequence {} of {}'.format(seq_idx, num_seqs))
        window_containers = list()
        container = None
        seq_len = 0
        for seq_len, filepath in enumerate(seq):
            if filepath == '':
                seq_len -= 1
                break
            with open(filepath) as fp:
                container_dict = json.load(fp)
            container = Container(container_dict).map(self._to_tensor)
            window_containers.append(container)
        window_containers += [container] * (max_seq_len - seq_len - 1)  # padding for shorter sequences
        joined_window_containers = Container.concat(window_containers, 1)
        all_containers.append(joined_window_containers)
    return Container.concat(all_containers, 0)
def _parse_json_strings(self, containers):
    json_strings_stack = containers.json_str
    highest_idx_entry = len([item for item in containers.json_str if item != '']) - 1
    json_container_stack = [
        Container(json.loads(json_str)).map(self._to_tensor)[0]
        if json_str != ''
        else Container(json.loads(json_strings_stack[highest_idx_entry])).map(self._to_tensor)[0]
        for json_str in json_strings_stack]
    return Container.concat(json_container_stack, 0)
def test_gradient_descent_update():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            # mxnet symbolic does not support ivy gradient functions
            continue
        ws = Container({'w': ivy_grad.variable(ivy_gen.array([3.], f=lib))})
        dcdws = Container({'w': ivy_gen.array([6.], f=lib)})
        w_new = ivy_gen.array(
            ivy_grad.gradient_descent_update(ws, dcdws, 0.1, f=lib)['w'], f=lib)
        # w_new = w - lr * dc/dw = 3. - 0.1 * 6. = 2.4
        assert np.allclose(ivy_gen.to_numpy(w_new), np.array([2.4]))
def main(interactive=True, try_use_sim=True, f=None):

    # config
    this_dir = os.path.dirname(os.path.realpath(__file__))
    f = choose_random_framework(excluded=['numpy']) if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    lr = 0.5
    num_anchors = 3
    num_sample_points = 100

    # spline start
    anchor_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, 2 + num_anchors), -1), 'float32')
    query_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, num_sample_points), -1), 'float32')

    # learnable parameters
    robot_start_config = ivy.array(ivy.cast(sim.robot_start_config, 'float32'))
    robot_target_config = ivy.array(ivy.cast(sim.robot_target_config, 'float32'))
    learnable_anchor_vals = ivy.variable(
        ivy.cast(
            ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config,
                             2 + num_anchors)[..., 1:-1], (1, 0)), 'float32'))

    # optimizer
    optimizer = ivy.SGD(lr=lr)

    # optimize
    it = 0
    colliding = True
    clearance = 0
    joint_query_vals = None
    while colliding:
        total_cost, grads, joint_query_vals, link_positions, sdf_vals = ivy.execute_with_gradients(
            lambda xs: compute_cost_and_sdfs(
                xs['w'], anchor_points, robot_start_config, robot_target_config,
                query_points, sim),
            Container({'w': learnable_anchor_vals}))
        colliding = ivy.reduce_min(sdf_vals[2:]) < clearance
        sim.update_path_visualization(
            link_positions, sdf_vals,
            os.path.join(this_dir, 'msp_no_sim', 'path_{}.png'.format(it)))
        learnable_anchor_vals = optimizer.step(
            Container({'w': learnable_anchor_vals}), grads)['w']
        it += 1
    sim.execute_motion(joint_query_vals)
    sim.close()
    unset_framework()
def __init__(self, dev_str, v=None):
    """
    Initialize Ivy layer, which is a stateful object consisting of trainable variables.

    :param dev_str: device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu' etc.
    :type dev_str: str
    :param v: Ivy container of trainable variables. Created internally by default.
    :type v: ivy container, optional
    """
    self._dev_str = dev_str
    if v is None:
        self.v = Container(self._find_and_create_variables())
    else:
        self.v = Container(v)
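# The sketch below is illustrative and not part of the original source: it shows
# the two construction paths of the base class above, using ivy.Linear (as in the
# tests later in this section) as the concrete layer. The key names 'w' and 'b'
# are assumptions mirroring the containers used in those tests.
def _layer_construction_sketch():
    layer = ivy.Linear(3, 2)  # v is None, so variables are created internally
    v = Container({'w': ivy.zeros([2, 3]), 'b': ivy.zeros([2])})
    layer_explicit = ivy.Linear(3, 2, v=v)  # caller-supplied variables are wrapped in a Container
    return layer.v, layer_explicit.v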
def _convert_tuner_spec(spec, key_chain=''):
    new_spec = Container()
    for i, (key, val) in enumerate(spec.items()):
        # build the key chain for this entry without mutating the parent chain,
        # which would otherwise leak into sibling iterations
        new_key_chain = (key_chain + '/' + key) if key_chain != '' else key
        spec_key = [sk for sk in SPEC_KEYS if sk in new_key_chain]
        if not spec_key:
            new_spec[key] = val
            continue
        if not _is_leaf(val):
            if not isinstance(val, Container):
                new_spec[key] = val
            else:
                new_spec[key] = _convert_tuner_spec(val, new_key_chain)
            continue
        if _is_numeric_leaf(val):
            new_spec[key] = _convert_numeric_leaf(val)
        elif _is_config_leaf(val):
            keys = key.split('_AND_')
            if len(keys) == 1:
                new_spec[keys[0]] = _convert_config_leaf(val)
            else:
                new_spec[key] = _convert_multi_config_leaf(keys, val)
        else:
            raise Exception('invalid leaf')
    return new_spec
def test_container_prune_empty():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': {}, 'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        container_pruned = container.prune_empty()
        assert (fn(container_pruned['a']) == fn(ivy.array([[1]], f=lib)))[0, 0]
        assert (fn(container_pruned.a) == fn(ivy.array([[1]], f=lib)))[0, 0]
        assert (fn(container_pruned['b']['d']) == fn(ivy.array([[3]], f=lib)))[0, 0]
        assert (fn(container_pruned.b.d) == fn(ivy.array([[3]], f=lib)))[0, 0]
        assert ('c' not in container_pruned['b'].keys())

        def _test_exception(container_in):
            try:
                _ = container_in.b.c
                return False
            except AttributeError:
                return True

        assert _test_exception(container_pruned)
def test_container_shuffle():
    for lib, call in helpers.calls:
        if call is helpers.tf_graph_call:
            # tf.random.set_seed is not compiled. The shuffle is then not
            # aligned between container items.
            continue
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {'a': ivy.array([1, 2, 3], f=lib),
                   'b': {'c': ivy.array([1, 2, 3], f=lib),
                         'd': ivy.array([1, 2, 3], f=lib)}}
        container = Container(dict_in)
        container_shuffled = container.shuffle(0)
        data = ivy.array([1, 2, 3], f=lib)
        ivy.core.random.seed(f=lib)
        shuffled_data = ivy.core.random.shuffle(data)
        assert np.array(fn(container_shuffled['a']) == fn(shuffled_data)).all()
        assert np.array(fn(container_shuffled.a) == fn(shuffled_data)).all()
        assert np.array(fn(container_shuffled['b']['c']) == fn(shuffled_data)).all()
        assert np.array(fn(container_shuffled.b.c) == fn(shuffled_data)).all()
        assert np.array(fn(container_shuffled['b']['d']) == fn(shuffled_data)).all()
        assert np.array(fn(container_shuffled.b.d) == fn(shuffled_data)).all()
def test_container_expand_dims():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2], f=lib),
                         'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        container_expanded_dims = container.expand_dims(0)
        assert (fn(container_expanded_dims['a']) == fn(ivy.array([[1]], f=lib)))[0, 0]
        assert (fn(container_expanded_dims.a) == fn(ivy.array([[1]], f=lib)))[0, 0]
        assert (fn(container_expanded_dims['b']['c']) == fn(ivy.array([[2]], f=lib)))[0, 0]
        assert (fn(container_expanded_dims.b.c) == fn(ivy.array([[2]], f=lib)))[0, 0]
        assert (fn(container_expanded_dims['b']['d']) == fn(ivy.array([[3]], f=lib)))[0, 0]
        assert (fn(container_expanded_dims.b.d) == fn(ivy.array([[3]], f=lib)))[0, 0]
def test_linear_layer(bs_ic_oc_target, with_v, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    batch_shape, input_channels, output_channels, target = bs_ic_oc_target
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels),
        'float32')
    if with_v:
        np.random.seed(0)
        wlim = (6 / (output_channels + input_channels)) ** 0.5
        w = ivy.variable(
            ivy.array(
                np.random.uniform(-wlim, wlim, (output_channels, input_channels)),
                'float32'))
        b = ivy.variable(ivy.zeros([output_channels]))
        v = Container({'w': w, 'b': b})
    else:
        v = None
    linear_layer = ivy.Linear(input_channels, output_channels, v=v)
    ret = linear_layer(x)

    # type test
    assert ivy.is_array(ret)

    # cardinality test
    assert ret.shape == tuple(batch_shape + [output_channels])

    # value test
    if not with_v:
        return
    assert np.allclose(call(linear_layer, x), np.array(target))

    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    helpers.assert_compilable(linear_layer)
def __init__(self,
             img_meas: Dict[str, ESMCamMeasurement],
             agent_rel_mat: ivy.Array,
             control_mean: ivy.Array = None,
             control_cov: ivy.Array = None):
    """
    Create esm observation container

    :param img_meas: dict of ESMCamMeasurement objects, with keys for camera names.
    :type img_meas: Ivy container
    :param agent_rel_mat: The pose of the agent relative to the previous pose, in matrix form
                          *[batch_size, timesteps, 3, 4]*.
    :type agent_rel_mat: array
    :param control_mean: The pose of the agent relative to the previous pose, in rotation vector
                         pose form. Inferred from agent_rel_mat if None. *[batch_size, timesteps, 6]*
    :type control_mean: array, optional
    :param control_cov: The covariance of the agent relative pose, in rotation vector form.
                        Assumed all zero if None. *[batch_size, timesteps, 6, 6]*.
    :type control_cov: array, optional
    """
    self['img_meas'] = Container(img_meas)
    agent_rel_mat = _pad_to_batch_n_time_dims(agent_rel_mat, 4)
    self['agent_rel_mat'] = agent_rel_mat
    if control_mean is None:
        control_mean = ivy_mech.mat_pose_to_rot_vec_pose(agent_rel_mat)
    else:
        control_mean = _pad_to_batch_n_time_dims(control_mean, 3)
    self['control_mean'] = control_mean
    if control_cov is None:
        control_cov = ivy.tile(
            ivy.expand_dims(ivy.zeros_like(control_mean), -1), (1, 1, 1, 6))
    else:
        control_cov = _pad_to_batch_n_time_dims(control_cov, 4)
    self['control_cov'] = control_cov
def test_lstm_layer(b_t_ic_hc_otf_sctv, with_v, with_initial_state, dtype_str,
                    tensor_fn, dev_str, call):
    # smoke test
    b, t, input_channels, hidden_channels, output_true_flat, state_c_true_val = b_t_ic_hc_otf_sctv
    x = ivy.cast(
        ivy.linspace(ivy.zeros([b, t]), ivy.ones([b, t]), input_channels), 'float32')
    if with_initial_state:
        init_h = ivy.ones([b, hidden_channels])
        init_c = ivy.ones([b, hidden_channels])
        initial_state = ([init_h], [init_c])
    else:
        initial_state = None
    if with_v:
        kernel = ivy.variable(ivy.ones([input_channels, 4 * hidden_channels]) * 0.5)
        recurrent_kernel = ivy.variable(ivy.ones([hidden_channels, 4 * hidden_channels]) * 0.5)
        v = Container({'input': {'layer_0': {'w': kernel}},
                       'recurrent': {'layer_0': {'w': recurrent_kernel}}})
    else:
        v = None
    lstm_layer = ivy.LSTM(input_channels, hidden_channels, v=v)
    output, (state_h, state_c) = lstm_layer(x, initial_state=initial_state)

    # type test
    assert ivy.is_array(output)
    assert ivy.is_array(state_h[0])
    assert ivy.is_array(state_c[0])

    # cardinality test
    assert output.shape == (b, t, hidden_channels)
    assert state_h[0].shape == (b, hidden_channels)
    assert state_c[0].shape == (b, hidden_channels)

    # value test
    if not with_v or not with_initial_state:
        return
    output_true = np.tile(
        np.asarray(output_true_flat).reshape((b, t, 1)), (1, 1, hidden_channels))
    state_c_true = np.ones([b, hidden_channels]) * state_c_true_val
    output, (state_h, state_c) = call(lstm_layer, x, initial_state=initial_state)
    assert np.allclose(output, output_true, atol=1e-6)
    assert np.allclose(state_c, state_c_true, atol=1e-6)

    # compilation test
    if call in [helpers.torch_call]:
        # this is not a backend implemented function
        pytest.skip()
    helpers.assert_compilable(ivy.lstm_update)
def test_execute_with_gradients():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            # mxnet symbolic does not support ivy gradient functions
            continue

        # func with single return val
        func = lambda xs_in: (xs_in['w'] * xs_in['w'])[0]
        xs = Container({'w': ivy_grad.variable(ivy_gen.array([3.], f=lib))})
        y, dydxs = call(ivy_grad.execute_with_gradients, func, xs, f=lib)
        assert np.allclose(y, np.array(9.))
        if call is helpers.np_call:
            # numpy doesn't support autodiff
            assert dydxs is None
        else:
            assert np.allclose(lib.to_numpy(dydxs['w']), np.array([6.]))

        # func with multi return vals
        func = lambda xs_in: ((xs_in['w'] * xs_in['w'])[0], xs_in['w'] * 1.5)
        xs = Container({'w': ivy_grad.variable(ivy_gen.array([3.], f=lib))})
        y, dydxs, extra_out = call(ivy_grad.execute_with_gradients, func, xs, f=lib)
        assert np.allclose(y, np.array(9.))
        assert np.allclose(extra_out, np.array([4.5]))
        if call is helpers.np_call:
            # numpy doesn't support autodiff
            assert dydxs is None
        else:
            assert np.allclose(lib.to_numpy(dydxs['w']), np.array([6.]))

        # func with multi weights vals
        func = lambda xs_in: (xs_in['w1'] * xs_in['w2'])[0]
        xs = Container({'w1': ivy_grad.variable(ivy_gen.array([3.], f=lib)),
                        'w2': ivy_grad.variable(ivy_gen.array([5.], f=lib))})
        y, dydxs = call(ivy_grad.execute_with_gradients, func, xs, f=lib)
        assert np.allclose(y, np.array(15.))
        if call is helpers.np_call:
            # numpy doesn't support autodiff
            assert dydxs is None
        else:
            assert np.allclose(lib.to_numpy(dydxs['w1']), np.array([5.]))
            assert np.allclose(lib.to_numpy(dydxs['w2']), np.array([3.]))
def test_sgd_optimizer(bs_ic_oc_target, with_v, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels, target = bs_ic_oc_target
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels),
        'float32')
    if with_v:
        np.random.seed(0)
        wlim = (6 / (output_channels + input_channels)) ** 0.5
        w = ivy.variable(
            ivy.array(
                np.random.uniform(-wlim, wlim, (output_channels, input_channels)),
                'float32'))
        b = ivy.variable(ivy.zeros([output_channels]))
        v = Container({'w': w, 'b': b})
    else:
        v = None
    linear_layer = ivy.Linear(input_channels, output_channels, v=v)

    def loss_fn(v_):
        out = linear_layer(x, v=v_)
        return ivy.reduce_mean(out)[0]

    # optimizer
    optimizer = ivy.SGD()

    # train
    loss_tm1 = 1e12
    loss = None
    grads = None
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, linear_layer.v)
        linear_layer.v = optimizer.step(linear_layer.v, grads)
        assert loss < loss_tm1
        loss_tm1 = loss

    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)

    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1,)
    else:
        assert loss.shape == ()

    # value test
    assert ivy.reduce_max(ivy.abs(grads.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.w)) > 0

    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    helpers.assert_compilable(loss_fn)
def test_adam_update():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            # mxnet symbolic does not support ivy gradient functions
            continue
        ws = Container({'w': ivy_grad.variable(ivy_gen.array([3.], f=lib))})
        dcdws = Container({'w': ivy_gen.array([6.], f=lib)})
        mw = dcdws
        vw = dcdws.map(lambda x, _: x ** 2)
        w_new = ivy_gen.array(
            ivy_grad.adam_update(ws, dcdws, 0.1, mw, vw, lib.array(1), f=lib)[0]['w'],
            f=lib)
        # assuming standard Adam defaults (beta1=0.9, beta2=0.999), step 1 gives
        # w - lr * m_hat / (sqrt(v_hat) + eps) = 3. - 0.1 * 60. / sqrt(36000.) ~= 2.96837726
        assert np.allclose(ivy_gen.to_numpy(w_new), np.array([2.96837726]))
def test_container_to_random():
    for lib, call in helpers.calls:
        dict_in = {'a': ivy.array([1.], f=lib),
                   'b': {'c': ivy.array([2.], f=lib),
                         'd': ivy.array([3.], f=lib)}}
        container = Container(dict_in)
        random_container = container.to_random(lib)
        for (key, value), orig_value in zip(
                random_container.to_iterator(),
                [ivy.array([2], f=lib), ivy.array([3], f=lib), ivy.array([4], f=lib)]):
            assert call(ivy.shape, value, f=lib) == call(ivy.shape, orig_value, f=lib)
def test_container_map():
    for lib, call in helpers.calls:
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2], f=lib),
                         'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        container_iterator = container.map(lambda x, _: x + 1).to_iterator()
        for (key, value), expected_value in zip(
                container_iterator,
                [ivy.array([2], f=lib), ivy.array([3], f=lib), ivy.array([4], f=lib)]):
            assert call(lambda x: x, value) == call(lambda x: x, expected_value)
def execute_with_gradients(func, xs):
    with _tf.GradientTape() as tape:
        func_ret = func(xs)
    if isinstance(func_ret, tuple):
        y = func_ret[0]
        rest = func_ret[1:]
    else:
        y = func_ret
        rest = tuple()
    grads = Container(tape.gradient(y, xs))
    return (y, grads, *rest)
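# Illustrative usage sketch (an assumption, not part of the original source) for
# the TensorFlow implementation above: differentiate a simple quadratic with
# respect to a container of variables. The key 'w' follows the convention used
# by the tests in this section.
def _tf_execute_with_gradients_sketch():
    xs = Container({'w': _tf.Variable([3.])})
    y, grads = execute_with_gradients(lambda xs_in: xs_in['w'] * xs_in['w'], xs)[:2]
    return y, grads['w']  # dy/dw = 2 * w = [6.]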
def test_container_to_iterator():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2], f=lib),
                         'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        container_iterator = container.to_iterator()
        for (key, value), expected_value in zip(
                container_iterator,
                [ivy.array([1], f=lib), ivy.array([2], f=lib), ivy.array([3], f=lib)]):
            assert fn(value) == fn(expected_value)
def test_container_at_key_chain(dev_str, call):
    dict_in = {'a': ivy.array([1]),
               'b': {'c': ivy.array([2]),
                     'd': ivy.array([3])}}
    container = Container(dict_in)
    sub_container = container.at_key_chain('b')
    assert (sub_container['c'] == ivy.array([2]))[0]
    sub_container = container.at_key_chain('b/c')
    assert (sub_container == ivy.array([2]))[0]
def execute_with_gradients(func, xs):
    xs = xs.to_dict()
    func_ret = func(xs)
    if isinstance(func_ret, tuple):
        y = func_ret[0]
        rest = func_ret[1:]
        grad_fn = lambda x_in: func(x_in)[0]
    else:
        y = func_ret
        rest = tuple()
        grad_fn = func
    grads = Container(_jax.grad(grad_fn)(xs))
    return (y, grads, *rest)
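# Analogous sketch (again an assumption, and one that presumes jax.numpy is
# imported as _jnp) for the JAX implementation above: jax.grad requires a scalar
# output, so the function indexes its result down to a scalar, mirroring the
# pattern used in the tests in this section.
def _jax_execute_with_gradients_sketch():
    xs = Container({'w': _jnp.array([3.])})
    y, grads = execute_with_gradients(lambda xs_in: (xs_in['w'] * xs_in['w'])[0], xs)[:2]
    return y, grads['w']  # dy/dw = 2 * w = [6.]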
def test_container_at_key_chain():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2], f=lib),
                         'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        sub_container = container.at_key_chain('b')
        assert (fn(sub_container['c']) == fn(ivy.array([2], f=lib)))[0]
        sub_container = container.at_key_chain('b/c')
        assert (fn(sub_container) == fn(ivy.array([2], f=lib)))[0]
def __call__(self, *args, v=None, **kwargs):
    """
    The forward pass of the layer, treating the layer instance as a callable function.
    """
    if v is not None:
        v_orig = self.v
        self.v = Container(v)
        res = self._forward(*args, **kwargs)
        self.v = v_orig
        return res
    if hasattr(self.__call__, 'wrapped'):
        return self.__call__(*args, **kwargs)
    return self._forward(*args, **kwargs)
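# Sketch (an assumption, not from the original source) of what the v override
# above enables: a single forward pass with substitute weights, as in the
# functional loss_fn used by the SGD optimizer test earlier in this section,
# with the layer's own variables restored afterwards.
def _call_with_override_sketch(layer, x):
    zeroed = layer.v.map(lambda arr, _: ivy.zeros_like(arr))
    out_zeroed = layer(x, v=zeroed)  # temporary variables for this pass only
    out_default = layer(x)           # layer.v was restored by __call__
    return out_zeroed, out_default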
def test_container_with_entries_as_lists():
    for lib, call in helpers.calls:
        if call in [helpers.tf_graph_call, helpers.mx_graph_call]:
            # to_list() requires eager execution
            continue
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2.], f=lib), 'd': 'some string'}}
        container = Container(dict_in)
        container_w_list_entries = container.with_entries_as_lists(lib)
        for (key, value), expected_value in zip(
                container_w_list_entries.to_iterator(),
                [[1], [2.], 'some string']):
            assert value == expected_value
def test_container_dtype():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            # MXNet symbolic does not support dtype
            continue
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2.], f=lib),
                         'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        dtype_container = container.dtype()
        for (key, value), expected_value in zip(
                dtype_container.to_iterator(),
                [ivy.array([1], f=lib).dtype,
                 ivy.array([2.], f=lib).dtype,
                 ivy.array([3], f=lib).dtype]):
            assert value == expected_value
def test_container_to_disk_shuffle_and_from_disk(dev_str, call):
    if call in [helpers.tf_graph_call]:
        # container disk saving requires eager execution
        pytest.skip()
    save_filepath = 'container_on_disk.hdf5'
    dict_in = {'a': ivy.array([1, 2, 3]),
               'b': {'c': ivy.array([1, 2, 3]),
                     'd': ivy.array([1, 2, 3])}}
    container = Container(dict_in)

    # saving
    container.to_disk(save_filepath, max_batch_size=3)
    assert os.path.exists(save_filepath)

    # shuffling
    Container.shuffle_h5_file(save_filepath)

    # loading
    container_shuffled = Container.from_disk(save_filepath, slice(3))

    # testing
    data = np.array([1, 2, 3])
    random.seed(0)
    random.shuffle(data)
    assert (ivy.to_numpy(container_shuffled['a']) == data).all()
    assert (ivy.to_numpy(container_shuffled.a) == data).all()
    assert (ivy.to_numpy(container_shuffled['b']['c']) == data).all()
    assert (ivy.to_numpy(container_shuffled.b.c) == data).all()
    assert (ivy.to_numpy(container_shuffled['b']['d']) == data).all()
    assert (ivy.to_numpy(container_shuffled.b.d) == data).all()
    os.remove(save_filepath)
def test_container_to_random(dev_str, call):
    dict_in = {'a': ivy.array([1.]),
               'b': {'c': ivy.array([2.]),
                     'd': ivy.array([3.])}}
    container = Container(dict_in)
    random_container = container.to_random()
    for (key, value), orig_value in zip(
            random_container.to_iterator(),
            [ivy.array([2]), ivy.array([3]), ivy.array([4])]):
        assert call(ivy.shape, value) == call(ivy.shape, orig_value)
def test_container_to_and_from_disk():
    for lib, call in helpers.calls:
        if call in [helpers.tf_graph_call, helpers.mx_graph_call]:
            # container disk saving requires eager execution
            continue
        save_filepath = 'container_on_disk.hdf5'
        dict_in_1 = {'a': ivy.array([np.float32(1.)], f=lib),
                     'b': {'c': ivy.array([np.float32(2.)], f=lib),
                           'd': ivy.array([np.float32(3.)], f=lib)}}
        container1 = Container(dict_in_1)
        dict_in_2 = {'a': ivy.array([np.float32(1.), np.float32(1.)], f=lib),
                     'b': {'c': ivy.array([np.float32(2.), np.float32(2.)], f=lib),
                           'd': ivy.array([np.float32(3.), np.float32(3.)], f=lib)}}
        container2 = Container(dict_in_2)

        # saving
        container1.to_disk(save_filepath, max_batch_size=2)
        assert os.path.exists(save_filepath)

        # loading
        loaded_container = Container.from_disk(save_filepath, lib, slice(1))
        assert np.array_equal(loaded_container.a, container1.a)
        assert np.array_equal(loaded_container.b.c, container1.b.c)
        assert np.array_equal(loaded_container.b.d, container1.b.d)

        # appending
        container1.to_disk(save_filepath, max_batch_size=2, starting_index=1)
        assert os.path.exists(save_filepath)

        # loading after append
        loaded_container = Container.from_disk(save_filepath, lib)
        assert np.array_equal(loaded_container.a, container2.a)
        assert np.array_equal(loaded_container.b.c, container2.b.c)
        assert np.array_equal(loaded_container.b.d, container2.b.d)

        # load slice
        loaded_sliced_container = Container.from_disk(save_filepath, lib, slice(1, 2))
        assert np.array_equal(loaded_sliced_container.a, container1.a)
        assert np.array_equal(loaded_sliced_container.b.c, container1.b.c)
        assert np.array_equal(loaded_sliced_container.b.d, container1.b.d)

        # file size
        file_size, batch_size = Container.h5_file_size(save_filepath)
        assert file_size == 6 * np.dtype(np.float32).itemsize
        assert batch_size == 2
        os.remove(save_filepath)
def test_container_from_dict():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2], f=lib),
                         'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        assert fn(container['a']) == fn(ivy.array([1], f=lib))
        assert fn(container.a) == fn(ivy.array([1], f=lib))
        assert fn(container['b']['c']) == fn(ivy.array([2], f=lib))
        assert fn(container.b.c) == fn(ivy.array([2], f=lib))
        assert fn(container['b']['d']) == fn(ivy.array([3], f=lib))
        assert fn(container.b.d) == fn(ivy.array([3], f=lib))