Example no. 1
 def __init__(self, config=None, blocking=True):
     if not config:
         config = Config()
         config.update(dummyconfig_dict)
     super(DummyDevice, self).__init__(device="cpu",
                                       config=config,
                                       blocking=blocking)
Example no. 2
def test_config1_basic():
    config = Config()
    config.update(config1_dict)
    desc = LayerNetworkDescription.from_config(config)
    assert_is_instance(desc.hidden_info, list)
    assert_equal(len(desc.hidden_info), len(config1_dict["hidden_size"]))
    assert_equal(desc.num_inputs, config1_dict["num_inputs"])
Example no. 3
def test_Updater_add_check_numerics_ops():
    class _Layer(DummyLayer):
        def _get_loss_value(self):
            return tf_compat.v1.log(self.x)

    from returnn.tf.network import TFNetwork, ExternData
    from returnn.config import Config

    with make_scope() as session:
        config = Config()
        config.set("debug_add_check_numerics_ops", True)
        network = TFNetwork(extern_data=ExternData(), train_flag=True)
        network.add_layer(name="output", layer_class=_Layer, initial_value=1.0)
        network.initialize_params(session=session)

        updater = Updater(config=config, network=network)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        # Should succeed.
        session.run(updater.get_optim_op())
        # One gradient descent step from ln(x), x = 1.0: gradient is 1.0 / x, thus x - 1.0 = 0.0.
        assert_almost_equal(
            session.run(network.get_default_output_layer().output.placeholder),
            0.0)

        try:
            # Now, should fail.
            session.run(updater.get_optim_op())
        except tf.errors.InvalidArgumentError as exc:
            print("Expected exception: %r" % exc)
        else:
            assert False, "should have raised an exception"
Example no. 4
 def test_stftConfig_multi_res_02():
     with make_scope() as session:
         layer_name = "stft_layer"
         fft_sizes = [400, 200, 800]
         frame_sizes = [400, 200, 800]
         frame_shift = 160
         window = "hanning"
         test_input = np.random.normal(0, 0.6, (1, 3200, 2))
         num_outputs = int(
             np.sum([(int(fft_size / 2) + 1) * test_input.shape[2]
                     for fft_size in fft_sizes]))
         config = Config()
         config.update({
             "num_outputs": num_outputs,
             "num_inputs": test_input.shape[2],
             "network": {
                 layer_name: {
                     "class": "multichannel_multiresolution_stft_layer",
                     "frame_shift": frame_shift,
                     "frame_sizes": frame_sizes,
                     "window": window,
                     "fft_sizes": fft_sizes,
                     "use_rfft": True,
                     "nr_of_channels": 2,
                     "is_output_layer": True,
                     "from": "data:data"
                 }
             }
         })
         network = TFNetwork(config=config, train_flag=True)
         network.construct_from_dict(config.typed_value("network"))
         layer = network.layers[layer_name]
         test_output = session.run(
             layer.output.placeholder,
             {network.get_extern_data('data').placeholder: test_input})
         assert test_output.shape[2] == num_outputs
         comparison_frame = 6
         ref00 = _get_ref_output_single_res(test_input, fft_sizes[0],
                                            frame_sizes[0], frame_shift,
                                            window, comparison_frame, 0)
         ref01 = _get_ref_output_single_res(test_input, fft_sizes[0],
                                            frame_sizes[0], frame_shift,
                                            window, comparison_frame, 1)
         ref10 = _get_ref_output_single_res(test_input, fft_sizes[1],
                                            frame_sizes[1], frame_shift,
                                            window, comparison_frame, 0)
         ref11 = _get_ref_output_single_res(test_input, fft_sizes[1],
                                            frame_sizes[1], frame_shift,
                                            window, comparison_frame, 1)
         ref20 = _get_ref_output_single_res(test_input, fft_sizes[2],
                                            frame_sizes[2], frame_shift,
                                            window, comparison_frame, 0)
         ref21 = _get_ref_output_single_res(test_input, fft_sizes[2],
                                            frame_sizes[2], frame_shift,
                                            window, comparison_frame, 1)
         ref = np.concatenate([ref00, ref01, ref10, ref11, ref20, ref21],
                              axis=0)
         resultDiff = np.abs(test_output[0, comparison_frame, :] - ref)
         assert np.mean(resultDiff) < 0.06
         assert np.max(resultDiff) < 1
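
For clarity, the num_outputs value above works out as follows: an rfft over an N-point frame yields N // 2 + 1 bins, which are summed over the three resolutions and multiplied by the two channels (plain-Python sketch):

fft_sizes = [400, 200, 800]
n_channels = 2
bins_per_resolution = [fft_size // 2 + 1 for fft_size in fft_sizes]   # [201, 101, 401]
print(sum(bins_per_resolution) * n_channels)                          # 1406
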
Example no. 5
def test_NetworkDescription_to_json_config1():
    config = Config()
    config.update(config1_dict)
    desc = LayerNetworkDescription.from_config(config)
    desc_json_content = desc.to_json_content()
    pprint(desc_json_content)
    assert_in("hidden_0", desc_json_content)
    assert_equal(desc_json_content["hidden_0"]["class"], "forward")
    assert_in("hidden_1", desc_json_content)
    assert_in("output", desc_json_content)
    orig_network = LayerNetwork.from_description(desc)
    assert_in("hidden_0", orig_network.hidden)
    assert_in("hidden_1", orig_network.hidden)
    assert_equal(len(orig_network.hidden), 2)
    assert_is_instance(orig_network.hidden["hidden_0"], ForwardLayer)
    assert_equal(orig_network.hidden["hidden_0"].layer_class, "hidden")
    orig_json_content = orig_network.to_json_content()
    pprint(orig_json_content)
    assert_in("hidden_0", orig_json_content)
    assert_equal(orig_json_content["hidden_0"]["class"], "hidden")
    assert_in("hidden_1", orig_json_content)
    assert_in("output", orig_json_content)
    new_network = LayerNetwork.from_json(
        desc_json_content, config1_dict["num_inputs"],
        {"classes": (config1_dict["num_outputs"], 1)})
    new_json_content = new_network.to_json_content()
    if orig_json_content != new_json_content:
        print(dict_diff_str(orig_json_content, new_json_content))
        assert_equal(orig_json_content, new_network.to_json_content())
Example no. 6
def test_melFilterbankLayer():
    with make_scope() as session:
        n_in, n_out = 257, 3
        layer_name = "mel_filterbank_layer"
        config = Config()
        config.update({
            "num_outputs": n_out,
            "num_inputs": n_in,
            "network": {
                layer_name: {
                    "class": "mel_filterbank",
                    "fft_size": 512,
                    "nr_of_filters": n_out,
                    "n_out": n_out,
                    "is_output_layer": True,
                    "from": "data:data"
                }
            }
        })
        network = TFNetwork(config=config, train_flag=True)
        network.construct_from_dict(config.typed_value("network"))
        layer = network.layers[layer_name]
        test_out = session.run(layer.output.placeholder,
                               feed_dict={
                                   network.get_extern_data('data').placeholder:
                                   np.ones((1, 1, 257))
                               })
        assert np.sum(test_out - np.asarray(
            [28.27923584, 53.10634232, 99.71585846], dtype=np.float32)) < 1e-5
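
As an aside, the expected values in the assertion are simply the per-filter weight sums, assuming the mel_filterbank layer applies its (fft_bins, n_filters) filterbank matrix as a plain matrix multiplication; a stand-in NumPy sketch of that reasoning:

import numpy as np

F = np.random.rand(257, 3)   # stand-in for the layer's (fft_bins, n_filters) matrix
x = np.ones((1, 257))        # the same all-ones input as in the test above
# With an all-ones input, output k is just the sum of filter k's weights.
np.testing.assert_allclose(x @ F, F.sum(axis=0, keepdims=True))
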
Example no. 7
def test_enc_dec1_init():
    config = Config()
    config.load_file(StringIO(config_enc_dec1_json))

    network_json = LayerNetwork.json_from_config(config)
    assert_true(network_json)
    network = LayerNetwork.from_json_and_config(network_json, config)
    assert_true(network)
Example no. 8
def create_first_epoch(config_filename):
    config = Config()
    config.load_file(config_filename)
    engine = Engine([])
    engine.init_train_from_config(config=config, train_data=None)
    engine.epoch = 1
    engine.save_model(engine.get_epoch_model_filename(), epoch=engine.epoch)
    Engine._epoch_model = None
Example no. 9
def is_crnn_config(filename):
    if filename.endswith(".gz"):
        return False
    try:
        config = Config()
        config.load_file(filename)
        return True
    except Exception:
        pass
    return False
Example no. 10
def cleanup_tmp_models(config_filename):
    assert os.path.exists(config_filename)
    from returnn.config import Config
    config = Config()
    config.load_file(config_filename)
    model_filename = config.value('model', '')
    assert model_filename
    # Remove existing models
    assert model_filename.startswith("/tmp/")
    for f in glob(model_filename + ".*"):
        os.remove(f)
Example no. 11
def test_num_inputs_outputs_old():
    n_in = 5
    n_out = 10
    config = Config()
    config.update({"num_inputs": n_in, "num_outputs": n_out})
    num_inputs, num_outputs = LayerNetworkDescription.num_inputs_outputs_from_config(
        config)
    assert_equal(num_inputs, n_in)
    assert_is_instance(num_outputs, dict)
    assert_equal(len(num_outputs), 1)
    assert_in("classes", num_outputs)
    assert_equal(num_outputs["classes"], (n_out, 1))
Example no. 12
def test_config1():
    config = Config()
    config.update(config1_dict)
    pretrain = pretrain_from_config(config)
    assert_equal(pretrain.get_train_num_epochs(), 2)
    net1_json = pretrain.get_network_json_for_epoch(1)
    net2_json = pretrain.get_network_json_for_epoch(2)
    net3_json = pretrain.get_network_json_for_epoch(3)
    assert_in("hidden_0", net1_json)
    assert_not_in("hidden_1", net1_json)
    assert_in("hidden_0", net2_json)
    assert_in("hidden_1", net2_json)
    assert_equal(net2_json, net3_json)
Example no. 13
def test_init_error_muliple_out():
    config = Config()
    config.update({
        "learning_rate_control": "newbob",
        "learning_rate_control_error_measure": "dev_score"
    })
    lrc = load_learning_rate_control_from_config(config)
    assert isinstance(lrc, NewbobRelative)
    lrc.get_learning_rate_for_epoch(1)
    lrc.set_epoch_error(
        1, {"train_score": {
            'cost:output': 1.95,
            "cost:out2": 2.95
        }})
    lrc.set_epoch_error(
        1, {
            "dev_score": {
                'cost:output': 1.99,
                "cost:out2": 2.99
            },
            "dev_error": {
                'error:output': 0.6,
                "error:out2": 0.7
            }
        })
    error = lrc.get_epoch_error_dict(1)
    assert "train_score_output" in error
    assert "train_score_out2" in error
    assert "dev_score_output" in error
    assert "dev_score_out2" in error
    assert "dev_error_output" in error
    assert "dev_error_out2" in error
    assert_equal(lrc.get_error_key(1), "dev_score_output")
    lrc.get_learning_rate_for_epoch(2)
    lrc.set_epoch_error(
        2, {"train_score": {
            'cost:output': 1.8,
            "cost:out2": 2.8
        }})
    lrc.set_epoch_error(
        2, {
            "dev_score": {
                'cost:output': 1.9,
                "cost:out2": 2.9
            },
            "dev_error": {
                'error:output': 0.5,
                "error:out2": 0.6
            }
        })
    lrc.get_learning_rate_for_epoch(3)
Example no. 14
def test_config1_to_json_network_copy():
    config = Config()
    config.update(config1_dict)
    orig_network = LayerNetwork.from_config_topology(config)
    orig_json_content = orig_network.to_json_content()
    pprint(orig_json_content)
    new_network = LayerNetwork.from_json(orig_json_content, orig_network.n_in,
                                         orig_network.n_out)
    assert_equal(orig_network.n_in, new_network.n_in)
    assert_equal(orig_network.n_out, new_network.n_out)
    new_json_content = new_network.to_json_content()
    if orig_json_content != new_json_content:
        print(dict_diff_str(orig_json_content, new_json_content))
        assert_equal(orig_json_content, new_network.to_json_content())
Example no. 15
def is_returnn_config(filename):
  """
  :param str filename:
  :rtype: bool
  """
  if filename.endswith(".gz"):
    return False
  # noinspection PyBroadException
  try:
    config = Config()
    config.load_file(filename)
    return True
  except Exception:
    pass
  return False
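
A small usage sketch for such a predicate; the directory pattern is hypothetical and only illustrates how candidate config files might be filtered:

from glob import glob

# Hypothetical demo directory; keep only the files that parse as RETURNN configs.
candidates = sorted(glob("demos/demo-*.config"))
print([fn for fn in candidates if is_returnn_config(fn)])
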
Example no. 16
    def _run_returnn_standalone_net_dict(self):
        print(">>> Constructing RETURNN model, load TF checkpoint, run...")
        with tf.compat.v1.Session() as session:
            from returnn.config import Config
            from returnn.tf.network import TFNetwork
            config = Config({
                "extern_data": {
                    "data": self._returnn_in_data_dict
                },
                "debug_print_layer_output_template": True,
            })
            network = TFNetwork(config=config, name="root")
            network.construct_from_dict(self._returnn_net_dict)
            network.load_params_from_file(
                filename=self._tf_checkpoint_save_path, session=session)

            x = network.extern_data.get_default_input_data()
            y = network.get_default_output_layer().output
            feed_dict = self._make_tf_feed_dict(x)
            y_, y_size = session.run((y.placeholder, y.size_placeholder),
                                     feed_dict=feed_dict)
            assert isinstance(y_, numpy.ndarray)
            print("Output shape:", y_.shape)
            numpy.testing.assert_allclose(self._out_returnn_np, y_)
            print(">>>> Looks good!")
            print()
Example no. 17
def test_Updater_CustomUpdate():
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        from returnn.tf.util.basic import CustomUpdate

        config = Config()
        network = TFNetwork(extern_data=ExternData(), train_flag=True)
        layer = network.add_layer(name="output",
                                  layer_class=DummyLayer,
                                  initial_value=4.0)
        assert isinstance(layer, DummyLayer)
        network.initialize_params(session=session)

        class CustomUpdateAdd13(CustomUpdate):
            def update_var(self, var):
                return tf_compat.v1.assign_add(var, 13.0)

        CustomUpdateAdd13().set_on_var(layer.x)

        updater = Updater(config=config, network=network)
        updater.set_learning_rate(1000.0, session=session)  # should be ignored
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        session.run(updater.get_optim_op())
        # Should have applied CustomUpdateAdd13.
        assert_almost_equal(
            session.run(network.get_default_output_layer().output.placeholder),
            17.0)
Example no. 18
def test_Device_blocking_init():
    config = Config()
    config.update({
        "multiprocessing": False,
        "blocking": True,
        "device": "cpu",
        "num_epochs": 1,
        "num_inputs": 3,
        "num_outputs": 2,
    })
    config.network_topology_json = """
  {
  "output": {"class": "softmax", "loss": "ce"}
  }
  """

    Device("cpu", config=config, blocking=True)
Example no. 19
def test_rnn_getCacheByteSizes_zero():
    from returnn.config import Config
    config = Config({"cache_size": "0"})
    import returnn.__main__ as rnn
    rnn.config = config
    sizes = rnn.get_cache_byte_sizes()
    assert len(sizes) == 3
    assert all([s == 0 for s in sizes])
Example no. 20
def test_config2_bidirect_lstm():
    config = Config()
    config.update(config2_dict)
    desc = LayerNetworkDescription.from_config(config)
    assert_true(desc.bidirectional)
    network = LayerNetwork.from_config_topology(config)
    net_json = network.to_json_content()
    pprint(net_json)
    assert_in("output", net_json)
    assert_in("hidden_0_fw", net_json)
    assert_in("hidden_0_bw", net_json)
    assert_in("hidden_1_fw", net_json)
    assert_in("hidden_1_bw", net_json)
    assert_in("hidden_2_fw", net_json)
    assert_in("hidden_2_bw", net_json)
    assert_equal(net_json["output"]["from"], ["hidden_2_fw", "hidden_2_bw"])
    assert_equal(len(net_json), 7)
Example no. 21
def test_network_config1_init():
    config = Config()
    config.update(config1_dict)
    network = LayerNetwork.from_config_topology(config)
    assert_in("hidden_0", network.hidden)
    assert_in("hidden_1", network.hidden)
    assert_equal(len(network.hidden), 2)
    assert_is_instance(network.hidden["hidden_0"], ForwardLayer)
    assert_equal(network.hidden["hidden_0"].layer_class, "hidden")
    assert_false(network.recurrent)

    json_content = network.to_json_content()
    pprint(json_content)
    assert_in("hidden_0", json_content)
    assert_equal(json_content["hidden_0"]["class"], "hidden")
    assert_in("hidden_1", json_content)
    assert_in("output", json_content)
Example no. 22
 def test_rfftStftConfig_01():
     with make_scope() as session:
         layer_name = "stft_layer"
         fft_size = 400
         frame_size = 400
         frame_shift = 160
         window = "hanning"
         test_input = np.ones((1, 32000, 2), dtype=np.float32)
         config = Config()
         config.update({
             "num_outputs":
             int(fft_size / 2) + 1 * test_input.shape[2],
             "num_inputs":
             test_input.shape[2],
             "network": {
                 layer_name: {
                     "class": "multichannel_stft_layer",
                     "frame_shift": frame_shift,
                     "frame_size": frame_size,
                     "window": window,
                     "fft_size": fft_size,
                     "use_rfft": True,
                     "nr_of_channels": 2,
                     "is_output_layer": True,
                     "from": "data:data"
                 }
             }
         })
         network = TFNetwork(config=config, train_flag=True)
         network.construct_from_dict(config.typed_value("network"))
         layer = network.layers[layer_name]
         test_output = session.run(
             layer.output.placeholder,
             {network.get_extern_data('data').placeholder: test_input})
         ref0 = _get_ref_output(test_input, fft_size, frame_size,
                                frame_shift, window, 0, 0)
         # np.fft.rfft and tensorflow.python.ops.rfft differ a little bit in their
         # results, thus an error margin is allowed in the result
         resultDiff = np.abs(test_output[0, 0, 0:(int(fft_size / 2) + 1)] -
                             ref0)
         assert np.mean(resultDiff) < 0.02
         assert np.max(resultDiff) < 1
         pass
Example no. 23
def test_single_default_target_init():
    config_single_default = Config()
    config_single_default.update({
        "multiprocessing": False,
        "blocking": True,
        "device": "cpu",
        "num_epochs": 1,
        "num_inputs": 3,
        "num_outputs": 2,
    })
    config_single_default.network_topology_json = """
  {
  "output": {"class": "softmax", "loss": "ce"}
  }
  """

    dev = Device("cpu", config=config_single_default, blocking=True)
    num_params = get_num_params(dev.trainnet.get_all_params_vars())
    assert_equal(num_params, 3 * 2 + 2, "W, b")
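
The expected parameter count is just the softmax layer's weight matrix plus its bias vector:

n_in, n_out = 3, 2
num_params = n_in * n_out + n_out   # W has shape (3, 2), b has 2 entries
assert num_params == 8              # matches the "3 * 2 + 2" assertion above
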
Example no. 24
def test_init_error_old():
    config = Config()
    config.update({
        "learning_rate_control": "newbob",
        "learning_rate_control_error_measure": "dev_score"
    })
    lrc = load_learning_rate_control_from_config(config)
    assert isinstance(lrc, NewbobRelative)
    lrc.get_learning_rate_for_epoch(1)
    lrc.set_epoch_error(1, {"train_score": 1.9344199658230012})
    lrc.set_epoch_error(1, {"dev_score": 1.99, "dev_error": 0.6})
    error = lrc.get_epoch_error_dict(1)
    assert "train_score" in error
    assert "dev_score" in error
    assert "dev_error" in error
    assert_equal(lrc.get_error_key(1), "dev_score")
    lrc.get_learning_rate_for_epoch(2)
    lrc.set_epoch_error(2, {"train_score": 1.8})
    lrc.set_epoch_error(2, {"dev_score": 1.9, "dev_error": 0.5})
    lrc.get_learning_rate_for_epoch(3)
Example no. 25
 def test_stftConfig_single_res_01():
     with make_scope() as session:
         layer_name = "stft_layer"
         fft_sizes = [400]
         frame_sizes = [400]
         frame_shift = 160
         window = "hanning"
         test_input = np.ones((1, 32000, 2), dtype=np.float32)
         num_outputs = (int(fft_sizes[0] / 2) + 1) * test_input.shape[2]
         config = Config()
         config.update({
             "num_outputs": num_outputs,
             "num_inputs": test_input.shape[2],
             "network": {
                 layer_name: {
                     "class": "multichannel_multiresolution_stft_layer",
                     "frame_shift": frame_shift,
                     "frame_sizes": frame_sizes,
                     "window": window,
                     "fft_sizes": fft_sizes,
                     "use_rfft": True,
                     "nr_of_channels": 2,
                     "is_output_layer": True,
                     "from": "data:data"
                 }
             }
         })
         network = TFNetwork(config=config, train_flag=True)
         network.construct_from_dict(config.typed_value("network"))
         layer = network.layers[layer_name]
         test_output = session.run(
             layer.output.placeholder,
             {network.get_extern_data('data').placeholder: test_input})
         ref0 = _get_ref_output_single_res(test_input, fft_sizes[0],
                                           frame_sizes[0], frame_shift,
                                           window, 0, 0)
         resultDiff = np.abs(test_output[0, 0, 0:(int(fft_sizes[0] / 2) +
                                                  1)] - ref0)
         assert test_output.shape[2] == num_outputs
         assert np.mean(resultDiff) < 0.02
         assert np.max(resultDiff) < 1
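
For orientation, a single-resolution reference frame such as _get_ref_output_single_res returns could be computed roughly like this in NumPy; this is an assumption about the helper, not its actual implementation (window handling and output ordering may differ):

import numpy as np

def ref_rfft_frame(signal, fft_size, frame_size, frame_shift, frame_idx, channel):
    """Windowed rfft of one frame of one channel: fft_size // 2 + 1 complex bins."""
    start = frame_idx * frame_shift
    frame = signal[0, start:start + frame_size, channel]
    return np.fft.rfft(frame * np.hanning(frame_size), n=fft_size)
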
Example no. 26
def test_newbob_multi_epoch():
    lr = 0.0005
    config = Config()
    config.update({
        "learning_rate_control": "newbob_multi_epoch",
        "learning_rate_control_relative_error_relative_lr": True,
        "newbob_multi_num_epochs": 6,
        "newbob_multi_update_interval": 1,
        "learning_rate": lr
    })
    lrc = load_learning_rate_control_from_config(config)
    assert isinstance(lrc, NewbobMultiEpoch)
    assert_equal(lrc.get_learning_rate_for_epoch(1), lr)
    lrc.set_epoch_error(
        1, {
            'dev_error': 0.50283176046904721,
            'dev_score': 2.3209858321263455,
            'train_score': 3.095824052426714,
        })
    assert_equal(lrc.get_learning_rate_for_epoch(2),
                 lr)  # epoch 2 cannot be a different lr yet
Example no. 27
def test_Updater_simple_batch():
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        from returnn.datasets.generating import Task12AXDataset
        dataset = Task12AXDataset()
        dataset.init_seq_order(epoch=1)
        extern_data = ExternData()
        extern_data.init_from_dataset(dataset)

        config = Config()
        network = TFNetwork(extern_data=extern_data, train_flag=True)
        network.construct_from_dict({
            "layer1": {
                "class": "linear",
                "activation": "tanh",
                "n_out": 13,
                "from": "data:data"
            },
            "layer2": {
                "class": "linear",
                "activation": "tanh",
                "n_out": 13,
                "from": ["layer1"]
            },
            "output": {
                "class": "softmax",
                "loss": "ce",
                "target": "classes",
                "from": ["layer2"]
            }
        })
        network.initialize_params(session=session)

        updater = Updater(config=config, network=network)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)

        from returnn.tf.data_pipeline import FeedDictDataProvider
        batches = dataset.generate_batches(
            recurrent_net=network.recurrent,
            batch_size=100,
            max_seqs=10,
            max_seq_length=sys.maxsize,
            used_data_keys=network.used_data_keys)
        data_provider = FeedDictDataProvider(tf_session=session,
                                             extern_data=extern_data,
                                             data_keys=network.used_data_keys,
                                             dataset=dataset,
                                             batches=batches)
        feed_dict, _ = data_provider.get_feed_dict(single_threaded=True)
        session.run(updater.get_optim_op(), feed_dict=feed_dict)
Example no. 28
def test_read_all():
    config = Config()
    config.update(dummyconfig_dict)
    print("Create ExternSprintDataset")
    python2_exec = util.which("python2")
    if python2_exec is None:
        raise unittest.SkipTest("python2 not found")
    num_seqs = 4
    dataset = ExternSprintDataset(
        [python2_exec, sprintExecPath],
        "--*.feature-dimension=2 --*.trainer-output-dimension=3 "
        "--*.crnn-dataset=DummyDataset(2,3,num_seqs=%i,seq_len=10)" % num_seqs)
    dataset.init_seq_order(epoch=1)
    seq_idx = 0
    while dataset.is_less_than_num_seqs(seq_idx):
        dataset.load_seqs(seq_idx, seq_idx + 1)
        for key in dataset.get_data_keys():
            value = dataset.get_data(seq_idx, key)
            print("seq idx %i, data %r: %r" % (seq_idx, key, value))
        seq_idx += 1
    assert seq_idx == num_seqs
Example no. 29
def test_assign_dev_data():
    config = Config()
    config.update(dummyconfig_dict)
    print("Create ExternSprintDataset")
    dataset = ExternSprintDataset(
        [sys.executable, sprintExecPath],
        "--*.feature-dimension=2 --*.trainer-output-dimension=3 "
        "--*.crnn-dataset=DummyDataset(2,3,num_seqs=4,seq_len=10)")
    dataset.init_seq_order(epoch=1)
    assert_true(dataset.is_less_than_num_seqs(0))
    recurrent = False
    batch_generator = dataset.generate_batches(recurrent_net=recurrent,
                                               batch_size=5)
    batches = batch_generator.peek_next_n(2)
    assert_equal(len(batches), 2)
    if theano:
        print("Create Device")
        device = DummyDevice(config=config)
        success, num_batches = assign_dev_data(device, dataset, batches)
        assert_true(success)
        assert_equal(num_batches, len(batches))
Example no. 30
def test_rnn_initData():
    hdf_fn = generate_hdf_from_dummy()
    from returnn.config import Config
    config = Config({"cache_size": "0", "train": hdf_fn, "dev": hdf_fn})
    import returnn.__main__ as rnn
    rnn.config = config
    rnn.init_data()
    train, dev = rnn.train_data, rnn.dev_data
    assert train and dev
    assert isinstance(train, HDFDataset)
    assert isinstance(dev, HDFDataset)
    assert train.cache_byte_size_total_limit == dev.cache_byte_size_total_limit == 0
    assert train.cache_byte_size_limit_at_start == dev.cache_byte_size_limit_at_start == 0