Example #1
  def init_network_from_config(self, config):
    """
    :param Config.Config config:
    """
    self.model_filename = config.value('model', None)
    self.pretrain = pretrainFromConfig(config)
    self.max_seqs = config.int('max_seqs', -1)

    epoch, model_epoch_filename = self.get_epoch_model(config)
    assert model_epoch_filename or self.start_epoch
    self.epoch = epoch or self.start_epoch

    if self.pretrain:
      # This would be obsolete if we don't want to load an existing model.
      # In self.init_train_epoch(), we initialize a new model.
      net_dict = self.pretrain.get_network_json_for_epoch(self.epoch)
    else:
      net_dict = LayerNetwork.json_from_config(config)

    self._init_network(net_desc=net_dict, epoch=self.epoch)

    if model_epoch_filename:
      print("loading weights from", model_epoch_filename, file=log.v2)
      try:
        self.network.load_params_from_file(model_epoch_filename, session=self.tf_session)
      except tf.errors.NotFoundError:
        print("Exiting now because model cannot be loaded.", file=log.v1)
        sys.exit(1)
Example #2
  def init_network_from_config(self, config):
    self.pretrain = pretrainFromConfig(config)
    self.max_seqs = config.int('max_seqs', -1)
    self.compression = config.bool('compression', False)

    epoch, model_epoch_filename = self.get_epoch_model(config)
    assert model_epoch_filename or self.start_epoch

    if model_epoch_filename:
      print("loading weights from", model_epoch_filename, file=log.v2)
      last_model_hdf = h5py.File(model_epoch_filename, "r")
    else:
      last_model_hdf = None

    if config.bool('initialize_from_model', False):
      # That's only about the topology, not the params.
      print("initializing network topology from model", file=log.v5)
      assert last_model_hdf, "last model not specified. use 'load' in config. or don't use 'initialize_from_model'"
      network = LayerNetwork.from_hdf_model_topology(last_model_hdf)
    else:
      if self.pretrain:
        # This would be obsolete if we don't want to load an existing model.
        # In self.init_train_epoch(), we initialize a new model.
        network = self.pretrain.get_network_for_epoch(epoch or self.start_epoch)
      else:
        network = LayerNetwork.from_config_topology(config)

    # We have the parameters randomly initialized at this point.
    # In training, as an initialization, we can copy over the params of an imported model,
    # where our topology might slightly differ from the imported model.
    if config.value('import_model_train_epoch1', '') and self.start_epoch == 1:
      assert last_model_hdf
      old_network = LayerNetwork.from_hdf_model_topology(last_model_hdf)
      old_network.load_hdf(last_model_hdf)
      last_model_hdf.close()
      # Copy params to new network.
      from NetworkCopyUtils import intelli_copy_layer
      # network.hidden are the input + all hidden layers.
      for layer_name, layer in sorted(old_network.hidden.items()):
        print("Copy hidden layer %s" % layer_name, file=log.v3)
        intelli_copy_layer(layer, network.hidden[layer_name])
      for layer_name, layer in sorted(old_network.output.items()):
        print("Copy output layer %s" % layer_name, file=log.v3)
        intelli_copy_layer(layer, network.output[layer_name])
      print("Not copied hidden: %s" % sorted(set(network.hidden.keys()).difference(old_network.hidden.keys())), file=log.v3)
      print("Not copied output: %s" % sorted(set(network.output.keys()).difference(old_network.output.keys())), file=log.v3)

    # Maybe load existing model parameters.
    elif last_model_hdf:
      network.load_hdf(last_model_hdf)
      last_model_hdf.close()
      EngineUtil.maybe_subtract_priors(network, self.train_data, config)

    self.network = network

    if config.has('dump_json'):
      self.network_dump_json(config.value('dump_json', ''))

    self.print_network_info()
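The branches in this variant are driven by a handful of config keys. Below is a hypothetical config fragment exercising the initialize-from-model path; the key names are taken from the reads above, and the 'load' semantics from the assert message ("use 'load' in config"), so treat this as a sketch, not the documented API:

# Hypothetical CRNN-style Python config fragment (illustrative values):
load = "net-model/network.042"   # assumed to make get_epoch_model() return this file
initialize_from_model = True     # take only the topology from the loaded HDF model
import_model_train_epoch1 = ""   # keep the param-copying branch disabled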
Example #3
  def init_network_from_config(self, config):
    self.pretrain = pretrainFromConfig(config)
    self.max_seqs = config.int('max_seqs', -1)

    epoch, model_epoch_filename = self.get_epoch_model(config)
    assert model_epoch_filename or self.start_epoch

    if model_epoch_filename:
      print("loading weights from", model_epoch_filename, file=log.v2)
      last_model_hdf = h5py.File(model_epoch_filename, "r")
    else:
      last_model_hdf = None

    if config.bool('initialize_from_model', False):
      # That's only about the topology, not the params.
      print("initializing network topology from model", file=log.v5)
      assert last_model_hdf, "last model not specified. use 'load' in config. or don't use 'initialize_from_model'"
      network = LayerNetwork.from_hdf_model_topology(last_model_hdf)
    else:
      if self.pretrain:
        # This would be obsolete if we don't want to load an existing model.
        # In self.init_train_epoch(), we initialize a new model.
        network = self.pretrain.get_network_for_epoch(epoch or self.start_epoch)
      else:
        network = LayerNetwork.from_config_topology(config)

    # We have the parameters randomly initialized at this point.
    # In training, as an initialization, we can copy over the params of an imported model,
    # where our topology might slightly differ from the imported model.
    if config.value('import_model_train_epoch1', '') and self.start_epoch == 1:
      assert last_model_hdf
      old_network = LayerNetwork.from_hdf_model_topology(last_model_hdf)
      old_network.load_hdf(last_model_hdf)
      last_model_hdf.close()
      # Copy params to new network.
      from NetworkCopyUtils import intelli_copy_layer
      # network.hidden are the input + all hidden layers.
      for layer_name, layer in sorted(old_network.hidden.items()):
        print >> log.v3, "Copy hidden layer %s" % layer_name
        intelli_copy_layer(layer, network.hidden[layer_name])
      for layer_name, layer in sorted(old_network.output.items()):
        print >> log.v3, "Copy output layer %s" % layer_name
        intelli_copy_layer(layer, network.output[layer_name])
      print >> log.v3, "Not copied hidden: %s" % sorted(set(network.hidden.keys()).difference(old_network.hidden.keys()))
      print >> log.v3, "Not copied output: %s" % sorted(set(network.output.keys()).difference(old_network.output.keys()))

    # Maybe load existing model parameters.
    elif last_model_hdf:
      network.load_hdf(last_model_hdf)
      last_model_hdf.close()
      EngineUtil.maybe_subtract_priors(network, self.train_data, config)

    self.network = network

    if config.has('dump_json'):
      self.network_dump_json(config.value('dump_json', ''))

    self.print_network_info()
Example #4
def demo():
  import better_exchook
  better_exchook.install()
  import rnn
  import sys
  if len(sys.argv) <= 1:
    print("usage: python %s [config] [other options]" % __file__)
    print("example usage: python %s ++learning_rate_control newbob ++learning_rate_file newbob.data ++learning_rate 0.001" % __file__)
  rnn.initConfig(commandLineOptions=sys.argv[1:])
  rnn.config._hack_value_reading_debug()
  from Pretrain import pretrainFromConfig
  pretrain = pretrainFromConfig(rnn.config)
  first_non_pretrain_epoch = 1
  pretrain_learning_rate = None
  if pretrain:
    first_non_pretrain_epoch = pretrain.get_train_num_epochs() + 1
  log.initialize(verbosity=[5])
  control = loadLearningRateControlFromConfig(rnn.config)
  print("LearningRateControl: %r" % control)
  if not control.epochData:
    print("No epoch data so far.")
    return
  firstEpoch = min(control.epochData.keys())
  if firstEpoch != 1:
    print("Strange, first epoch from epoch data is %i." % firstEpoch)
  print("Error key: %s from %r" % (control.getErrorKey(epoch=firstEpoch), control.epochData[firstEpoch].error))
  if pretrain:
    pretrain_learning_rate = rnn.config.float('pretrain_learning_rate', control.defaultLearningRate)
  maxEpoch = max(control.epochData.keys())
  for epoch in range(1, maxEpoch + 2):  # all epochs [1..maxEpoch+1]
    oldLearningRate = None
    if epoch in control.epochData:
      oldLearningRate = control.epochData[epoch].learningRate
    if epoch < first_non_pretrain_epoch:
      learningRate = pretrain_learning_rate
      s = "Pretrain epoch %i, fixed learning rate: %s (was: %s)" % (epoch, learningRate, oldLearningRate)
    elif first_non_pretrain_epoch > 1 and epoch == first_non_pretrain_epoch:
      learningRate = control.defaultLearningRate
      s = "First epoch after pretrain, epoch %i, fixed learning rate: %s (was %s)" % (epoch, learningRate, oldLearningRate)
    else:
      learningRate = control.calcNewLearnignRateForEpoch(epoch)
      s = "Calculated learning rate for epoch %i: %s (was: %s)" % (epoch, learningRate, oldLearningRate)
    if learningRate < control.minLearningRate:
      learningRate = control.minLearningRate
      s += ", clipped to %s" % learningRate
    s += ", previous relative error: %s" % control.calcRelativeError(epoch - 2, epoch - 1)
    if hasattr(control, "_calcRecentMeanRelativeError"):
      s += ", previous mean relative error: %s" % control._calcRecentMeanRelativeError(epoch)
    print(s)
    # Overwrite new learning rate so that the calculation for further learning rates stays consistent.
    if epoch in control.epochData:
      control.epochData[epoch].learningRate = learningRate
    else:
      control.epochData[epoch] = control.EpochData(learningRate=learningRate)
  print("Finished, last stored epoch was %i." % maxEpoch)
Example #5
def saveCrnnNetwork(epoch, layers):
    """
  :type epoch: int
  :type layers: list[(numpy.ndarray, numpy.ndarray)]
  """
    print("Loading Crnn")

    from Network import LayerNetwork
    from NetworkHiddenLayer import ForwardLayer
    from NetworkOutputLayer import OutputLayer
    from Pretrain import pretrainFromConfig
    from Engine import Engine

    pretrain = pretrainFromConfig(config)
    is_pretrain_epoch = pretrain and epoch <= pretrain.get_train_num_epochs()
    modelFilename = config.value("model", None)
    assert modelFilename, "need 'model' in config"
    filename = Engine.epoch_model_filename(modelFilename, epoch,
                                           is_pretrain_epoch)
    assert not os.path.exists(filename), "already exists"
    if is_pretrain_epoch:
        network = pretrain.get_network_for_epoch(epoch)
    else:
        network = LayerNetwork.from_config_topology(config)
    nHiddenLayers = len(network.hidden)

    # print network topology
    print("Crnn Network layer topology:")
    print("input dim:", network.n_in)
    print("hidden layer count:", nHiddenLayers)
    print("output dim:", network.n_out["classes"])
    print("net weights #:", network.num_params())
    print("net params:", network.train_params_vars)
    print("net output:", network.output["output"])

    assert network.n_in == inputDim
    #assert network.n_out == outputDim
    assert nHiddenLayers + 1 == layerCount  # hidden + output layer
    assert len(layers) == layerCount
    for i, (layerName, hidden) in enumerate(sorted(network.hidden.items())):
        # Some checks whether this is a forward-layer.
        assert isinstance(hidden, ForwardLayer)

        saveCrnnLayer(hidden, *layers[i])

    assert isinstance(network.output["output"], OutputLayer)
    saveCrnnLayer(network.output["output"], *layers[len(layers) - 1])

    import h5py
    print(("Save Crnn model under %s" % filename))
    model = h5py.File(filename, "w")
    network.save_hdf(model, epoch)
    model.close()
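saveCrnnNetwork leans on module globals that are elided here (config, inputDim, layerCount, and the saveCrnnLayer helper). A hypothetical call, with purely illustrative dimensions:

import numpy
# One (weights, bias) pair per layer: hidden layers in sorted order, output last.
# Shapes are made up and must match the configured topology.
layers = [
    (numpy.zeros((512, 2000), dtype="float32"), numpy.zeros(2000, dtype="float32")),   # hidden_0
    (numpy.zeros((2000, 9001), dtype="float32"), numpy.zeros(9001, dtype="float32")),  # output
]
saveCrnnNetwork(epoch=1, layers=layers)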
Example #6
def saveCrnnNetwork(epoch, layers):
  """
  :type epoch: int
  :type layers: list[(numpy.ndarray, numpy.ndarray)]
  """
  print("Loading Crnn")

  from Network import LayerNetwork
  from NetworkHiddenLayer import ForwardLayer
  from NetworkOutputLayer import OutputLayer
  from Pretrain import pretrainFromConfig
  from Engine import Engine

  pretrain = pretrainFromConfig(config)
  is_pretrain_epoch = pretrain and epoch <= pretrain.get_train_num_epochs()
  modelFilename = config.value("model", None)
  assert modelFilename, "need 'model' in config"
  filename = Engine.epoch_model_filename(modelFilename, epoch, is_pretrain_epoch)
  assert not os.path.exists(filename), "already exists"
  if is_pretrain_epoch:
    network = pretrain.get_network_for_epoch(epoch)
  else:
    network = LayerNetwork.from_config_topology(config)
  nHiddenLayers = len(network.hidden)

  # print network topology
  print "Crnn Network layer topology:"
  print "input dim:", network.n_in
  print "hidden layer count:", nHiddenLayers
  print "output dim:", network.n_out["classes"]
  print "net weights #:", network.num_params()
  print "net params:", network.train_params_vars
  print "net output:", network.output["output"]

  assert network.n_in == inputDim
  #assert network.n_out == outputDim
  assert nHiddenLayers + 1 == layerCount  # hidden + output layer
  assert len(layers) == layerCount
  for i, (layerName, hidden) in enumerate(sorted(network.hidden.items())):
    # Some checks whether this is a forward-layer.
    assert isinstance(hidden, ForwardLayer)

    saveCrnnLayer(hidden, *layers[i])

  assert isinstance(network.output["output"], OutputLayer)
  saveCrnnLayer(network.output["output"], *layers[len(layers) - 1])

  import h5py
  print("Save Crnn model under %s" % filename)
  model = h5py.File(filename, "w")
  network.save_hdf(model, epoch)
  model.close()
Example #7
def test_config1():
  config = Config()
  config.update(config1_dict)
  pretrain = pretrainFromConfig(config)
  assert_equal(pretrain.get_train_num_epochs(), 2)
  net1_json = pretrain._get_network_json_for_epoch(1)
  net2_json = pretrain._get_network_json_for_epoch(2)
  net3_json = pretrain._get_network_json_for_epoch(3)
  assert_in("hidden_0", net1_json)
  assert_not_in("hidden_1", net1_json)
  assert_in("hidden_0", net2_json)
  assert_in("hidden_1", net2_json)
  assert_equal(net2_json, net3_json)
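config1_dict itself is not shown. A plausible shape, assuming default greedy layer-wise pretraining where each added hidden layer contributes one pretrain epoch (all names and dimensions below are illustrative):

config1_dict = {
    "num_inputs": 3,
    "num_outputs": 2,
    "pretrain": "default",
    "network": {
        "hidden_0": {"class": "forward", "activation": "tanh", "n_out": 5},
        "hidden_1": {"class": "forward", "activation": "tanh", "n_out": 5, "from": ["hidden_0"]},
        "output": {"class": "softmax", "from": ["hidden_1"]},
    },
}

With two hidden layers, epoch 1 would contain only hidden_0 and epoch 2 the full network, matching the assertions above (including net2_json == net3_json, since the topology saturates after epoch 2).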
Example #8
def test_config1():
    config = Config()
    config.update(config1_dict)
    pretrain = pretrainFromConfig(config)
    assert_equal(pretrain.get_train_num_epochs(), 2)
    net1_json = pretrain._get_network_json_for_epoch(1)
    net2_json = pretrain._get_network_json_for_epoch(2)
    net3_json = pretrain._get_network_json_for_epoch(3)
    assert_in("hidden_0", net1_json)
    assert_not_in("hidden_1", net1_json)
    assert_in("hidden_0", net2_json)
    assert_in("hidden_1", net2_json)
    assert_equal(net2_json, net3_json)
Example #9
  @classmethod
  def from_config_topology(cls, config, mask=None, **kwargs):
    """
    :type config: Config.Config
    :param str mask: e.g. "unity" or None ("dropout"). "unity" is for testing.
    :rtype: LayerNetwork
    """
    json_content = cls.json_from_config(config, mask=mask)
    from Pretrain import find_pretrain_wrap_values, pretrainFromConfig
    if find_pretrain_wrap_values(json_content):
      pretrain = pretrainFromConfig(config=config)
      assert pretrain, "found Pretrain WrapEpochValue but no pretrain configured"
      json_content = pretrain.get_final_network_json()
    return cls.from_json_and_config(json_content, config, mask=mask, **kwargs)
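find_pretrain_wrap_values scans the network JSON for epoch-dependent placeholders; the assert message names Pretrain's WrapEpochValue as what it detects. A sketch of such an entry (the lambda usage is an assumption):

from Pretrain import WrapEpochValue
network = {
    "hidden": {"class": "forward", "activation": "tanh",
               # layer width grows with the pretrain epoch
               "n_out": WrapEpochValue(lambda epoch: 50 * epoch)},
    "output": {"class": "softmax", "from": ["hidden"]},
}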
Example #10
def demo():
  import better_exchook
  better_exchook.install()
  import rnn
  import sys
  if len(sys.argv) <= 1:
    print("usage: python %s [config] [other options]" % __file__)
    print("example usage: python %s ++learning_rate_control newbob ++learning_rate_file newbob.data ++learning_rate 0.001" % __file__)
  rnn.initConfig(commandLineOptions=sys.argv[1:])
  from Pretrain import pretrainFromConfig
  pretrain = pretrainFromConfig(rnn.config)
  first_non_pretrain_epoch = 1
  pretrain_learning_rate = None
  if pretrain:
    first_non_pretrain_epoch = pretrain.get_train_num_epochs() + 1
  rnn.config._hack_value_reading_debug()
  log.initialize(verbosity=[5])
  control = loadLearningRateControlFromConfig(rnn.config)
  print("LearningRateControl: %r" % control)
  if not control.epochData:
    print("No epoch data so far.")
    return
  if pretrain:
    pretrain_learning_rate = rnn.config.float('pretrain_learning_rate', control.defaultLearningRate)
  maxEpoch = max(control.epochData.keys())
  for epoch in range(1, maxEpoch + 2):  # all epochs [1..maxEpoch+1]
    oldLearningRate = None
    if epoch in control.epochData:
      oldLearningRate = control.epochData[epoch].learningRate
    if epoch < first_non_pretrain_epoch:
      learningRate = pretrain_learning_rate
      s = "Pretrain epoch %i, fixed learning rate: %s (was: %s)" % (epoch, learningRate, oldLearningRate)
    elif first_non_pretrain_epoch > 1 and epoch == first_non_pretrain_epoch:
      learningRate = control.defaultLearningRate
      s = "First epoch after pretrain, epoch %i, fixed learning rate: %s (was %s)" % (epoch, learningRate, oldLearningRate)
    else:
      learningRate = control.calcLearningRateForEpoch(epoch)
      s = "Calculated learning rate for epoch %i: %s (was: %s)" % (epoch, learningRate, oldLearningRate)
    if learningRate < control.minLearningRate:
      learningRate = control.minLearningRate
      s += ", clipped to %s" % learningRate
    s += ", previous relative error: %s" % control.calcRelativeError(epoch - 2, epoch - 1)
    print(s)
    # Overwrite new learning rate so that the calculation for further learning rates stays consistent.
    if epoch in control.epochData:
      control.epochData[epoch].learningRate = learningRate
    else:
      control.epochData[epoch] = control.EpochData(learningRate=learningRate)
  print("Finished, last stored epoch was %i." % maxEpoch)
Example #11
    @classmethod
    def from_config_topology(cls, config, mask=None, **kwargs):
        """
        :type config: Config.Config
        :param str mask: e.g. "unity" or None ("dropout"). "unity" is for testing.
        :rtype: LayerNetwork
        """
        json_content = cls.json_from_config(config, mask=mask)
        from Pretrain import find_pretrain_wrap_values, pretrainFromConfig
        if find_pretrain_wrap_values(json_content):
            pretrain = pretrainFromConfig(config=config)
            assert pretrain, "found Pretrain WrapEpochValue but no pretrain configured"
            json_content = pretrain.get_final_network_json()
        return cls.from_json_and_config(json_content, config, mask=mask, **kwargs)
Example #12
def main(argv):
  argparser = argparse.ArgumentParser(description='Dump network as JSON.')
  argparser.add_argument('crnn_config_file')
  argparser.add_argument('--epoch', default=1, type=int)
  argparser.add_argument('--out', default="/dev/stdout")
  args = argparser.parse_args(argv[1:])
  init(configFilename=args.crnn_config_file, commandLineOptions=[])

  pretrain = pretrainFromConfig(config)
  if pretrain:
    network = pretrain.get_network_for_epoch(args.epoch)
  else:
    network = LayerNetwork.from_config_topology(config)

  json_data = network.to_json_content()
  f = open(args.out, 'w')
  print(json.dumps(json_data, indent=2, sort_keys=True), file=f)
  f.close()

  rnn.finalize()
Example #13
def main(argv):
    argparser = argparse.ArgumentParser(description='Dump network as JSON.')
    argparser.add_argument('crnn_config_file')
    argparser.add_argument('--epoch', default=1, type=int)
    argparser.add_argument('--out', default="/dev/stdout")
    args = argparser.parse_args(argv[1:])
    init(configFilename=args.crnn_config_file, commandLineOptions=[])

    pretrain = pretrainFromConfig(config)
    if pretrain:
        network = pretrain.get_network_for_epoch(args.epoch)
    else:
        network = LayerNetwork.from_config_topology(config)

    json_data = network.to_json_content()
    f = open(args.out, 'w')
    print(json.dumps(json_data, indent=2, sort_keys=True), file=f)
    f.close()

    rnn.finalize()
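Both variants parse argv[1:] via argparse, so they can also be driven directly from Python; a hypothetical call (the script and config file names are made up):

main(["dump-network-json.py", "my-setup.config", "--epoch", "5", "--out", "net.json"])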
Example #14
    def init_network_from_config(self, config):
        self.pretrain = pretrainFromConfig(config)
        self.max_seqs = config.int('max_seqs', -1)

        epoch, model_epoch_filename = self.get_epoch_model(config)
        assert model_epoch_filename or self.start_epoch

        if self.pretrain:
            # This would be obsolete if we don't want to load an existing model.
            # In self.init_train_epoch(), we initialize a new model.
            net_dict = self.pretrain.get_network_json_for_epoch(
                epoch or self.start_epoch)
        else:
            net_dict = LayerNetwork.json_from_config(config)

        self._init_network(net_desc=net_dict, epoch=epoch or self.start_epoch)

        if model_epoch_filename:
            print("loading weights from", model_epoch_filename, file=log.v2)
            self.network.load_params_from_file(model_epoch_filename,
                                               session=self.tf_session)
Example #15
def test_init_config1():
    config = Config()
    config.update(config1_dict)
    pretrain = pretrainFromConfig(config)
    assert_true(pretrain)
Example #16
def test_config2():
    config = Config()
    config.update(config2_dict)
    pretrain = pretrainFromConfig(config)
    assert_equal(pretrain.get_train_num_epochs(), 3)
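As with config1_dict, config2_dict is not shown; under the same greedy layer-wise assumption, three pretrain epochs would correspond to three stacked hidden layers (illustrative sketch):

config2_dict = {
    "num_inputs": 3,
    "num_outputs": 2,
    "pretrain": "default",
    "network": {
        "hidden_0": {"class": "forward", "activation": "tanh", "n_out": 5},
        "hidden_1": {"class": "forward", "activation": "tanh", "n_out": 5, "from": ["hidden_0"]},
        "hidden_2": {"class": "forward", "activation": "tanh", "n_out": 5, "from": ["hidden_1"]},
        "output": {"class": "softmax", "from": ["hidden_2"]},
    },
}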
Example #17
def test_config3():
  config = Config()
  config.update(config3_dict)
  config.network_topology_json = config3_json
  pretrain = pretrainFromConfig(config)
  assert_equal(pretrain.get_train_num_epochs(), 3)
Example #18
def test_config2():
  config = Config()
  config.update(config2_dict)
  pretrain = pretrainFromConfig(config)
  assert_equal(pretrain.get_train_num_epochs(), 3)
Example #19
def test_config3():
    config = Config()
    config.update(config3_dict)
    config.network_topology_json = config3_json
    pretrain = pretrainFromConfig(config)
    assert_equal(pretrain.get_train_num_epochs(), 3)
Example #20
def test_init_config1():
  config = Config()
  config.update(config1_dict)
  pretrain = pretrainFromConfig(config)
  assert_true(pretrain)
Example #21
def main():
    argparser = ArgumentParser(description=__doc__,
                               formatter_class=RawTextHelpFormatter)
    argparser.add_argument("--model",
                           required=True,
                           help="or config, or setup")
    argparser.add_argument("--epoch", required=True, type=int)
    argparser.add_argument("--prior",
                           help="none, fixed, softmax (default: none)")
    argparser.add_argument("--prior_scale", type=float, default=1.0)
    argparser.add_argument("--am_scale", type=float, default=1.0)
    argparser.add_argument("--tdp_scale", type=float, default=1.0)
    args = argparser.parse_args()

    cfg_fn = args.model
    if "/" not in cfg_fn:
        cfg_fn = "config-train/%s.config" % cfg_fn
    assert os.path.exists(cfg_fn)
    setup_name = os.path.splitext(os.path.basename(cfg_fn))[0]
    setup_dir = "data-train/%s" % setup_name
    assert os.path.exists(setup_dir)
    Globals.setup_name = setup_name
    Globals.setup_dir = setup_dir
    Globals.epoch = args.epoch

    config_update["epoch"] = args.epoch
    config_update["load_epoch"] = args.epoch
    config_update["model"] = "%s/net-model/network" % setup_dir

    import rnn
    rnn.init(configFilename=cfg_fn,
             config_updates=config_update,
             extra_greeting="calc full sum score.")
    Globals.engine = rnn.engine
    Globals.config = rnn.config
    Globals.dataset = rnn.dev_data

    assert Globals.engine and Globals.config and Globals.dataset
    # This will init the network, load the params, etc.
    Globals.engine.init_train_from_config(config=Globals.config,
                                          dev_data=Globals.dataset)

    # Do not modify the network here. Not needed.
    softmax_prior = get_softmax_prior()

    prior = args.prior or "none"
    if prior == "none":
        prior_filename = None
    elif prior == "softmax":
        prior_filename = softmax_prior
    elif prior == "fixed":
        prior_filename = "dependencies/prior-fixed-f32.xml"
    else:
        raise Exception("invalid prior %r" % prior)
    print("using prior:", prior)
    if prior_filename:
        assert os.path.exists(prior_filename)
        check_valid_prior(prior_filename)

    print("Do the stuff...")
    print("Reinit dataset.")
    Globals.dataset.init_seq_order(epoch=args.epoch)

    network_update["out_fullsum_scores"]["eval_locals"][
        "am_scale"] = args.am_scale
    network_update["out_fullsum_scores"]["eval_locals"][
        "prior_scale"] = args.prior_scale
    network_update["out_fullsum_bw"]["tdp_scale"] = args.tdp_scale
    if prior_filename:
        network_update["out_fullsum_prior"][
            "init"] = "load_txt_file(%r)" % prior_filename
    else:
        network_update["out_fullsum_prior"]["init"] = 0
    from copy import deepcopy
    Globals.config.typed_dict["network"] = deepcopy(
        Globals.config.typed_dict["network"])
    Globals.config.typed_dict["network"].update(network_update)
    # Reinit the network, and copy over params.
    from Pretrain import pretrainFromConfig
    pretrain = pretrainFromConfig(
        Globals.config)  # reinit Pretrain topologies if used
    if pretrain:
        new_network_desc = pretrain.get_network_json_for_epoch(Globals.epoch)
    else:
        new_network_desc = Globals.config.typed_dict["network"]
    assert "output_fullsum" in new_network_desc
    print("Init new network.")
    Globals.engine.maybe_init_new_network(new_network_desc)

    print("Calc scores.")
    calc_fullsum_scores(meta=dict(prior=prior,
                                  prior_scale=args.prior_scale,
                                  am_scale=args.am_scale,
                                  tdp_scale=args.tdp_scale))

    rnn.finalize()
    print("Bye.")