def demo():
  """
  Prints the network topology JSON for each pretrain epoch of the given config.
  """
  import better_exchook
  better_exchook.install()
  import rnn
  import sys
  if len(sys.argv) <= 1:
    print("usage: python %s [config] [other options]" % __file__)
    print("example usage: python %s ++pretrain default ++pretrain_construction_algo from_input" % __file__)
  rnn.initConfig(commandLineOptions=sys.argv[1:])
  rnn.config._hack_value_reading_debug()
  rnn.config.update({"log": []})
  rnn.initLog()
  rnn.initBackendEngine()
  if not rnn.config.value("pretrain", ""):
    print("config option 'pretrain' not set, will set it for this demo to 'default'")
    rnn.config.set("pretrain", "default")
  pretrain = pretrainFromConfig(rnn.config)
  print("pretrain: %s" % pretrain)
  num_pretrain_epochs = pretrain.get_train_num_epochs()
  from pprint import pprint
  for epoch in range(1, 1 + num_pretrain_epochs):
    print("epoch %i (of %i) network json:" % (epoch, num_pretrain_epochs))
    net_json = pretrain.get_network_json_for_epoch(epoch)
    pprint(net_json)
  print("done.")
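# Script entry point, so the demo can be run directly from the command line, e.g.:
#   python Pretrain.py ++pretrain default ++pretrain_construction_algo from_input
# (the module filename here is just an example).
if __name__ == "__main__":
  demo()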
def init(config_filename, log_verbosity):
  """
  :param str config_filename: filename to config-file
  :param int log_verbosity:
  """
  rnn.initBetterExchook()
  rnn.initThreadJoinHack()
  if config_filename:
    print("Using config file %r." % config_filename)
    assert os.path.exists(config_filename)
  rnn.initConfig(configFilename=config_filename, commandLineOptions=[])
  global config
  config = rnn.config
  config.set("log", None)
  config.set("log_verbosity", log_verbosity)
  config.set("use_tensorflow", True)
  rnn.initLog()
  print("Returnn compile-native-op starting up.", file=log.v1)
  rnn.returnnGreeting()
  rnn.initBackendEngine()
  assert Util.BackendEngine.is_tensorflow_selected(), "this is only for TensorFlow"
  rnn.initFaulthandler()
  rnn.initConfigJsonNetwork()
  if 'network' in config.typed_dict:
    print("Loading network")
    from TFNetwork import TFNetwork
    network = TFNetwork(
      name="root", config=config, rnd_seed=1,
      train_flag=False, eval_flag=True, search_flag=False)
    network.construct_from_dict(config.typed_dict["network"])
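# A minimal sketch of how init() might be driven from a script entry point.
# The argparse flags ("--config", "--verbosity") are illustrative assumptions
# for this sketch, not necessarily the tool's actual command-line interface.
def _main_sketch():
  from argparse import ArgumentParser
  argparser = ArgumentParser(description="compile native op")
  argparser.add_argument("--config", help="filename to config-file")
  argparser.add_argument("--verbosity", type=int, default=4, help="log verbosity")
  args = argparser.parse_args()
  init(config_filename=args.config, log_verbosity=args.verbosity)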
def init(config_filename, log_verbosity):
  """
  :param str config_filename: filename to config-file
  :param int log_verbosity:
  """
  rnn.initBetterExchook()
  rnn.initThreadJoinHack()
  if config_filename:
    print("Using config file %r." % config_filename)
    assert os.path.exists(config_filename)
  rnn.initConfig(configFilename=config_filename, commandLineOptions=[])
  global config
  config = rnn.config
  config.set("task", "calculate_wer")
  config.set("log", None)
  config.set("log_verbosity", log_verbosity)
  config.set("use_tensorflow", True)
  rnn.initLog()
  print("Returnn calculate-word-error-rate starting up.", file=log.v1)
  rnn.returnnGreeting()
  rnn.initBackendEngine()
  assert Util.BackendEngine.is_tensorflow_selected(), "this is only for TensorFlow"
  rnn.initFaulthandler()
  rnn.initConfigJsonNetwork()
  rnn.printTaskProperties()
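# For context: a self-contained sketch of word error rate (WER) as plain
# Levenshtein distance over word sequences, normalized by reference length.
# This only illustrates the metric behind the "calculate_wer" task; it is
# not the actual implementation used by this tool.
def _wer_sketch(hyp_words, ref_words):
  """
  :param list[str] hyp_words: hypothesis
  :param list[str] ref_words: reference
  :rtype: float
  """
  # dp[j] = edit distance between the hyp prefix seen so far and ref_words[:j].
  dp = list(range(len(ref_words) + 1))
  for i in range(1, len(hyp_words) + 1):
    prev_diag, dp[0] = dp[0], i
    for j in range(1, len(ref_words) + 1):
      cur = dp[j]
      dp[j] = min(
        dp[j] + 1,  # insertion (extra hyp word)
        dp[j - 1] + 1,  # deletion (missing ref word)
        prev_diag + int(hyp_words[i - 1] != ref_words[j - 1]))  # substitution
      prev_diag = cur
  return float(dp[-1]) / max(len(ref_words), 1)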
def initBase(configfile=None, targetMode=None, epoch=None):
  """
  :param str|None configfile: filename, via init(), this is set
  :param str|None targetMode: "forward" or so. via init(), this is set
  :param int epoch: via init(), this is set
  """
  global isInitialized
  isInitialized = True
  # Run through in any case. Maybe just to set targetMode.

  if not getattr(sys, "argv", None):
    # Set some dummy. Some code might want this (e.g. TensorFlow).
    sys.argv = [__file__]

  global config
  if not config:
    # Some subset of what we do in rnn.init().
    rnn.initBetterExchook()
    rnn.initThreadJoinHack()
    if configfile is None:
      configfile = DefaultSprintCrnnConfig
    assert os.path.exists(configfile)
    rnn.initConfig(configFilename=configfile)
    config = rnn.config
    rnn.initLog()
    rnn.returnnGreeting(configFilename=configfile)
    rnn.initBackendEngine()
    rnn.initFaulthandler(sigusr1_chain=True)
    rnn.initConfigJsonNetwork()

  if BackendEngine.is_tensorflow_selected():
    # Use TFEngine.Engine class instead of Engine.Engine.
    import TFEngine
    global Engine
    Engine = TFEngine.Engine

  import atexit
  atexit.register(_at_exit_handler)

  if targetMode:
    setTargetMode(targetMode)

  initDataset()

  if targetMode and targetMode == "forward" and epoch:
    model_filename = config.value('model', '')
    fns = [Engine.epoch_model_filename(model_filename, epoch, is_pretrain)
           for is_pretrain in [False, True]]
    fn_postfix = ""
    if BackendEngine.is_tensorflow_selected():
      fn_postfix += ".meta"
    fns_existing = [fn for fn in fns if os.path.exists(fn + fn_postfix)]
    assert len(fns_existing) == 1, "%s not found" % fns
    model_epoch_filename = fns_existing[0]
    config.set('load', model_epoch_filename)
    assert Engine.get_epoch_model(config)[1] == model_epoch_filename, \
      "%r != %r" % (Engine.get_epoch_model(config), model_epoch_filename)

  global engine
  if not engine:
    devices = rnn.initDevices()
    rnn.printTaskProperties(devices)
    rnn.initEngine(devices)
    engine = rnn.engine
    assert isinstance(engine, Engine)
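# For illustration only: a rough sketch of the filename convention the
# epoch-model lookup above relies on, assuming Engine.epoch_model_filename
# follows RETURNN's usual "<model>.<epoch:03d>" (and ".pretrain" variant)
# naming scheme.
def _epoch_model_filename_sketch(model_filename, epoch, is_pretrain):
  """
  :param str model_filename:
  :param int epoch:
  :param bool is_pretrain:
  :rtype: str

  >>> _epoch_model_filename_sketch("net-model/network", 80, False)
  'net-model/network.080'
  """
  return "%s%s.%03d" % (model_filename, ".pretrain" if is_pretrain else "", epoch)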
def demo():
  import better_exchook
  better_exchook.install()
  import rnn
  import sys
  if len(sys.argv) <= 1:
    print("usage: python %s [config] [other options] [++check_learning_rates 1]" % __file__)
    print("example usage: python %s ++learning_rate_control newbob ++learning_rate_file newbob.data ++learning_rate 0.001" % __file__)
  rnn.initConfig(commandLineOptions=sys.argv[1:])
  rnn.config._hack_value_reading_debug()
  rnn.config.update({"log": []})
  rnn.initLog()
  rnn.initBackendEngine()
  check_lr = rnn.config.bool("check_learning_rates", False)
  from Pretrain import pretrainFromConfig
  pretrain = pretrainFromConfig(rnn.config)
  first_non_pretrain_epoch = 1
  pretrain_learning_rate = None
  if pretrain:
    first_non_pretrain_epoch = pretrain.get_train_num_epochs() + 1
  log.initialize(verbosity=[5])
  control = loadLearningRateControlFromConfig(rnn.config)
  print("LearningRateControl: %r" % control)
  if not control.epochData:
    print("No epoch data so far.")
    return
  firstEpoch = min(control.epochData.keys())
  if firstEpoch != 1:
    print("Strange, first epoch from epoch data is %i." % firstEpoch)
  print("Error key: %s from %r" % (control.getErrorKey(epoch=firstEpoch), control.epochData[firstEpoch].error))
  if pretrain:
    pretrain_learning_rate = rnn.config.float('pretrain_learning_rate', control.defaultLearningRate)
  maxEpoch = max(control.epochData.keys())
  for epoch in range(1, maxEpoch + 2):  # all epochs [1..maxEpoch+1]
    oldLearningRate = None
    if epoch in control.epochData:
      oldLearningRate = control.epochData[epoch].learningRate
    if epoch < first_non_pretrain_epoch:
      learningRate = pretrain_learning_rate
      s = "Pretrain epoch %i, fixed learning rate: %s (was: %s)" % (epoch, learningRate, oldLearningRate)
    elif first_non_pretrain_epoch > 1 and epoch == first_non_pretrain_epoch:
      learningRate = control.defaultLearningRate
      s = "First epoch after pretrain, epoch %i, fixed learning rate: %s (was %s)" % (epoch, learningRate, oldLearningRate)
    else:
      learningRate = control.calcNewLearnignRateForEpoch(epoch)
      s = "Calculated learning rate for epoch %i: %s (was: %s)" % (epoch, learningRate, oldLearningRate)
    if learningRate < control.minLearningRate:
      learningRate = control.minLearningRate
      s += ", clipped to %s" % learningRate
    s += ", previous relative error: %s" % control.calcRelativeError(epoch - 2, epoch - 1)
    if hasattr(control, "_calcRecentMeanRelativeError"):
      s += ", previous mean relative error: %s" % control._calcRecentMeanRelativeError(epoch)
    print(s)
    if check_lr and oldLearningRate is not None:
      if oldLearningRate != learningRate:
        print("Learning rate is different in epoch %i!" % epoch)
        sys.exit(1)
    # Overwrite new learning rate so that the calculation for further learning rates stays consistent.
    if epoch in control.epochData:
      control.epochData[epoch].learningRate = learningRate
    else:
      control.epochData[epoch] = control.EpochData(learningRate=learningRate)
  print("Finished, last stored epoch was %i." % maxEpoch)
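# Script entry point, mirroring the usage strings printed above, e.g.:
#   python LearningRateControl.py my.config ++check_learning_rates 1
# (the module and config filenames here are just examples).
if __name__ == "__main__":
  demo()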