Example #1
        "2: Only during validation\n"
        "3: Only during last validation, after training is completed\n")

    # Parse arguments.
    FLAGS, unparsed = parser.parse_known_args()

    # Check if config file was selected.
    if FLAGS.config == '':
        print('Please pass configuration file(s) as --c parameter')
        exit(-1)

    # Get list of configs that need to be loaded.
    configs_to_load = recurrent_config_parse(FLAGS.config, [])
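    # recurrent_config_parse() is assumed to return the given config file(s)
    # together with, recursively, any configs they reference (e.g. through a
    # 'default_configs' entry), so that defaults can be layered underneath.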

    # Create param interface object.
    param_interface = ParamInterface()

    # Read the YAML files one by one - but in reverse order!
    for config in reversed(configs_to_load):
        # Open the file and add its parameters to the registry.
        with open(config, 'r') as stream:
            # safe_load replaces the deprecated (and unsafe) bare yaml.load.
            param_interface.add_custom_params(yaml.safe_load(stream))
        print('Loaded configuration from file {}'.format(config))
        # Note: the original snippet appended each config back onto
        # configs_to_load here, duplicating entries of the list being
        # iterated; that call was a bug and has been removed.
    # Done. At this point the Param Registry contains the configuration
    # loaded (and overwritten) from several files.
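
    # Override semantics, assuming add_custom_params() performs a recursive
    # dictionary update: loading {'training': {'batch_size': 64, 'lr': 0.01}}
    # followed by {'training': {'batch_size': 128}} leaves batch_size == 128
    # while 'lr' keeps its earlier value.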

    # Get problem and model names (the lookups below are an assumption
    # about the configuration layout).
    try:
        task_name = param_interface['training']['problem']['name']
        model_name = param_interface['model']['name']
    except KeyError:
        print("Error: Couldn't retrieve the problem or model name from the loaded configuration")
        exit(-1)


Example #2

        # Create the auxiliary tuple (mask, sequence length, number of
        # subsequences).
        aux_tuple = AlgSeqAuxTuple(mask, seq_length, 1)

        return data_tuple, aux_tuple

    def set_max_length(self, max_length):
        """ Method for changing the maximum sequence length, used mainly during curriculum learning. """
        self.max_sequence_length = max_length
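
    # A possible curriculum schedule built on set_max_length() (a sketch,
    # all numbers illustrative): grow the length cap by one every 1000
    # episodes, up to a fixed maximum, e.g.
    #
    #     problem.set_max_length(min(2 + episode // 1000, 20))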


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'control_bits': 4,
        'data_bits': 8,
        'batch_size': 2,
        # 'randomize_control_lines': True,
        'min_sequence_length': 1,
        'max_sequence_length': 10
    })
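    # 'control_bits' and 'data_bits' are assumed to set the width of the
    # marker and content channels of every item in a generated sequence.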
    # Create problem object.
    problem = DualSerialReverseRecallCommandLines(params)
    # Get generator.
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
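    # DataTuple is assumed to be an (inputs, targets) named tuple, so the
    # batch tensors can be inspected directly, e.g.:
    #     print(data_tuple.inputs.shape, data_tuple.targets.shape)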
    # Display single sample (0) from batch. The visualization call below is
    # an assumption about the problem's API.
    problem.show_sample(data_tuple, aux_tuple, sample_number=0)

Example #3

    # Add problem folder to path for import.
    import os
    import sys
    sys.path.insert(
        0,
        os.path.normpath(
            os.path.join(os.getcwd(), '../../problems/seq_to_seq/text2text')))
    import translation as pb

    # English prefixes used to filter the sentence pairs.
    eng_prefixes = ("i am ", "i m ", "he is", "he s ", "she is", "she s",
                    "you are", "you re ", "we are", "we re ", "they are",
                    "they re ")
    from utils.param_interface import ParamInterface

    params = ParamInterface()
    params.add_custom_params({
        'batch_size': 64,
        'training_size': 0.90,
        'output_lang_name': 'fra',
        'max_sequence_length': 15,
        'eng_prefixes': eng_prefixes,
        'use_train_data': True,
        'data_folder': '~/data/language',
        'reverse': False
    })
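    # 'training_size': 0.90 is assumed to split the corpus 90% / 10%
    # between the training and validation sets.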

    # Instantiate problem.
    problem = pb.Translation(params)
    print('Problem successfully created.\n')

    # Get size of vocabulary for input & output language.
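    # The lookups below assume the problem exposes tutorial-style Lang
    # objects, each carrying an 'n_words' vocabulary counter.
    input_voc_size = problem.input_lang.n_words
    output_voc_size = problem.output_lang.n_words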