        return data_tuple, aux_tuple

    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'control_bits': 4,
        'data_bits': 8,
        'batch_size': 2,
        # 'randomize_control_lines': True,
        'min_sequence_length': 1,
        'max_sequence_length': 10
    })
    # Create problem object.
    problem = DualSerialReverseRecallCommandLines(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
    # Display single sample (0) from batch.
    problem.show_sample(data_tuple, aux_tuple)
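
    # A minimal curriculum-learning sketch built on set_max_length() above; the
    # schedule values here are illustrative assumptions, not taken from the source.
    for episode in range(4):
        problem.set_max_length(2 + 2 * episode)   # grow the max length: 2, 4, 6, 8
        data_tuple, aux_tuple = next(generator)   # draw a batch at the new limit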
Example No. 2
        x = F.relu(self.fc2(x))
        x = F.dropout(x, training=self.training)  # active only in training mode
        x = self.fc3(x)
        return F.log_softmax(x, dim=-1)
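        # (Editor's note: the log-probabilities returned above pair with nn.NLLLoss;
        # returning raw logits and using nn.CrossEntropyLoss would be equivalent.)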


if __name__ == '__main__':
    # Set visualization.
    from utils.app_state import AppState
    AppState().visualize = True

    # Test base model.
    import numpy as np
    import torch
    from utils.param_interface import ParamInterface

    params = ParamInterface()
    params.add_custom_params({})

    # model
    model = MultiHopsAttention(params)

    while True:
        # Generate new sequence.
        # "Image" - batch x channels x width x height
        input_np = np.random.binomial(1, 0.5, (1, 3, 128, 128))
        image = torch.from_numpy(input_np).type(torch.FloatTensor)

        # Question
        questions_np = np.random.binomial(1, 0.5, (1, 3, 7))
        questions = torch.from_numpy(questions_np).type(torch.FloatTensor)

        # Target.
Example No. 3
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.dropout(x, training=self.training)  # active only in training mode
        x = self.fc3(x)
        return F.log_softmax(x, dim=-1)


if __name__ == '__main__':
    # Set visualization.
    from utils.app_state import AppState
    AppState().visualize = True

    # Test base model.
    import numpy as np
    import torch
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({'use_question_encoding': False,
                              'pretrained_cnn_model': 'resnet18',
                              'num_blocks': 2,
                              'use_pretrained_cnn': True,
                              'word_embedded_size': 7})
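
    # (Editor's note: pretrained_cnn_model/use_pretrained_cnn presumably select a
    # torchvision backbone - resnet18 here - while word_embedded_size matches the
    # 7-dim word vectors generated for the question below.)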

    # model
    model = StackedAttentionVQA(params)

    while True:
        # Generate new sequence.
        # "Image" - batch x channels x width x height
        input_np = np.random.binomial(1, 0.5, (2, 3, 128, 128))
        image = torch.from_numpy(input_np).type(torch.FloatTensor)

        # Question
        if params['use_question_encoding']:
            questions_np = np.random.binomial(1, 0.5, (2, 13, 7))
Example No. 4
        return data_tuple, aux_tuple

    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface 
    params = ParamInterface()
    params.add_custom_params({'control_bits': 4,
                              'data_bits': 8,
                              'batch_size': 1,
                              'min_sequence_length': 1,
                              'max_sequence_length': 10,
                              'num_subseq_min': 1,
                              'num_subseq_max': 4})
    # Create problem object.
    problem = InterruptionReverseRecall(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
    # Display single sample (0) from batch.
    problem.show_sample(data_tuple, aux_tuple)
Example No. 5
        aux_tuple = AlgSeqAuxTuple(mask, seq_length, 1)

        return data_tuple, aux_tuple

    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface 
    params = ParamInterface()
    params.add_custom_params({'control_bits': 2,
                              'data_bits': 8,
                              'batch_size': 1,
                              'min_sequence_length': 1,
                              'max_sequence_length': 10,
                              'num_bits': 0.5})
    # Create problem object.
    problem = ManipulationSpatialRotation(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
    # Display single sample (0) from batch.
    problem.show_sample(data_tuple, aux_tuple)
Example No. 6
    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'control_bits': 4,
        'data_bits': 8,
        'batch_size': 1,
        'min_sequence_length': 1,
        'max_sequence_length': 10,
        'seq_start': 0,
        'skip_step': 2
    })
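    # (Editor's note: with seq_start=0 and skip_step=2 the task presumably asks
    # for recall of every second stored item, i.e. indices 0, 2, 4, ...)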
    # Create problem object.
    problem = SkipRecallCommandLines(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
    # Display single sample (0) from batch.
    problem.show_sample(data_tuple, aux_tuple)
Example No. 7
        'context_input_size': 32,
        'input_size': input_size,
        'output_size': 10,
        'center_size': 1,
        'center_size_per_module': 32,
        'num_modules': 4
    }
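
    # (Editor's note: ThalNet routes all communication between its num_modules
    # modules through a shared "center" vector (Hafner et al., 2017);
    # center_size_per_module appears to be the slice of that center each
    # module writes.)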

    # Initialize the application state singleton.
    from utils.app_state import AppState
    app_state = AppState()
    app_state.visualize = True

    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params(params_dict)
    model = ThalNetModel(params)

    seq_length = 10
    batch_size = 2

    # Check for different seq_lengths and batch_sizes (a single iteration here).
    for i in range(1):
        # Create random Tensors to hold inputs and outputs
        x = torch.randn(batch_size, 1, input_size, input_size)
        logits = torch.randn(batch_size, 1, params_dict['output_size'])
        y = x
        data_tuple = (x, y)

        # Test forward pass.
        y_pred = model(data_tuple)
Example No. 8
    max_step = 12
    self_attention = True
    memory_gate = True
    nb_classes = 28
    dropout = 0.15

    from utils.app_state import AppState
    app_state = AppState()

    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'dim': dim,
        'embed_hidden': embed_hidden,
        'max_step': max_step,
        'self_attention': self_attention,
        'memory_gate': memory_gate,
        'nb_classes': nb_classes,
        'dropout': dropout
    })

    net = MACNetwork(params)

    import torch
    import numpy as np
    from problems.image_text_to_class.clevr import DataTuple, ImageTextTuple

    batch_size = 64
    embedded_dim = 300
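    # Fake CLEVR-style visual input: the 1024 x 14 x 14 maps generated below match
    # the ResNet feature-map shape conventionally fed to MAC-style networks
    # (an editor's note, not from the source).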
    images = torch.from_numpy(
        np.random.binomial(n=1, p=0.5, size=(batch_size, 1024, 14,
Example No. 9
    if FLAGS.config == '':
        print('Please pass configuration file(s) as the --c parameter')
        exit(-1)

    # Get list of configs that need to be loaded.
    configs_to_load = recurrent_config_parse(FLAGS.config, [])

    # Create param interface object.
    param_interface = ParamInterface()

    # Read the YAML files one by one - but in reverse order!
    configs_loaded = []
    for config in reversed(configs_to_load):
        # Open the file and add its parameter dictionary to the registry.
        with open(config, 'r') as stream:
            param_interface.add_custom_params(yaml.load(stream, Loader=yaml.SafeLoader))
        print('Loaded configuration from file {}'.format(config))
        # Track the configs loaded so far in a separate list, so that we never
        # append to the list we are iterating over.
        configs_loaded.append(config)
    # Done. At this point the param registry contains the configuration loaded
    # (and overwritten) from the listed files.

    # Get problem and model names.
    try:
        task_name = param_interface['training']['problem']['name']
    except BaseException:
        print(
            "Error: Couldn't retrieve problem name from the loaded configuration"
        )
        exit(-1)
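
    # (Editor's note: an illustration of the override semantics implied by the
    # reversed() loop above. With configs_to_load = ['child.yaml', 'base.yaml'],
    # base.yaml is read first and child.yaml last, so the child's entries
    # overwrite the base's in the parameter registry. The file names are
    # hypothetical.)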
Example No. 10
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.dropout(x, training=self.training)  # active only in training mode
        x = self.fc3(x)
        return F.log_softmax(x, dim=-1)


if __name__ == '__main__':
    # Set visualization.
    from utils.app_state import AppState
    AppState().visualize = True

    # Test base model.
    # "Loaded parameters".
    import numpy as np
    import torch
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({'use_question_encoding': False})

    # model
    model = CNNLSTMVQA(params)

    while True:
        # Generate new sequence.
        # "Image" - batch x channels x width x height
        input_np = np.random.binomial(1, 0.5, (2, 3, 128, 128))
        image = torch.from_numpy(input_np).type(torch.FloatTensor)

        # Question
        if params['use_question_encoding']:
            questions_np = np.random.binomial(1, 0.5, (2, 13, 7))
        else:
            questions_np = np.random.binomial(1, 0.5, (2, 13))
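        # (Editor's note: the two branches presumably feed either pre-embedded
        # 7-dim word vectors (batch x 13 x 7) or a flat 13-element question
        # representation, depending on whether the model encodes the question itself.)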
Example No. 11
        os.path.normpath(
            os.path.join(os.getcwd(), '../../problems/seq_to_seq/text2text')))
    import translation as pb

    # instantiate problem
    eng_prefixes = ("i am ", "i m ", "he is", "he s ", "she is", "she s",
                    "you are", "you re ", "we are", "we re ", "they are",
                    "they re ")
    from utils.param_interface import ParamInterface

    params = ParamInterface()
    params.add_custom_params({
        'batch_size': 64,
        'training_size': 0.90,
        'output_lang_name': 'fra',
        'max_sequence_length': 15,
        'eng_prefixes': eng_prefixes,
        'use_train_data': True,
        'data_folder': '~/data/language',
        'reverse': False
    })

    problem = pb.Translation(params)
    print('Problem successfully created.\n')

    # get size of vocabulary for input & output language
    input_voc_size = problem.input_lang.n_words
    output_voc_size = problem.output_lang.n_words
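    # (Editor's note: these vocabulary sizes typically size the encoder and
    # decoder embedding layers of the seq2seq model instantiated next.)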

    # instantiate model with sensible parameters
    from utils.param_interface import ParamInterface
Example No. 12
    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'name': 'serial_recall_original',
        'control_bits': 4,
        'data_bits': 8,
        'batch_size': 1,
        'min_sequence_length': 1,
        'max_sequence_length': 10,
        'num_subseq_min': 1,
        'num_subseq_max': 4
    })
    # Create problem object.
    problem = InterruptionNot(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
    # Display single sample (0) from batch.
    problem.show_sample(data_tuple, aux_tuple)
Example No. 13
    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'control_bits': 4,
        'data_bits': 8,
        'batch_size': 1,
        # 'predict_inverse': False,
        'min_sequence_length': 3,
        'max_sequence_length': 5
    })
    # Create problem object.
    problem = SequenceSymmetryCommandLines(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)

    # Display single sample (0) from batch.
    problem.show_sample(data_tuple, aux_tuple)
Example No. 14

if __name__ == '__main__':
    # Set visualization.
    from utils.app_state import AppState
    AppState().visualize = True

    # Test base model.
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'depth_conv1': 10,
        'depth_conv2': 20,
        'filter_size_conv1': 5,
        'filter_size_conv2': 5,
        'num_pooling': 2,
        'num_channels': 1,
        'up_scaling': None,
        'height': 28,
        'width': 28,
        'padding': (0, 0, 0, 0)
    })
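
    # (Editor's note: with these settings a 28x28 input gives 24x24 maps after
    # conv1 (5x5, no padding), 12x12 after 2x2 pooling, 8x8 after conv2, and
    # 4x4 after the second pooling, i.e. a 20 x 4 x 4 tensor into the classifier,
    # assuming stride-1 convolutions and 2x2 max-pooling.)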

    # model
    model = SimpleConvNet(params)

    while True:
        # Generate new sequence.
        # "Image" - batch x channels x width x height
        input_np = np.random.binomial(1, 0.5, (1, 1, 28, 28))
        input = torch.from_numpy(input_np).type(torch.FloatTensor)
        # Target.