        mask = torch.zeros(self.num_rows * self.num_columns)
        mask[-1] = 1

        # train_loader is a generator yielding (data, label) tuples.
        (data, label) = next(train_loader)

        # Return DataTuple and a MaskAuxTuple containing the last-pixel mask.
        return DataTuple(data, label), MaskAuxTuple(mask.type(torch.uint8))


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface 
    params = ParamInterface()
    params.add_default_params({'batch_size': 3, 'start_index': 0, 'stop_index': 54999,
              'use_train_data': True, 'mnist_folder': '~/data/mnist'})

    # Create problem object.
    problem = SequentialPixelMNIST(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    num_rows = 28
    num_columns = 28
    sample_num = 0
    data_tuple, _ = next(generator)
    x, y = data_tuple

    print(x.size())
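
For context, a minimal sketch (not from the original file) of how a last-pixel mask like the one built at the top of this example is typically used: selecting the final time step of a recurrent model's per-pixel logits. The logits shape and class count below are illustrative assumptions.

    import torch

    num_rows, num_columns = 28, 28
    mask = torch.zeros(num_rows * num_columns)
    mask[-1] = 1

    # Hypothetical logits of a recurrent model: [batch, seq_len, num_classes].
    logits = torch.randn(3, num_rows * num_columns, 10)
    # Keep only the masked (last) time step, e.g. for loss computation.
    last_step = logits[:, mask.bool(), :]  # -> torch.Size([3, 1, 10])
    print(last_step.size())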
Example #2
    # NOTE: input_size is assumed to be defined earlier in the original
    # (truncated) file; its definition is not shown in this snippet.
    params_dict = {
        'context_input_size': 32,
        'input_size': input_size,
        'output_size': 10,
        'center_size': 1,
        'center_size_per_module': 32,
        'num_modules': 4
    }

    # Initialize the application state singleton.
    from utils.app_state import AppState
    app_state = AppState()
    app_state.visualize = True

    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params(params_dict)
    model = ThalNetModel(params)

    seq_length = 10
    batch_size = 2

    # Check for different seq_lengths and batch_sizes.
    for i in range(1):
        # Create random Tensors to hold inputs and outputs
        x = torch.randn(batch_size, 1, input_size, input_size)
        logits = torch.randn(batch_size, 1, params_dict['output_size'])
        y = x
        data_tuple = (x, y)

        # Test forward pass.
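
        # NOTE: the example is cut off here. A hedged completion, assuming the
        # model is called directly on the data tuple and returns predictions:
        y_pred = model(data_tuple)
        print(y_pred.size())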
Example #3
            mask, max(seq_lengths_a), nb_sub_seq_a + nb_sub_seq_b)

        return data_tuple, aux_tuple

    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface 
    params = ParamInterface()
    params.add_custom_params({'control_bits': 4,
                              'data_bits': 8,
                              'batch_size': 1,
                              'min_sequence_length': 2,
                              'max_sequence_length': 4,
                              'num_subseq_min': 2,
                              'num_subseq_max': 4,
                              'num_rotation': 0.5})
    # Create problem object.
    problem = OperationSpan(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
    # Display single sample (0) from batch.
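
The set_max_length() method above exists for curriculum learning. A hedged, illustrative sketch of how a training script might use it (the schedule below is an assumption, chosen to match the min/max sequence lengths in the parameters):

    # Grow the maximum sequence length as training progresses.
    for max_len in range(2, 5):
        problem.set_max_length(max_len)
        data_tuple, aux_tuple = next(generator)
        # ... train on batches generated at the current difficulty ...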
Example #4
        aux_tuple = AlgSeqAuxTuple(mask, seq_length, 1)

        return data_tuple, aux_tuple

    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'control_bits': 4,
        'data_bits': 8,
        'batch_size': 1,
        'min_sequence_length': 1,
        'max_sequence_length': 10,
        'seq_start': 0,
        'skip_step': 2
    })
    # Create problem object.
    problem = SkipRecallCommandLines(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
Example #5
            # Color
            Q[i * self.NUM_QUESTIONS:(i + 1) * self.NUM_QUESTIONS,
              1, obj.color] = True
            # Query.
            Q[i * self.NUM_QUESTIONS:(i + 1) * self.NUM_QUESTIONS, 2,
              :num_bits] = query_matrix[:self.NUM_QUESTIONS, :num_bits]

        return Q


if __name__ == "__main__":
    """ Tests Shape-Color-Query - generates and displays a sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface 
    params = ParamInterface()
    params.add_default_params({
        'batch_size': 10,
        'data_folder': '~/data/shape-color-query/',
        'data_filename': 'training.hy',
        'shuffle': True,
        "regenerate": True,
        'use_train_data': True,
        'dataset_size': 100,
        'img_size': 224})

    # Configure logger.
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    logger.debug("params: {}".format(params))

    # Create problem object.
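    # NOTE: the example is cut off here. A hedged completion by analogy with
    # the other problem tests in this listing; the class name 'ShapeColorQuery'
    # is an assumption based on the docstring above.
    problem = ShapeColorQuery(params)
    generator = problem.return_generator()
    data_tuple, aux_tuple = next(generator)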
Example #6
        """

        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.dropout(x)
        x = self.fc3(x)
        return F.log_softmax(x, dim=-1)


if __name__ == '__main__':
    # Set visualization.
    AppState().visualize = True

    # Test base model.
    params = ParamInterface()
    params.add_custom_params({'use_question_encoding': False,
                              'pretrained_cnn_model': 'resnet18',
                              'num_blocks': 2,
                              'use_pretrained_cnn': True,
                              'word_embedded_size': 7})

    # model
    model = StackedAttentionVQA(params)

    while True:
        # Generate new sequence.
        # "Image" - batch x channels x width x height
        input_np = np.random.binomial(1, 0.5, (2, 3, 128, 128))
        image = torch.from_numpy(input_np).type(torch.FloatTensor)
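
        # NOTE: the loop is cut off here. A hedged continuation by analogy with
        # the CNNLSTMVQA test in Example #8 below; shapes and the argument
        # packing are assumptions.
        # Question - batch x sequence x word_embedded_size (7, as configured above).
        questions_np = np.random.binomial(1, 0.5, (2, 13, 7))
        questions = torch.from_numpy(questions_np).type(torch.FloatTensor)

        # Forward pass.
        logits = model((image, questions))
        print(logits.size())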
Example #7
        aux_tuple = AlgSeqAuxTuple(mask, seq_length, 1)

        return data_tuple, aux_tuple

    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'control_bits': 4,
        'data_bits': 8,
        'batch_size': 1,
        # 'predict_inverse': False,
        'min_sequence_length': 3,
        'max_sequence_length': 5
    })
    # Create problem object.
    problem = SequenceSymmetryCommandLines(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
Example #8
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.dropout(x)
        x = self.fc3(x)
        return F.log_softmax(x, dim=-1)


if __name__ == '__main__':
    # Set visualization.
    from utils.app_state import AppState
    AppState().visualize = True

    # Test base model.
    # "Loaded parameters".
    params = ParamInterface()
    params.add_custom_params({'use_question_encoding': False})

    # model
    model = CNNLSTMVQA(params)

    while True:
        # Generate new sequence.
        # "Image" - batch x channels x width x height
        input_np = np.random.binomial(1, 0.5, (2, 3, 128, 128))
        image = torch.from_numpy(input_np).type(torch.FloatTensor)

        # Question
        if params['use_question_encoding']:
            questions_np = np.random.binomial(1, 0.5, (2, 13, 7))
        else:
Example #9

if __name__ == '__main__':
    dim = 512
    embed_hidden = 300
    max_step = 12
    self_attention = True
    memory_gate = True
    nb_classes = 28
    dropout = 0.15

    from utils.app_state import AppState
    app_state = AppState()

    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'dim': dim,
        'embed_hidden': embed_hidden,
        'max_step': max_step,
        'self_attention': self_attention,
        'memory_gate': memory_gate,
        'nb_classes': nb_classes,
        'dropout': dropout
    })

    net = MACNetwork(params)

    import torch
    import numpy as np
    from problems.image_text_to_class.clevr import DataTuple, ImageTextTuple
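
    # NOTE: the example is cut off after these imports. A hedged sketch of what
    # typically follows; tensor shapes are illustrative assumptions (e.g. 14x14
    # CNN feature maps, as commonly used for CLEVR).
    images = torch.randn(2, 1024, 14, 14)
    questions = torch.randn(2, 10, embed_hidden)
    image_text = ImageTextTuple(images, questions)
    # ... a forward pass through 'net' would follow in the original file ...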
Example #10
        self.plotWindow.update(fig, frames)
        return self.plotWindow.is_closed


if __name__ == "__main__":
    # Set logging level.
    logger = logging.getLogger('NTM-Module')
    logging.basicConfig(level=logging.DEBUG)

    # Set visualization.
    from utils.app_state import AppState
    AppState().visualize = True

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_default_params({
        'num_control_bits': 2,
        'num_data_bits': 8,  # input and output size
        # controller parameters
        'controller': {
            'name': 'ffgru',
            'hidden_state_size': 5,
            'num_layers': 1,
            'non_linearity': 'none',
            'ff_output_size': 5
        },
        # interface parameters
        'interface': {
            'num_read_heads': 2,
            'shift_size': 3
Example #11
    # TK: ok, what is going on in here...?

    # add problem folder to path for import
    sys.path.insert(
        0,
        os.path.normpath(
            os.path.join(os.getcwd(), '../../problems/seq_to_seq/text2text')))
    import translation as pb

    # instantiate problem
    eng_prefixes = ("i am ", "i m ", "he is", "he s ", "she is", "she s",
                    "you are", "you re ", "we are", "we re ", "they are",
                    "they re ")
    from utils.param_interface import ParamInterface

    params = ParamInterface()
    params.add_custom_params({
        'batch_size': 64,
        'training_size': 0.90,
        'output_lang_name': 'fra',
        'max_sequence_length': 15,
        'eng_prefixes': eng_prefixes,
        'use_train_data': True,
        'data_folder': '~/data/language',
        'reverse': False
    })

    problem = pb.Translation(params)
    print('Problem successfully created.\n')

    # get size of vocabulary for input & output language
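    # NOTE: the example is cut off here. The Translation problem appears to be
    # based on the PyTorch seq2seq tutorial, whose Lang objects track the
    # vocabulary size in an 'n_words' attribute; the attribute paths below are
    # assumptions about the problem's internals.
    input_voc_size = problem.input_lang.n_words
    output_voc_size = problem.output_lang.n_words
    print('Vocabulary sizes:', input_voc_size, output_voc_size)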
Example #12
                                   nb_sub_seq_a + nb_sub_seq_b)

        return data_tuple, aux_tuple

    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'name': 'serial_recall_original',
        'control_bits': 4,
        'data_bits': 8,
        'batch_size': 1,
        'min_sequence_length': 1,
        'max_sequence_length': 10,
        'num_subseq_min': 1,
        'num_subseq_max': 4
    })
    # Create problem object.
    problem = InterruptionNot(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
Example #13
        # padding data
        data_padded = F.pad(data, self.padding, 'constant', 0)

        # Generate labels for aux tuple
        class_names = [self.mnist_class_names[i] for i in label]

        # Return DataTuple and a LabelAuxTuple with human-readable class names.
        return DataTuple(data_padded, label), LabelAuxTuple(class_names)


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_default_params({
        'batch_size': 2,
        'start_index': 0,
        'stop_index': 54999,
        'use_train_data': True,
        'mnist_folder': '~/data/mnist',
        'padding': [4, 4, 3, 3],
        'up_scaling': False
    })

    # Create problem object.
    problem = MNIST(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
Example #14
        # Generate labels for aux tuple
        class_names = [self.cifar_class_names[i] for i in label]

        # Return DataTuple and a LabelAuxTuple with human-readable class names.
        return DataTuple(data_padded, label), LabelAuxTuple(class_names)


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""
    np.random.seed(0)
    torch.manual_seed(0)

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_default_params({
        'batch_size': 2,
        'start_index': 0,
        'stop_index': 40000,
        'use_train_data': True,
        'folder': '~/data/cifar10',
        'padding': [0, 0, 0, 0],
        'up_scaling': True
    })

    # Create problem object.
    problem = CIFAR10(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
Example #15
        aux_tuple = AlgSeqAuxTuple(mask, seq_length, 1)

        return data_tuple, aux_tuple

    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface 
    params = ParamInterface()
    params.add_custom_params({'control_bits': 2,
                              'data_bits': 8,
                              'batch_size': 1,
                              'min_sequence_length': 1,
                              'max_sequence_length': 10,
                              'num_bits': 0.5})
    # Create problem object.
    problem = ManipulationSpatialRotation(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
    # Display single sample (0) from batch.
    problem.show_sample(data_tuple, aux_tuple)
Example #16
        logits = torch.stack(logits, 1)
        return logits


if __name__ == "__main__":
    # Set logging level.
    logger = logging.getLogger('MAES')
    logging.basicConfig(level=logging.DEBUG)

    # Set visualization.
    from utils.app_state import AppState
    AppState().visualize = True

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_default_params({
        'num_control_bits': 3,
        'num_data_bits': 8,  # input and output size
        'encoding_bit': 0,
        'solving_bit': 1,
        # controller parameters
        'controller': {
            'name': 'rnn',
            'hidden_state_size': 20,
            'num_layers': 1,
            'non_linearity': 'sigmoid'
        },
        'mae_interface': {
            'shift_size': 3
        },  # encoder interface parameters
Example #17
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.dropout(x)
        x = self.fc3(x)
        return F.log_softmax(x, dim=-1)


if __name__ == '__main__':
    # Set visualization.
    from utils.app_state import AppState
    AppState().visualize = True

    # Test base model.
    from utils.param_interface import ParamInterface

    params = ParamInterface()
    params.add_custom_params({})

    # model
    model = MultiHopsAttention(params)

    while True:
        # Generate new sequence.
        # "Image" - batch x channels x width x height
        input_np = np.random.binomial(1, 0.5, (1, 3, 128, 128))
        image = torch.from_numpy(input_np).type(torch.FloatTensor)

        # Question
        questions_np = np.random.binomial(1, 0.5, (1, 3, 7))
        questions = torch.from_numpy(questions_np).type(torch.FloatTensor)
Example #18
        "2: Only during validation\n"
        "3: Only during last validation, after training is completed\n")

    # Parse arguments.
    FLAGS, unparsed = parser.parse_known_args()

    # Check if config file was selected.
    if FLAGS.config == '':
        print('Please pass configuration file(s) as --c parameter')
        exit(-1)

    # Get list of configs that need to be loaded.
    configs_to_load = recurrent_config_parse(FLAGS.config, [])

    # Create param interface object.
    param_interface = ParamInterface()

    # Read the YAML files one by one - but in reverse order!
    for config in reversed(configs_to_load):
        # Open file and try to add that to list of parameter dictionaries.
        with open(config, 'r') as stream:
            # Load param dictionaries in reverse order.
            param_interface.add_custom_params(yaml.load(stream, Loader=yaml.FullLoader))
        print('Loaded configuration from file {}'.format(config))
        # Add to list of loaded configs.
        configs_to_load.append(config)
    # Done. In here Param Registry contains configuration loaded (and
    # overwritten) from several files.

    # Get problem and model names.
    try:

Example #19

        aux_tuple = AlgSeqAuxTuple(mask, seq_length, 1)

        return data_tuple, aux_tuple

    # method for changing the maximum length, used mainly during curriculum
    # learning
    def set_max_length(self, max_length):
        self.max_sequence_length = max_length


if __name__ == "__main__":
    """ Tests sequence generator - generates and displays a random sample"""

    # "Loaded parameters".
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'control_bits': 4,
        'data_bits': 8,
        'batch_size': 2,
        # 'randomize_control_lines': True,
        'min_sequence_length': 1,
        'max_sequence_length': 10
    })
    # Create problem object.
    problem = DualSerialReverseRecallCommandLines(params)
    # Get generator
    generator = problem.return_generator()
    # Get batch.
    data_tuple, aux_tuple = next(generator)
    # Display single sample (0) from batch.
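    # NOTE: the example is cut off here. By analogy with Example #15, which
    # ends with the same comment, the final line would be:
    problem.show_sample(data_tuple, aux_tuple)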
Example #20
            ax = plt.subplot(gs[i])
            ax.imshow(self.output_conv2[0, i].detach().numpy())

        # Plot!
        plt.show()
        exit()


if __name__ == '__main__':
    # Set visualization.
    from utils.app_state import AppState
    AppState().visualize = True

    # Test base model.
    from utils.param_interface import ParamInterface
    params = ParamInterface()
    params.add_custom_params({
        'depth_conv1': 10,
        'depth_conv2': 20,
        'filter_size_conv1': 5,
        'filter_size_conv2': 5,
        'num_pooling': 2,
        'num_channels': 1,
        'up_scaling': None,
        'height': 28,
        'width': 28,
        'padding': (0, 0, 0, 0)
    })

    # model
    model = SimpleConvNet(params)
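
    # NOTE: the listing ends here. A minimal, hedged forward check consistent
    # with the parameters above (1 channel, 28x28 input); the call convention
    # and the (images, targets) packing are assumptions.
    import torch
    images = torch.randn(1, 1, 28, 28)
    targets = torch.randint(0, 10, (1,))
    logits = model((images, targets))
    print(logits.size())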