Example #1
        elif mode == 'inference':
            # Decode from the encoder states alone (no ground-truth captions fed in).
            seq_logProb, seq_predictions = self.decoder.infer(
                encoder_last_hidden_state=encoder_last_hidden_state,
                encoder_output=encoder_outputs,
            )

        else:
            raise ValueError(f"invalid mode: {mode!r}")

        return seq_logProb, seq_predictions


if __name__ == '__main__':
    import logging
    import torch
    from torch.autograd import Variable

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    from vocabulary import Vocabulary

    json_file = 'data/testing_label.json'
    numpy_file = 'data/testing_data/feat'

    helper = Vocabulary(json_file, min_word_count=5)

    # Random stand-in batch: 3 videos, 80 frames, 4096-d CNN features per frame.
    input_data = Variable(torch.randn(3, 80, 4096))

    encoder = EncoderRNN(input_size=4096, hidden_size=1000)
    decoder = DecoderRNN(hidden_size=1000,
                         output_size=1700,
                         vocab_size=1700,
                         word_dim=128,
                         helper=helper)
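
    # A hedged sketch (not from the original repository) of how the pieces above
    # might be wired together for inference. It assumes EncoderRNN.forward returns
    # (encoder_outputs, encoder_last_hidden_state), matching the names used in the
    # mode dispatch at the top of this example; only decoder.infer's keyword
    # arguments are taken directly from the snippet.
    encoder_outputs, encoder_last_hidden_state = encoder(input_data)
    seq_logProb, seq_predictions = decoder.infer(
        encoder_last_hidden_state=encoder_last_hidden_state,
        encoder_output=encoder_outputs,
    )
    print(seq_logProb)
    print(seq_predictions)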
Example #2
        return x

    def parms_n(self):
        # Count trainable parameters (p.numel() avoids an unimported numpy dependency).
        model_parameters = filter(lambda p: p.requires_grad, self.parameters())
        params = sum(p.numel() for p in model_parameters)

        return params

    def get_name(self):
        # Unique string repr for each different net configuration.
        net_name = self.__class__.__name__
        return "{}_{}".format(net_name, self.depth)

    def summary(self):
        model_parameters = filter(lambda p: p.requires_grad, self.parameters())
        params = sum(p.numel() for p in model_parameters)

        print("Model Parameters")
        print(self)  # full module structure
        print("Trainable parameters: {}".format(params))

if __name__ == "__main__":
    import logging
    import torch
    from torch.autograd import Variable

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)

    t = MnistCNN(depth=128)

    a = Variable(torch.rand(2, 3, 32, 32))

    print(t.parms_n())
    out = t(a)  # run the forward pass once and reuse the result
    print(out.shape)
    print(out)
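
    # A standalone sketch of the same parameter-counting idea generalized to any
    # nn.Module; the helper name count_trainable_params is ours, not from the snippet.
    import torch.nn as nn

    def count_trainable_params(model: nn.Module) -> int:
        return sum(p.numel() for p in model.parameters() if p.requires_grad)

    print(count_trainable_params(t))                 # should match t.parms_n()
    print(count_trainable_params(nn.Linear(10, 5)))  # 10*5 weights + 5 biases = 55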