Example #1
    def test(self):
        # output from the custom implementation
        X = torch.Tensor([[1, 1, 1, 0, 0], [0, 1, 1, 1, 0], [0, 0, 1, 1, 1],
                          [0, 0, 1, 1, 0], [0, 1, 1, 0, 0]])
        X = X.reshape(
            (1, 1, 5, 5))  # (batch_size x channel_size x height x width)
        conv = Conv()
        K = torch.Tensor([[[[1, 0, 1], [0, 1, 0], [1, 0, 1]]]])
        conv.init_params(K, 3, stride=1, padding=1)
        output_custom = conv.forward(X)
        print(output_custom)

        # output from PyTorch's implementation
        output_pytorch = F.conv2d(X, K, padding=1, stride=1)
        print(output_pytorch)
        # assertEqual cannot truth-test a multi-element tensor, so compare with torch.equal
        self.assertTrue(torch.equal(output_custom, output_pytorch))
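The custom Conv class under test is not shown in this example. Below is a minimal sketch of an implementation consistent with the calls above (a naive cross-correlation matching F.conv2d's convention); the attribute names and the looping strategy are assumptions, not taken from the original conv module:

import torch
import torch.nn.functional as F


class Conv:
    """Naive 2D convolution (cross-correlation, like F.conv2d) for small inputs."""

    def init_params(self, K, kernel_size, stride=1, padding=0):
        self.K = K                      # kernel, shape (out_ch, in_ch, kH, kW)
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

    def forward(self, X):
        # X has shape (batch, in_ch, H, W); zero-pad the two spatial dims
        X = F.pad(X, (self.padding,) * 4)
        batch, _, H, W = X.shape
        out_ch, _, kH, kW = self.K.shape
        out_h = (H - kH) // self.stride + 1
        out_w = (W - kW) // self.stride + 1
        out = torch.zeros(batch, out_ch, out_h, out_w)
        for b in range(batch):
            for o in range(out_ch):
                for i in range(out_h):
                    for j in range(out_w):
                        r, c = i * self.stride, j * self.stride
                        patch = X[b, :, r:r + kH, c:c + kW]
                        out[b, o, i, j] = (patch * self.K[o]).sum()
        return out

With the 5x5 input and 3x3 kernel above (stride 1, padding 1), this sketch and F.conv2d should produce identical 5x5 outputs for the integer-valued example, so the equality check in the test can pass.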
Example #2
num_labels = 26
cuda = torch.cuda.is_available()
print("cuda : ")
print(cuda)

# Instantiate the CRF model
crf = CRF(input_dim, embed_dim, conv_shapes, num_labels, batch_size)

# Setup the optimizer
# opt = optim.LBFGS(crf.parameters())
opt = optim.Adam(crf.parameters())
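# Note: the LBFGS line above is commented out; PyTorch's optim.LBFGS requires a
# closure passed to opt.step() that re-evaluates the loss, e.g. (sketch only;
# crf.loss is a hypothetical method name, the real CRF API may differ):
#     def closure():
#         opt.zero_grad()
#         loss = crf.loss(x_batch, y_batch)
#         loss.backward()
#         return loss
#     opt.step(closure)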

from conv import Conv

convLayer = Conv()
convLayer.init_params(kernel_size=5)

##################################################
# Begin training
##################################################
step = 0

# Fetch dataset
dataset = get_dataset()
# split = int(0.5 * len(dataset.data)) # train-test split
split = int(0.01 * len(dataset.data))  # train-test split (only 1% of the data is kept here)
# train_data, test_data = dataset.data[:split], dataset.data[split:]
# train_target, test_target = dataset.target[:split], dataset.target[split:]
train_data = dataset.data[:split]
test_data = train_data  # no separate held-out set here: evaluation reuses the training subset
train_target = dataset.target[:split]
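The excerpt stops right after the data split, before the loop that the "Begin training" banner announces. A rough sketch of how training typically continues with this setup; num_epochs and the crf.loss call are assumptions (the real CRF class may expose a different interface), and how convLayer feeds into the model is not shown in the original, so it is left out here:

num_epochs = 10  # assumed value, not taken from the original script

for epoch in range(num_epochs):
    for i in range(0, len(train_data), batch_size):
        x_batch = train_data[i:i + batch_size]
        y_batch = train_target[i:i + batch_size]

        # standard Adam update: reset grads, compute loss, backprop, step
        opt.zero_grad()
        loss = crf.loss(x_batch, y_batch)  # hypothetical method name
        loss.backward()
        opt.step()

        step += 1
        if step % 10 == 0:
            print("step {}, loss {:.4f}".format(step, loss.item()))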