super(TestPRelu, self).__init__() self.prelu = nn.PReLU(init=random.random(), num_parameters=inp) def forward(self, x): x = self.prelu(x) return x if __name__ == '__main__': max_error = 0 for i in range(100): inp = np.random.randint(1, 100) out = np.random.randint(1, 100) model = TestPRelu(inp, out, inp % 2) input_np = np.random.uniform(-10, 10, (1, inp)) input_var = Variable(torch.FloatTensor(input_np)) output = model(input_var) k_model = pytorch_to_keras((inp, ), output) pytorch_output = output.data.numpy() keras_output = k_model.predict(input_np) error = np.max(pytorch_output - keras_output) print(error) if max_error < error: max_error = error print('Max error: {0}'.format(max_error))
# Make the converter importable when this test is run from its own directory.
sys.path.append('../pytorch2keras')
from converter import pytorch_to_keras

if __name__ == '__main__':
    max_error = 0
    for i in range(10):
        model = torchvision.models.resnet18()
        # Idiom: eval() recursively sets training=False on every sub-module,
        # replacing the manual `for m in model.modules(): m.training = False`.
        model.eval()

        input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)

        # BUG FIX: pytorch_to_keras expects (model, input_var, input_shapes, ...)
        # as the other tests in this file call it; the original passed only the
        # shape tuple and the output tensor.
        k_model = pytorch_to_keras(model, input_var, (3, 224, 224,))

        pytorch_output = output.data.numpy()
        keras_output = k_model.predict(input_np)

        # BUG FIX: absolute difference, so negative deviations are counted.
        error = np.max(np.abs(pytorch_output - keras_output))
        print(error)
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
def forward(self, x): x = self.conv2d(x) return x if __name__ == '__main__': max_error = 0 for i in range(100): kernel_size = np.random.randint(1, 10) inp = np.random.randint(kernel_size + 1, 100) out = np.random.randint(1, 2) model = TestConv2d(inp + 2, out, kernel_size, inp % 2) input_np = np.random.uniform(0, 1, (1, inp + 2, inp, inp)) input_var = Variable(torch.FloatTensor(input_np)) output = model(input_var) k_model = pytorch_to_keras((inp + 2, inp, inp,), output, change_ordering=True) pytorch_output = output.data.numpy() keras_output = k_model.predict(input_np.transpose(0, 2, 3, 1)) error = np.max(pytorch_output - keras_output.transpose(0, 3, 1, 2)) print(error) if max_error < error: max_error = error print('Max error: {0}'.format(max_error))
self.sigmoid = nn.Sigmoid() def forward(self, x): x = self.linear(x) x = self.sigmoid(x) return x if __name__ == '__main__': max_error = 0 for i in range(100): inp = np.random.randint(1, 100) out = np.random.randint(1, 100) model = TestSigmoid(inp, out, inp % 2) input_np = np.random.uniform(-1.0, 1.0, (1, inp)) input_var = Variable(torch.FloatTensor(input_np)) output = model(input_var) k_model = pytorch_to_keras(model, input_var, (inp, ), verbose=True) pytorch_output = output.data.numpy() keras_output = k_model.predict(input_np) error = np.max(pytorch_output - keras_output) print(error) if max_error < error: max_error = error print('Max error: {0}'.format(max_error))
class TestEmbedding(nn.Module): def __init__(self, input_size): super(TestEmbedding, self).__init__() self.embedd = nn.Embedding(input_size, 100) def forward(self, input): return self.embedd(input).sum(dim=0) if __name__ == '__main__': max_error = 0 for i in range(100): input_np = np.random.randint(0, 10, (1, 1, 4)) input = Variable(torch.LongTensor(input_np)) simple_net = TestEmbedding(1000) output = simple_net(input) k_model = pytorch_to_keras(simple_net, input, (1, 4), verbose=True) pytorch_output = output.data.numpy() keras_output = k_model.predict(input_np) error = np.max(pytorch_output - keras_output[0]) print(error) if max_error < error: max_error = error print('Max error: {0}'.format(max_error))
if __name__ == '__main__':
    max_error = 0
    for i in range(10):
        model = resnet18()
        # Idiom: eval() recursively sets training=False on every sub-module,
        # replacing the manual `for m in model.modules(): m.training = False`.
        model.eval()

        input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)

        # BUG FIX: pytorch_to_keras expects (model, input_var, input_shapes, ...)
        # as the other tests in this file call it; the original passed only the
        # shape tuple and the output tensor.
        k_model = pytorch_to_keras(model, input_var, (3, 224, 224,),
                                   change_ordering=True)

        pytorch_output = output.data.numpy()
        # change_ordering=True -> channels-last Keras model, so feed NHWC input.
        keras_output = k_model.predict(input_np.transpose(0, 2, 3, 1))
        print(pytorch_output.shape, keras_output.shape)

        # BUG FIX: absolute difference, so negative deviations are counted.
        error = np.max(np.abs(pytorch_output - keras_output))
        print(error)
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
# NOTE(review): this chunk starts at `max_error = 0`; if it sits under a
# `if __name__ == '__main__':` guard above, keep these lines indented there.
max_error = 0
for i in range(100):
    kernel_size = np.random.randint(1, 10)
    inp = np.random.randint(kernel_size + 1, 100)
    out = np.random.randint(1, 2)
    model = TestConv2d(inp + 2, out, kernel_size, inp % 2)

    input_np = np.random.uniform(0, 1, (1, inp + 2, inp, inp))
    input_var = Variable(torch.FloatTensor(input_np))
    output = model(input_var)

    k_model = pytorch_to_keras(model, input_var, (
        inp + 2,
        inp,
        inp,
    ), change_ordering=True, verbose=True)

    pytorch_output = output.data.numpy()
    # change_ordering=True builds a channels-last Keras model: transpose the
    # input to NHWC and the prediction back to NCHW before comparing.
    keras_output = k_model.predict(input_np.transpose(0, 2, 3, 1))

    # BUG FIX: compare absolute differences — without np.abs a negative
    # deviation would be ignored by np.max.
    error = np.max(np.abs(pytorch_output - keras_output.transpose(0, 3, 1, 2)))
    print(error)
    if max_error < error:
        max_error = error

print('Max error: {0}'.format(max_error))
self.classif = nn.Linear(1536, num_classes) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.classif(x) return x if __name__ == '__main__': max_error = 0 for i in range(10): model = InceptionV4() model.eval() input_np = np.random.uniform(0, 1, (4, 3, 299, 299)) input_var = Variable(torch.FloatTensor(input_np)) output = model(input_var) k_model = pytorch_to_keras((3, 299, 299,), output) pytorch_output = output.data.numpy() keras_output = k_model.predict(input_np) error = np.max(pytorch_output - keras_output) print(error) if max_error < error: max_error = error print('Max error: {0}'.format(max_error))
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model if __name__ == '__main__': max_error = 0 for i in range(10): model = resnet18() for m in model.modules(): m.training = False input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) input_var = Variable(torch.FloatTensor(input_np)) output = model(input_var) k_model = pytorch_to_keras(model, input_var, (3, 224, 224,), verbose=True, change_ordering=True) pytorch_output = output.data.numpy() keras_output = k_model.predict(input_np.transpose(0, 2, 3, 1)) print(pytorch_output.shape, keras_output.shape) error = np.max(pytorch_output - keras_output) print(error) if max_error < error: max_error = error print('Max error: {0}'.format(max_error))