def main():
    """Emit a test case exercising the Size model on a random 3-D input."""
    import numpy as np
    np.random.seed(314)
    x = np.random.rand(12, 6, 4).astype(np.float32)
    testtools.generate_testcase(Size(), [x])
def main():
    """Emit a test case for Matmul with compatible (5,7) x (7,4) operands."""
    import numpy as np
    np.random.seed(314)
    lhs = np.random.rand(5, 7).astype(np.float32)
    rhs = np.random.rand(7, 4).astype(np.float32)
    testtools.generate_testcase(Matmul, [lhs, rhs])
def main():
    """Emit test cases for Len (array input) and LenList (list input)."""
    np.random.seed(314)
    testtools.generate_testcase(Len(), [np.random.rand(3, 5, 4)],
                                subname='basic')
    testtools.generate_testcase(LenList(),
                                [[np.array(n) for n in [3, 5, 4]]],
                                subname='list')
def main():
    """Cross-check AttDot.forward against AttDot.original, then emit test cases."""
    import numpy as np
    np.random.seed(314)

    eprojs = 3
    dunits = 4
    att_dim = 5
    batch_size = 3
    sequence_length = 4
    num_vocabs = 10

    model_fn = lambda: AttDot(eprojs, dunits, att_dim)
    labels, ilens = sequence_utils.gen_random_sequence(
        batch_size, sequence_length, num_vocabs)
    # One random feature matrix per sequence length.
    xs = [np.random.rand(length, eprojs).astype(np.float32)
          for length in ilens]

    # Check if our modification is valid.
    expected = model_fn().original(xs, None, None)
    actual = model_fn().forward(xs, None, None)
    for want, got in zip(expected, actual):
        assert np.allclose(want.array, got.array)

    testtools.generate_testcase(model_fn, [xs, None, None])

    z = np.random.rand(batch_size, dunits).astype(np.float32)
    testtools.generate_testcase(
        lambda: AttDotBackprop(eprojs, dunits, att_dim), [xs, z, None],
        backprop=True)
def main():
    """Cross-check BLSTM.forward against BLSTM.original, then emit test cases."""
    import numpy as np
    np.random.seed(314)

    idim = 5
    elayers = 2
    cdim = 3
    hdim = 7
    batch_size = 3
    sequence_length = 4
    num_vocabs = 10

    model = BLSTM(idim, elayers, cdim, hdim, 0)
    labels, ilens = sequence_utils.gen_random_sequence(
        batch_size, sequence_length, num_vocabs)
    xs = [np.random.rand(length, idim).astype(np.float32)
          for length in ilens]

    # Check if our modification is valid.
    expected = model.original(xs, ilens)
    actual = model.forward(xs, ilens)
    for want, got in zip(expected[0], actual[0]):
        assert np.allclose(want.array, got.array)
    assert np.allclose(expected[1], actual[1])

    testtools.generate_testcase(model, [xs, ilens])
    testtools.generate_testcase(BLSTMBackprop(idim, elayers, cdim, hdim, 0),
                                [xs, ilens], backprop=True)
def main():
    """Emit a test case for ExpandDims on a zero-centered random matrix."""
    import numpy as np
    np.random.seed(314)
    model = ExpandDims()
    # Shift into [-0.5, 0.5) so negative values are covered too.
    x = np.random.rand(6, 4).astype(np.float32) - 0.5
    testtools.generate_testcase(model, [x])
def main():
    """Cross-check StatelessLSTM.forward against its original, then emit test cases."""
    import numpy as np
    np.random.seed(43)

    batch_size = 3
    in_size = 7
    out_size = 4

    def model_fn():
        lstm = StatelessLSTM(in_size, out_size)
        return lstm

    c = np.random.rand(batch_size, out_size).astype(np.float32)
    h = np.random.rand(batch_size, out_size).astype(np.float32)
    x = np.random.rand(batch_size, in_size).astype(np.float32)

    model = model_fn()
    # Check if our modification is valid.
    expected = model.original(c, h, x)
    actual = model.forward(c, h, x)
    for want, got in zip(expected, actual):
        assert np.allclose(want.array, got.array)

    testtools.generate_testcase(model_fn(), [c, h, x])
    # TODO (hamaji): support func
    # testtools.generate_testcase(model_fn, [c, h, x])

    def model_fn():
        lstm = StatelessLSTMBackprop(in_size, out_size)
        return lstm

    testtools.generate_testcase(model_fn(), [c, h, x], backprop=True)
def main():
    """Emit forward and backprop test cases for the VGG2L feature extractor."""
    import numpy as np
    np.random.seed(314)

    idim = 5
    elayers = 2
    cdim = 3
    hdim = 7
    batch_size = 3
    sequence_length = 4
    num_vocabs = 10

    labels, ilens = sequence_utils.gen_random_sequence(
        batch_size, sequence_length, num_vocabs)
    xs = [np.random.rand(length, idim).astype(np.float32)
          for length in ilens]

    testtools.generate_testcase(VGG2L(1), [xs, ilens], subname='VGGL')
    # TODO (hamaji): support lambda
    #testtools.generate_testcase(lambda: VGG2L(1), [xs, ilens], subname='VGGL_lambda')
    testtools.generate_testcase(VGG2LBackprop(1), [xs, ilens],
                                backprop=True, subname='VGGL_backprop')
def main():
    """Emit a test case for the Tanh model on a random matrix."""
    model = Tanh()
    np.random.seed(314)
    x = np.random.rand(6, 4).astype(np.float32)
    testtools.generate_testcase(model, [x])
def main():
    """Emit a test case for GoogLeNet with a single random image and label."""
    np.random.seed(314)
    model = GoogLeNet()
    # 224
    image = np.random.rand(1, 3, 227, 227).astype(np.float32)
    label = np.random.randint(1000, size=1)
    testtools.generate_testcase(model, [image, label])
def main():
    """Emit test cases for A and Self on the same random 4-D input."""
    model = A()
    x = np.random.rand(6, 4, 2, 7).astype(np.float32)
    testtools.generate_testcase(model, [x])
    testtools.generate_testcase(Self(), [x], subname='self')
def main():
    """Cross-check AttLoc.forward against AttLoc.original, then emit a test case."""
    import numpy as np
    np.random.seed(314)

    eprojs = 3
    dunits = 4
    att_dim = 5
    batch_size = 3
    sequence_length = 4
    num_vocabs = 10
    aconv_chans = 7
    aconv_filts = 6

    model_fn = lambda: AttLoc(eprojs, dunits, att_dim,
                              aconv_chans, aconv_filts)
    labels, ilens = sequence_utils.gen_random_sequence(
        batch_size, sequence_length, num_vocabs)
    xs = [np.random.rand(length, eprojs).astype(np.float32)
          for length in ilens]

    # Check if our modification is valid.
    model = model_fn()
    expected = model.original(xs, None, None)
    model.reset()
    actual = model.forward(xs, None, None)
    for want, got in zip(expected, actual):
        assert np.allclose(want.array, got.array)

    testtools.generate_testcase(model_fn, [xs, None, None])
def main():
    """Emit test cases for ConcatTuple and ConcatList on matching inputs."""
    first = np.random.rand(7, 4, 2).astype(np.float32)
    second = np.random.rand(7, 3, 2).astype(np.float32)
    testtools.generate_testcase(ConcatTuple, [first, second])
    testtools.generate_testcase(ConcatList, [first, second], subname='list')
def main():
    """Build an FPN-based Faster R-CNN and emit a test case on a random batch."""
    np.random.seed(314)
    n_fg_class = 80

    base = ResNet50(n_class=n_fg_class, arch='he')
    base.pick = ('res2', 'res3', 'res4', 'res5')
    base.pool1 = lambda x: F.max_pooling_2d(
        x, 3, stride=2, pad=1, cover_all=False)
    base.remove_unused()

    extractor = FPN(base, len(base.pick),
                    (1 / 4, 1 / 8, 1 / 16, 1 / 32, 1 / 64))
    return_values = ['bboxes', 'labels', 'scores']
    min_size = 800
    max_size = 1333
    model = FasterRCNN(extractor=extractor,
                       rpn=RPN(extractor.scales),
                       bbox_head=BboxHead(n_fg_class + 1, extractor.scales),
                       mask_head=MaskHead(n_fg_class + 1, extractor.scales),
                       return_values=return_values,
                       min_size=min_size, max_size=max_size)

    bsize = 2
    v = np.random.rand(bsize, 3, 224, 224).astype(np.float32)
    # testtools.generate_testcase(base, [v], 'base')
    # testtools.generate_testcase(extractor, [v], 'extractor')
    testtools.generate_testcase(model, [v], 'faster_rcnn')
def main():
    """Emit test cases for Minimum and its numpy-based variant."""
    np.random.seed(314)
    lhs = np.random.rand(6, 2, 3).astype(np.float32)
    rhs = np.random.rand(6, 2, 3).astype(np.float32)
    testtools.generate_testcase(Minimum(), [lhs, rhs])
    testtools.generate_testcase(MinimumNumpy(), [lhs, rhs], subname='np')
def main():
    """Emit a test case for A with two same-shaped random inputs."""
    model = A()
    first = np.random.rand(3, 5).astype(np.float32)
    second = np.random.rand(3, 5).astype(np.float32)
    testtools.generate_testcase(model, [first, second])
def main():
    """Emit a test case for a shrunken Alex net on a random image batch."""
    np.random.seed(314)
    model = Alex(shrink_ratio=23)
    images = np.random.rand(5, 3, 227, 227).astype(np.float32)
    labels = np.random.randint(1000, size=5)
    testtools.generate_testcase(model, [images, labels])
def gen_test(model_fn, subname=None):
    """Validate a model against its original implementation, then emit a test case.

    NOTE(review): reads ``xs``, ``ilens`` and ``ys`` from an enclosing
    scope that is not visible here — confirm they are defined at the
    call site.
    """
    model = model_fn()
    # Check if our modification is valid.
    expected = model.original(xs, ilens, ys)
    actual = model.forward(xs, ilens, ys)
    assert np.allclose(expected.array, actual.array)
    testtools.generate_testcase(model_fn, [xs, ilens, ys], subname=subname)
def main():
    """Run A once on a random input and emit a test case for it."""
    np.random.seed(314)
    v = np.random.rand(3, 7).astype(np.float32)
    model = A()
    result = model(v)
    testtools.generate_testcase(model, [v])
def main():
    """Emit a test case for A with a single-sample score/label pair."""
    out_n = 2
    batch_size = 1
    model = A()
    scores = np.random.rand(batch_size, out_n).astype(np.float32)
    labels = np.random.randint(out_n, size=batch_size)
    testtools.generate_testcase(model, [scores, labels])
def gen(output, recipe, bwd=True, use_gpu=False):
    """Emit an E2E test case from a (hyperparameters, inputs) recipe.

    Args:
        output: Destination passed through to ``test_args``.
        recipe: Tuple of ``(idim, odim, args)`` and ``(xs, ilens, ys)``.
        bwd: Whether to generate a backprop test case.
        use_gpu: Whether the generated test should target GPU.
    """
    import testtools
    test_args.get_test_args([output, '--allow-unused-params'])
    (idim, odim, args), (xs, ilens, ys) = recipe
    testtools.generate_testcase(
        lambda: E2E(idim, odim, args), [xs, ilens, ys],
        backprop=bwd, use_gpu=use_gpu)
def main():
    """Emit test cases for A and SingleParam on the same 4-D input."""
    model = A()
    np.random.seed(123)
    x = np.random.rand(2, 20, 15, 17).astype(np.float32)
    testtools.generate_testcase(model, [x])
    testtools.generate_testcase(SingleParam(), [x], subname='single_param')
def main():
    """Emit a test case for A with an array and a scalar int64 argument."""
    import numpy as np
    np.random.seed(314)
    model = A()
    x = np.random.rand(12, 6, 4).astype(np.float32)
    p = np.int64(3)
    testtools.generate_testcase(model, [x, p])
def main():
    """Emit a test case for A, wrapping the single input in a list."""
    import numpy as np
    np.random.seed(314)
    model = A()
    inputs = [np.random.rand(5, 7).astype(np.float32)]
    testtools.generate_testcase(model, inputs)
def main():
    """Emit a test case for A with a plain Python list of ints."""
    import numpy as np
    np.random.seed(12)
    model = A()
    ps = [3, 1, 4, 1, 5, 9, 2]
    testtools.generate_testcase(model, [ps])
def main():
    """Emit a test case for A on a random NCHW-shaped input."""
    import numpy as np
    np.random.seed(314)
    model = A()
    v = np.random.rand(2, 3, 5, 5).astype(np.float32)
    testtools.generate_testcase(model, [v])
def main():
    """Cross-check two Decoder configurations and emit forward/backprop test cases."""
    import numpy as np
    np.random.seed(43)

    eprojs = 3
    dunits = 4
    att_dim = 5
    batch_size = 3
    sequence_length = 4
    num_vocabs = 10
    dlayers = 2
    odim = 11
    sos = odim - 1
    eos = odim - 1
    aconv_chans = 7
    aconv_filts = 6

    labels, ilens = sequence_utils.gen_random_sequence(
        batch_size, sequence_length, num_vocabs)
    hs = [np.random.rand(length, eprojs).astype(np.float32)
          for length in ilens]
    ys, ilens = sequence_utils.gen_random_sequence(
        batch_size, sequence_length, odim)

    def gen_test(model_fn, subname=None):
        # Check if our modification is valid.
        model = model_fn()
        expected, _ = model.original(hs, ys)
        actual = model.forward(hs, ys)
        assert np.allclose(expected.array, actual.array)
        testtools.generate_testcase(model_fn, [hs, ys], subname=subname)

    def model_fn():
        # att = AttDot(eprojs, dunits, att_dim)
        # dec = Decoder(eprojs, odim, dlayers, dunits, sos, eos, att)
        dec = Decoder(eprojs, odim, dlayers, dunits, sos, eos, att_dim)
        return dec

    gen_test(model_fn)
    testtools.generate_testcase(model_fn, [hs, ys], backprop=True)

    def model_fn():
        dec = Decoder(eprojs, odim, dlayers, dunits, sos, eos, att_dim,
                      aconv_chans, aconv_filts)
        return dec

    gen_test(model_fn, subname='attloc')
    testtools.generate_testcase(model_fn, [hs, ys], subname='attloc',
                                backprop=True)
def main():
    """Emit forward and backprop test cases for BroadcastTo."""
    np.random.seed(314)
    x = np.random.rand(6, 4, 1).astype(np.float32) - 0.5
    testtools.generate_testcase(BroadcastTo(), [x], subname='basic')
    x = np.random.rand(6, 3).astype(np.float32) - 0.5
    testtools.generate_testcase(BroadcastToBackprop(), [x],
                                backprop=True, subname='with_backprop')
def main():
    """Emit a test case for a small MLP classifier on a random batch."""
    np.random.seed(314)
    out_n = 4
    batch_size = 100
    model = MLP(8, out_n)
    features = np.random.rand(batch_size, 3).astype(np.float32)
    labels = np.random.randint(out_n, size=batch_size)
    testtools.generate_testcase(model, [features, labels])
def main():
    """Emit a test case for FasterRCNNFPNResNet50 on a random image batch."""
    np.random.seed(314)
    model = FasterRCNNFPNResNet50(n_fg_class=80)
    bsize = 2
    batch = np.random.rand(bsize, 3, 224, 224).astype(np.float32)
    testtools.generate_testcase(model, [batch])