import torch
from torch import nn
from torchmore import combos, flex, layers

# Assumed module-level settings: this section references ocr_output and
# noutput without defining them, so the values below are placeholders.
ocr_output = "BDL"  # axis order of the final OCR output (assumption)
noutput = 53  # default number of output classes (assumption)


def project_and_conv1d(d, noutput, r=5):
    return [
        layers.Fun("lambda x: x.max(2)[0]"),  # BDHW -> BDW (max over H)
        flex.Conv1d(d, r),
        flex.BatchNorm1d(),
        nn.ReLU(),
        flex.Conv1d(noutput, 1),
        layers.Reorder("BDL", ocr_output),
    ]
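# Sketch of how a head fragment like project_and_conv1d composes into a full
# model. The backbone, input shape, and the name make_conv_only are
# illustrative assumptions, not from the original code; shape inference is
# used the same way as in the factories below.
def make_conv_only(noutput=noutput):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        *combos.conv2d_block(100, 3, repeat=2),
        *combos.conv2d_block(200, 3, repeat=2),
        *project_and_conv1d(400, noutput),
    )
    flex.shape_inference(model, (1, 1, 128, 512))
    return model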
def test_Conv1d():
    mod = flex.Conv1d(17, 3, padding=1)
    a = torch.zeros((7, 3, 99))
    b = mod(a)
    assert b.size() == (7, 17, 99)
    a = torch.zeros((4, 3, 9))
    b = mod(a)
    assert b.size() == (4, 17, 9)
def project_and_lstm(d, noutput, num_layers=1):
    return [
        layers.Fun("lambda x: x.sum(2)"),  # BDHW -> BDW (sum over H)
        layers.Reorder("BDL", "LBD"),
        flex.LSTM(d, bidirectional=True, num_layers=num_layers),
        layers.Reorder("LBD", "BDL"),
        flex.Conv1d(noutput, 1),
        layers.Reorder("BDL", ocr_output),
    ]
def make_lstm_normalized(noutput=noutput):
    """Model for height-normalized (80 px) line images: conv blocks, then LSTM."""
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, 80, None]),
        *combos.conv2d_block(50, 3, mp=(2, 1)),
        *combos.conv2d_block(100, 3, mp=(2, 1)),
        *combos.conv2d_block(150, 3, mp=2),
        layers.Reshape(0, [1, 2], 3),  # fold H into D: BDHW -> B(D*H)W
        layers.Reorder("BDL", "LBD"),
        flex.LSTM(100, bidirectional=True),
        layers.Reorder("LBD", "BDL"),
        flex.Conv1d(noutput, 1),
        layers.Reorder("BDL", ocr_output),
    )
    flex.shape_inference(model, (1, 1, 80, 200))
    return model
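# Smoke test sketch for the factory above. The charset size (97) is an
# arbitrary assumption, and the output-axis check is written so it holds
# whether ocr_output orders axes as "BDL" or "BLD".
def test_make_lstm_normalized():
    model = make_lstm_normalized(noutput=97)
    a = torch.rand((2, 1, 80, 264))
    b = model(a)
    assert b.size(0) == 2
    assert 97 in tuple(b.size())[1:]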
def make_lstm_keep(noutput=noutput):
    """Model that keeps the input width by wrapping the conv backbone in KeepSize."""
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        layers.KeepSize(
            mode="nearest",
            dims=[3],
            sub=nn.Sequential(
                *combos.conv2d_block(50, 3, repeat=2),
                *combos.conv2d_block(100, 3, repeat=2),
                *combos.conv2d_block(150, 3, repeat=2),
                layers.Fun("lambda x: x.sum(2)"),  # BDHW -> BDW
            ),
        ),
        flex.Conv1d(500, 5, padding=2),
        flex.BatchNorm1d(),
        nn.ReLU(),
        layers.Reorder("BDL", "LBD"),
        flex.LSTM(200, bidirectional=True),
        layers.Reorder("LBD", "BDL"),
        flex.Conv1d(noutput, 1),
        layers.Reorder("BDL", ocr_output),
    )
    flex.shape_inference(model, (1, 1, 128, 512))
    return model
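# Smoke test sketch for make_lstm_keep: the intent of KeepSize here is that
# the output sequence length tracks the input width, which the assertion
# below assumes. The charset size is again an arbitrary assumption.
def test_make_lstm_keep():
    model = make_lstm_keep(noutput=97)
    a = torch.rand((2, 1, 128, 512))
    b = model(a)
    assert 512 in tuple(b.size())[1:]  # width preserved by KeepSize (assumed)
    assert 97 in tuple(b.size())[1:]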
def test_shape_inference():
    mod = nn.Sequential(flex.Conv1d(3, 3, padding=1))
    print(mod)
    assert "Flex" in repr(mod)
    a = torch.zeros((7, 3, 99))
    b = mod(a)
    assert b.size() == (7, 3, 99)
    assert "Flex" in repr(mod)
    print(mod)
    flex.shape_inference(mod, a.shape)
    print(mod)
    assert "Flex" not in repr(mod)
    a = torch.zeros((4, 3, 9))
    b = mod(a)
    assert b.size() == (4, 3, 9)
def make_lstm_transpose(noutput=noutput):
    """Model that upsamples the sequence with a transposed convolution before the LSTM."""
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        *combos.conv2d_block(50, 3, repeat=2),
        *combos.conv2d_block(100, 3, repeat=2),
        *combos.conv2d_block(150, 3, repeat=2),
        *combos.conv2d_block(200, 3, repeat=2),
        layers.Fun("lambda x: x.sum(2)"),  # BDHW -> BDW
        flex.ConvTranspose1d(800, 1, stride=2),  # <-- undo too-tight spacing
        # flex.BatchNorm1d(), nn.ReLU(),
        layers.Reorder("BDL", "LBD"),
        flex.LSTM(100, bidirectional=True),
        layers.Reorder("LBD", "BDL"),
        flex.Conv1d(noutput, 1),
        layers.Reorder("BDL", ocr_output),
    )
    flex.shape_inference(model, (1, 1, 128, 512))
    return model
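# Smoke test sketch for make_lstm_transpose; the input size matches the shape
# inference call above, and the charset size (97) is an assumption. Only batch
# and class dimensions are checked, since the output length depends on the
# pooling inside conv2d_block and the stride-2 transposed convolution.
def test_make_lstm_transpose():
    model = make_lstm_transpose(noutput=97)
    a = torch.rand((2, 1, 128, 512))
    b = model(a)
    assert b.size(0) == 2
    assert 97 in tuple(b.size())[1:]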