def make_lstm_ctc(noutput=noutput):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        *combos.conv2d_block(50, 3, mp=(2, 1)),
        *combos.conv2d_block(100, 3, mp=(2, 1)),
        *combos.conv2d_block(150, 3, mp=2),
        *project_and_lstm(100, noutput))
    flex.shape_inference(model, (1, 1, 128, 512))
    return model
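

# A hypothetical smoke test (not part of the original module) showing how the
# builder above is meant to be driven: layers.Input declares BDHW grayscale
# input scaled to [0, 1], so torch.rand produces a valid dummy batch. The
# alphabet size 97 is arbitrary; the output layout depends on ocr_output and
# project_and_lstm, which are assumed to be defined earlier in this file.
def smoke_test_lstm_ctc():
    model = make_lstm_ctc(noutput=97)
    x = torch.rand(2, 1, 128, 512)  # batch of 2 single-channel 128x512 images
    y = model(x)
    print(y.shape)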


def make_lstm_unet(noutput=noutput):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        *combos.conv2d_block(64, 3, repeat=3),
        combos.make_unet([64, 128, 256, 512]),
        *combos.conv2d_block(128, 3, repeat=2),
        *project_and_lstm(100, noutput))
    flex.shape_inference(model, (1, 1, 128, 256))
    return model


def make_conv_resnet(noutput=noutput):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        *combos.conv2d_block(64, 3, mp=2),
        *combos.resnet_blocks(5, 64),
        *combos.conv2d_block(128, 3, mp=(2, 1)),
        *combos.resnet_blocks(5, 128),
        *combos.conv2d_block(192, 3, mp=2),
        *combos.resnet_blocks(5, 192),
        *combos.conv2d_block(256, 3, mp=(2, 1)),
        *combos.resnet_blocks(5, 256),
        *combos.conv2d_block(512, 3),
        *project_and_conv1d(512, noutput))
    flex.shape_inference(model, (1, 1, 128, 512))
    return model


def make_conv_only(noutput=noutput):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        *combos.conv2d_block(100, 3, mp=2, repeat=2),
        *combos.conv2d_block(200, 3, mp=2, repeat=2),
        *combos.conv2d_block(300, 3, mp=2, repeat=2),
        *combos.conv2d_block(400, 3, repeat=2),
        *project_and_conv1d(800, noutput))
    flex.shape_inference(model, (1, 1, 48, 300))
    return model


def make_seg_unet(noutput=3):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        *combos.conv2d_block(64, 3, repeat=3),
        combos.make_unet([128, 256, 512]),
        *combos.conv2d_block(64, 3, repeat=2),
        flex.Conv2d(noutput, 5))
    flex.shape_inference(model, (1, 1, 256, 256))
    return model


def make_seg_conv(noutput=3):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        layers.KeepSize(
            sub=nn.Sequential(
                *combos.conv2d_block(50, 3, mp=2, repeat=3),
                *combos.conv2d_block(100, 3, mp=2, repeat=3),
                *combos.conv2d_block(200, 3, mp=2, repeat=3))),
        *combos.conv2d_block(400, 5),
        flex.Conv2d(noutput, 3))
    flex.shape_inference(model, (1, 1, 256, 256))
    return model
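

# A hypothetical usage sketch for the two segmenters above (not in the
# original module). With noutput=3 the final flex.Conv2d produces three
# per-pixel class maps; since those output convolutions specify no padding,
# a few border pixels may be trimmed relative to the 256x256 input.
def smoke_test_segmenters():
    x = torch.rand(1, 1, 256, 256)
    for maker in [make_seg_unet, make_seg_conv]:
        model = maker(noutput=3)
        y = model(x)
        print(maker.__name__, tuple(y.shape))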


def make_lstm_normalized(noutput=noutput):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, 80, None]),
        *combos.conv2d_block(50, 3, mp=(2, 1)),
        *combos.conv2d_block(100, 3, mp=(2, 1)),
        *combos.conv2d_block(150, 3, mp=2),
        layers.Reshape(0, [1, 2], 3),  # BDHW -> B(D*H)W: fold height into features
        layers.Reorder("BDL", "LBD"),
        flex.LSTM(100, bidirectional=True),
        layers.Reorder("LBD", "BDL"),
        flex.Conv1d(noutput, 1),
        layers.Reorder("BDL", ocr_output))
    flex.shape_inference(model, (1, 1, 80, 200))
    return model


def test_UnetLayer():
    for mode in range(100):
        if f"UnetLayer{mode}" not in combos.__dict__:
            continue
        print(f"testing mode {mode}:")
        mod = combos.__dict__[f"UnetLayer{mode}"](33, dropout=0.5)
        flex.shape_inference(mod, (17, 11, 64, 64))
        print(mod)
        a = torch.ones((17, 11, 64, 64))
        b = mod(a)
        assert b.shape[:1] == a.shape[:1]
        assert b.shape[2:] == a.shape[2:]


def test_shape_inference():
    mod = nn.Sequential(flex.Conv1d(3, 3, padding=1))
    print(mod)
    assert "Flex" in repr(mod)
    a = torch.zeros((7, 3, 99))
    b = mod(a)
    assert b.size() == (7, 3, 99)
    assert "Flex" in repr(mod)
    print(mod)
    flex.shape_inference(mod, a.shape)
    print(mod)
    assert "Flex" not in repr(mod)
    a = torch.zeros((4, 3, 9))
    b = mod(a)
    assert b.size() == (4, 3, 9)


def make_lstm_transpose(noutput=noutput):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        *combos.conv2d_block(50, 3, repeat=2),
        *combos.conv2d_block(100, 3, repeat=2),
        *combos.conv2d_block(150, 3, repeat=2),
        *combos.conv2d_block(200, 3, repeat=2),
        layers.Fun("lambda x: x.sum(2)"),  # BDHW -> BDW
        flex.ConvTranspose1d(800, 1, stride=2),  # <-- undo too tight spacing
        # flex.BatchNorm1d(),
        nn.ReLU(),
        layers.Reorder("BDL", "LBD"),
        flex.LSTM(100, bidirectional=True),
        layers.Reorder("LBD", "BDL"),
        flex.Conv1d(noutput, 1),
        layers.Reorder("BDL", ocr_output))
    flex.shape_inference(model, (1, 1, 128, 512))
    return model


def make_lstm_keep(noutput=noutput):
    model = nn.Sequential(
        layers.Input("BDHW", range=(0, 1), sizes=[None, 1, None, None]),
        layers.KeepSize(
            mode="nearest",
            dims=[3],
            sub=nn.Sequential(
                *combos.conv2d_block(50, 3, repeat=2),
                *combos.conv2d_block(100, 3, repeat=2),
                *combos.conv2d_block(150, 3, repeat=2),
                layers.Fun("lambda x: x.sum(2)")  # BDHW -> BDW
            )),
        flex.Conv1d(500, 5, padding=2),
        flex.BatchNorm1d(),
        nn.ReLU(),
        layers.Reorder("BDL", "LBD"),
        flex.LSTM(200, bidirectional=True),
        layers.Reorder("LBD", "BDL"),
        flex.Conv1d(noutput, 1),
        layers.Reorder("BDL", ocr_output))
    flex.shape_inference(model, (1, 1, 128, 512))
    return model
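

# A hypothetical quick check (not in the original file): build each recognizer
# with an explicit alphabet size and report its parameter count. Since every
# builder runs flex.shape_inference, all Flex layers are already materialized
# and their parameters countable. Assumes the module-level helpers referenced
# above exist.
if __name__ == "__main__":
    makers = [make_lstm_ctc, make_lstm_unet, make_conv_resnet, make_conv_only,
              make_lstm_normalized, make_lstm_transpose, make_lstm_keep]
    for maker in makers:
        model = maker(noutput=97)
        nparams = sum(p.numel() for p in model.parameters())
        print(f"{maker.__name__}: {nparams / 1e6:.1f}M parameters")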