# --- Build and restore the pretrained low-resolution registration network ---
BATCH_SIZE = 32
SCALE = 1  # 1 IS QUARTER RES, 2 IS HALF RES, 4 IS FULL RES
input_shape = [BATCH_SIZE, 1, 40 * SCALE, 96 * SCALE, 96 * SCALE]
GPUS = 4

# Two-stage deformable registration: phi followed by psi, composed via DoubleNet.
phi = network_wrappers.FunctionFromVectorField(
    networks.tallUNet(unet=networks.UNet2ChunkyMiddle, dimension=3)
)
psi = network_wrappers.FunctionFromVectorField(networks.tallUNet2(dimension=3))

pretrained_lowres_net = inverseConsistentNet.InverseConsistentNet(
    network_wrappers.DoubleNet(phi, psi),
    lambda x, y: torch.mean((x - y) ** 2),  # MSE image similarity
    100,  # presumably the inverse-consistency weight — TODO confirm
)
network_wrappers.assignIdentityMap(pretrained_lowres_net, input_shape)
network_wrappers.adjust_batch_size(pretrained_lowres_net, 12)

trained_weights = torch.load(
    "results/dd_l400_continue_rescalegrad2/knee_aligner_resi_net1800"
)
# trained_weights = torch.load("../results/dd_knee_l400_continue_smallbatch2/knee_aligner_resi_net9300")
# rained_weights = torch.load("../results/double_deformable_knee3/knee_aligner_resi_net22200")
pretrained_lowres_net.load_state_dict(trained_weights)
# --- Train a 2D triangle registration net with a RandomShift front end ---
# NOTE(review): d1, d2 and batch_size are defined earlier in the file — not
# visible in this chunk.
d1_t, d2_t = data.get_dataset_triangles(
    "test", data_size=50, hollow=False, batch_size=batch_size
)
lmbda = 2048

# Seed every RNG for reproducibility.
random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
np.random.seed(1)

print("=" * 50)

net = inverseConsistentNet.InverseConsistentNet(
    network_wrappers.DoubleNet(
        network_wrappers.RandomShift(0.25),
        network_wrappers.FunctionFromVectorField(networks.tallUNet2(dimension=2)),
    ),
    lambda x, y: torch.mean((x - y) ** 2),  # MSE image similarity
    lmbda,
)
input_shape = next(iter(d1))[0].size()
network_wrappers.assignIdentityMap(net, input_shape)
net.cuda()

optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
net.train()

# Collect per-epoch training statistics: 40 rounds of 50 epochs each.
xs = []
for _ in range(40):
    y = np.array(train.train2d(net, optimizer, d1, d2, epochs=50))
    xs.append(y)
import data import describe BATCH_SIZE = 32 SCALE = 1 # 1 IS QUARTER RES, 2 IS HALF RES, 4 IS FULL RES input_shape = [BATCH_SIZE, 1, 40 * SCALE, 96 * SCALE, 96 * SCALE] GPUS = 4 phi = network_wrappers.FunctionFromVectorField( networks.tallUNet(unet=networks.UNet2ChunkyMiddle, dimension=3)) psi = network_wrappers.FunctionFromVectorField(networks.tallUNet2(dimension=3)) pretrained_lowres_net = inverseConsistentNet.InverseConsistentNet( network_wrappers.DoubleNet(phi, psi), lambda x, y: torch.mean((x - y)**2), 100, ) network_wrappers.assignIdentityMap(pretrained_lowres_net, input_shape) network_wrappers.adjust_batch_size(pretrained_lowres_net, 12) trained_weights = torch.load( "results/dd_l400_continue_rescalegrad2/knee_aligner_resi_net25500") # trained_weights = torch.load("../results/dd_knee_l400_continue_smallbatch2/knee_aligner_resi_net9300") # rained_weights = torch.load("../results/double_deformable_knee3/knee_aligner_resi_net22200") pretrained_lowres_net.load_state_dict(trained_weights) hires_net = inverseConsistentNet.InverseConsistentNet( network_wrappers.DownsampleNet(pretrained_lowres_net.regis_net,
import data
import describe

# --- Restore the double-deformable knee aligner for evaluation ---
BATCH_SIZE = 12
SCALE = 1  # 1 IS QUARTER RES, 2 IS HALF RES, 4 IS FULL RES
input_shape = [BATCH_SIZE, 1, 40 * SCALE, 96 * SCALE, 96 * SCALE]
GPUS = 4

phi = network_wrappers.FunctionFromVectorField(
    networks.tallUNet(unet=networks.UNet2ChunkyMiddle, dimension=3)
)
psi = network_wrappers.FunctionFromVectorField(networks.tallUNet2(dimension=3))

net = inverseConsistentNet.InverseConsistentNet(
    network_wrappers.DoubleNet(phi, psi),
    lambda x, y: torch.mean((x - y) ** 2),  # MSE image similarity
    100,
)
network_wrappers.assignIdentityMap(net, input_shape)

# load weights
net_weights = torch.load(
    "results/double_deformable_knee3/knee_aligner_resi_net26400"
)
opt_weights = torch.load(
    "results/double_deformable_knee3/knee_aligner_resi_opt26400"
)

# Temporarily switch to batch size 32 before loading — presumably the
# checkpoint's buffers were saved at that size — then restore BATCH_SIZE.
# TODO(review): confirm against the training script that produced it.
network_wrappers.adjust_batch_size(net, 32)
net.load_state_dict(net_weights)
network_wrappers.adjust_batch_size(net, BATCH_SIZE)
import torch import random import inverseConsistentNet import networks import data import describe BATCH_SIZE = 8 SCALE = 2 # 1 IS QUARTER RES, 2 IS HALF RES, 4 IS FULL RES working_shape = [BATCH_SIZE, 1, 40 * SCALE, 96 * SCALE, 96 * SCALE] GPUS = 4 net = inverseConsistentNet.InverseConsistentNet( networks.tallUNet2(dimension=3), lmbda=512, input_shape=working_shape, random_sampling=False, ) knees, medknees = data.get_knees_dataset() knees = [F.avg_pool3d(knee, 2) for knee in knees] if GPUS == 1: net_par = net.cuda() else: net_par = torch.nn.DataParallel(net).cuda() optimizer = torch.optim.Adam(net_par.parameters(), lr=0.00005) net_par.train() def make_batch():
# --- Train an affine (matrix) registration net on hollow triangles ---
batch_size = 128
data_size = 50

d1, d2 = data.get_dataset_triangles(
    "train", data_size=data_size, hollow=True, batch_size=batch_size
)
d1_t, d2_t = data.get_dataset_triangles(
    "test", data_size=data_size, hollow=True, batch_size=batch_size
)

# Grab one batch just to learn the input tensor shape.
image_A, image_B = (x[0].cuda() for x in next(zip(d1, d2)))

net = inverseConsistentNet.InverseConsistentNet(
    network_wrappers.FunctionFromMatrix(networks.ConvolutionalMatrixNet()),
    lambda x, y: torch.mean((x - y) ** 2),  # MSE image similarity
    100,
)
network_wrappers.assignIdentityMap(net, image_A.shape)
net.cuda()

import train

optim = torch.optim.Adam(net.parameters(), lr=0.00001)
net.train().cuda()

# 240 rounds of 50 epochs; stack the per-epoch stats into one array.
xs = []
for _ in range(240):
    y = np.array(train.train2d(net, optim, d1, d2, epochs=50))
    xs.append(y)
x = np.concatenate(xs)
#     "test", data_size=50, hollow=True, samples=256
# )
# --- Resume triangle training from the SmallDatasetTriangles6 checkpoint ---
# Test set aliases the train set here (small-dataset overfitting experiment).
d1_triangles_test, d2_triangles_test = d1_triangles, d2_triangles
network = networks.tallUNet2
d1, d2, d1_t, d2_t = (
    d1_triangles,
    d2_triangles,
    d1_triangles_test,
    d2_triangles_test,
)
lmbda = 2048

# Seed every RNG for reproducibility.
random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
np.random.seed(1)

print("=" * 50)
print(network, lmbda)

net = inverseConsistentNet.InverseConsistentNet(
    network(), lmbda, next(iter(d1))[0].size()
)
net.load_state_dict(torch.load("results/SmallDatasetTriangles6/network.trch"))
net.cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
net.train()
# Restore optimizer state as well so training resumes exactly.
optimizer.load_state_dict(torch.load("results/SmallDatasetTriangles6/opt.trch"))

for _ in range(40):
    x = np.array(train.train2d(net, optimizer, d1, d2, epochs=50))

# NOTE(review): two titles are set back to back with no figure shown or saved
# in between in this chunk — the first title is overwritten unless plt.show()
# happens elsewhere; verify.
plt.title(
    "Loss curve for " + type(net.regis_net).__name__ + " lambda=" + str(lmbda)
)
plt.plot(x[:, :3])
plt.title("Log # pixels with negative Jacobian per epoch")
plt.plot(x[:, 3])
import data
import describe

# --- High-resolution NCC registration: low-res double net + hi-res refiner ---
BATCH_SIZE = 4
SCALE = 2  # 1 IS QUARTER RES, 2 IS HALF RES, 4 IS FULL RES
input_shape = [BATCH_SIZE, 1, 40 * SCALE, 96 * SCALE, 96 * SCALE]
GPUS = 4

phi = network_wrappers.FunctionFromVectorField(
    networks.tallUNet(unet=networks.UNet2ChunkyMiddle, dimension=3)
)
psi = network_wrappers.FunctionFromVectorField(networks.tallUNet2(dimension=3))

net = inverseConsistentNet.InverseConsistentNet(
    network_wrappers.DoubleNet(
        # Low-res two-stage net runs on downsampled input ...
        network_wrappers.DownsampleNet(
            network_wrappers.DoubleNet(phi, psi), dimension=3
        ),
        # ... then a full-resolution net refines the result.
        network_wrappers.FunctionFromVectorField(networks.tallUNet2(dimension=3)),
    ),
    inverseConsistentNet.ncc,  # normalized cross-correlation similarity
    165000,
)
network_wrappers.assignIdentityMap(net, input_shape)

weights = torch.load("results/hires_ncc_70000_6/knee_aligner_resi_net4200")
net.load_state_dict(weights)

# Single GPU runs the bare module; multi-GPU wraps it in DataParallel.
if GPUS == 1:
    net_par = net.cuda()
else:
    net_par = torch.nn.DataParallel(net).cuda()

optimizer = torch.optim.Adam(net_par.parameters(), lr=0.00001)
torch.cuda.manual_seed(1)
np.random.seed(1)

# --- Overfit an FCNet to a single triangle pair ---
d1_triangles, d2_triangles = data.get_dataset_triangles(
    "train", data_size=50, hollow=False
)
network = networks.FCNet
d1, d2 = (d1_triangles, d2_triangles)
lmbda = 2048

# Take one batch, then keep only the first pair of images.
image_A, image_B = (x[0].cuda() for x in next(zip(d1, d2)))
image_A = image_A[:1].cuda()
image_B = image_B[:1].cuda()

print("=" * 50)
print(network, lmbda)

net = inverseConsistentNet.InverseConsistentNet(network(), lmbda, image_A.size())
net.cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=0.00001)
net.train()


# Trains on one fixed image pair; prints a "[----]" progress strip, one dash
# per epoch. Definition continues beyond this chunk.
def train2d(net, optimizer, image_A, image_B, epochs=400):
    loss_history = []
    print("[", end="")
    for epoch in range(epochs):
        print("-", end="")
        if (epoch + 1) % 50 == 0:
            print("]", end="\n[")
        for _ in range(1):
            optimizer.zero_grad()
# --- Affine-then-deformable registration net on hollow triangles ---
# NOTE(review): data_size and batch_size are defined earlier in the file —
# not visible in this chunk.
d1, d2 = data.get_dataset_triangles(
    "train", data_size=data_size, hollow=True, batch_size=batch_size
)
d1_t, d2_t = data.get_dataset_triangles(
    "test", data_size=data_size, hollow=True, batch_size=batch_size
)

image_A, image_B = (x[0].cuda() for x in next(zip(d1, d2)))

net = inverseConsistentNet.InverseConsistentNet(
    network_wrappers.DoubleNet(
        # Stage 1: global affine alignment.
        network_wrappers.FunctionFromMatrix(
            networks.ConvolutionalMatrixNet(dimension=2),
        ),
        # Stage 2: deformable refinement.
        network_wrappers.FunctionFromVectorField(networks.tallUNet2(dimension=2)),
    ),
    lambda x, y: torch.mean((x - y) ** 2),  # MSE image similarity
    700,
)
input_shape = next(iter(d1))[0].size()
network_wrappers.assignIdentityMap(net, input_shape)

# pretrained_weights = torch.load("results/affine_triangle_pretrain/epoch000case0.png")
# pretrained_weights = OrderedDict(
#     [
#         (a.split("regis_net.")[1], b)
#         for a, b in pretrained_weights.items()
#         if "regis_net" in a
#     ]
# )
hollow=False, batch_size=batch_size) d1_t, d2_t = data.get_dataset_triangles("test", data_size=50, hollow=False, batch_size=batch_size) lmbda = 2048 random.seed(1) torch.manual_seed(1) torch.cuda.manual_seed(1) np.random.seed(1) print("=" * 50) net = inverseConsistentNet.InverseConsistentNet( network_wrappers.FunctionFromVectorField(networks.tallUNet2(dimension=2)), inverseConsistentNet.ncc, lmbda, ) input_shape = next(iter(d1))[0].size() network_wrappers.assignIdentityMap(net, input_shape) net.cuda() optimizer = torch.optim.Adam(net.parameters(), lr=0.001) net.train() xs = [] for _ in range(40): y = np.array(train.train2d(net, optimizer, d1, d2, epochs=50)) xs.append(y) x = np.concatenate(xs) plt.title("Loss curve for " + type(net.regis_net).__name__ + " lambda=" +