def unet_large(img_shape, num_classes, optimizer, loss):
    """Build the large U-Net variant with a softmax classification head.

    The encoder/decoder widths are fixed at [32, 64, 128, 256, 128]; the
    final activation layer is named 'outputs_hr' so callers can address it.
    """
    inputs, logits = UNet(img_shape, dims=[32, 64, 128, 256, 128],
                          out_ch=num_classes)
    probabilities = Activation("softmax", name="outputs_hr")(logits)
    return make_model(inputs, probabilities, optimizer, loss)
batch_size=MBS, shuffle=True) #Obtain testing dataset, initiate test loader. Shuffle the data DS = 'test_unet_RNNData.pkl' with open(PicklePath + DS, 'rb') as f: testset = pickle.load(f) #print(np.array(testset).shape) test_loader = torch.utils.data.DataLoader(testset, batch_size=MBS, shuffle=True) """ ============================== INSTANTIATE MODEL ============================= """ #Use BidirectionalRNN with 8 layers. Send to cuda() cnn = UNet() cnn.cuda() criterion = nn.MSELoss() #Loss function is MSE optimizer = torch.optim.Adam(cnn.parameters(), lr=LEARNING_RATE) train_loss_plot = [] #Keep track of training loss test_loss_plot = [] #Keep track of testing loss model_pretrained_name = "unet_l2_tau1e-3_Ellipsoid_RnnData.pt" torch.autograd.set_detect_anomaly = False #Prevents memory leaks... """ ==================================== TRAINING ================================= """ print("Starting Training...")
def unet(img_shape, num_classes, loss):
    """Build the default U-Net with a softmax head named 'outputs_hr'.

    Uses fixed stage widths [64, 32, 32, 32, 32] and one output channel
    per class.
    """
    inputs, logits = UNet(img_shape, dims=[64, 32, 32, 32, 32],
                          out_ch=num_classes)
    probabilities = Activation("softmax", name="outputs_hr")(logits)
    return make_model(inputs, probabilities, loss)
def build_model(self, netpath: str = None):
    """Instantiate ``self.net`` from ``self.args``, cast it, and cache its parameters.

    Selects a 2D architecture when ``self.args.datadim`` is '2d' or '2.5d',
    otherwise a 3D one; casts the network to ``self.dtype``; initializes
    weights unless a checkpoint was loaded; and records the optimizable
    parameters and the total parameter count.

    Args:
        netpath: path to a saved state dict; only read when
            ``self.args.net == 'load'`` (3D branch).
    """
    # Default the output channel count from the target image.
    # NOTE(review): assumes self.img_ is laid out (N, C, ...) — confirm.
    if self.outchannel is None:
        self.outchannel = self.img_.shape[1]
    if self.args.datadim in ['2d', '2.5d']:
        # ---- 2D / 2.5D architectures ----
        if self.args.net == 'unet':
            self.net = UNet(
                num_input_channels=self.args.inputdepth,
                num_output_channels=self.outchannel,
                filters=self.args.filters,
                upsample_mode=self.args.upsample,  # default is bilinear
                need_sigmoid=self.args.need_sigmoid,
                need_bias=True,
                activation=self.args.activation
            )
        elif self.args.net == 'attmultiunet':
            self.net = AttMulResUnet2D(
                num_input_channels=self.args.inputdepth,
                num_output_channels=self.outchannel,
                num_channels_down=self.args.filters,
                upsample_mode=self.args.upsample,  # default is bilinear
                need_sigmoid=self.args.need_sigmoid,
                need_bias=True,
                act_fun=self.args.activation  # default is LeakyReLU
            )
        elif self.args.net == 'part':
            self.net = PartialConvUNet(self.args.inputdepth, self.outchannel)
        else:
            # Fallback 2D multi-resolution U-Net.
            self.net = MulResUnet(
                num_input_channels=self.args.inputdepth,
                num_output_channels=self.outchannel,
                num_channels_down=self.args.filters,
                num_channels_up=self.args.filters,
                num_channels_skip=self.args.skip,
                upsample_mode=self.args.upsample,  # default is bilinear
                need_sigmoid=self.args.need_sigmoid,
                need_bias=True,
                act_fun=self.args.activation  # default is LeakyReLU
            )
    else:
        # ---- 3D architectures ----
        if self.args.net == 'part':
            self.net = PartialConv3DUNet(self.args.inputdepth, self.outchannel)
        elif self.args.net == 'load':
            self.net = MulResUnet3D(
                num_input_channels=self.args.inputdepth,
                num_output_channels=self.outchannel,
                num_channels_down=self.args.filters,
                num_channels_up=self.args.filters,
                num_channels_skip=self.args.skip,
                upsample_mode=self.args.upsample,  # default is bilinear
                need_sigmoid=self.args.need_sigmoid,
                need_bias=True,
                act_fun=self.args.activation  # default is LeakyReLU
            )
            # Restore pretrained weights; netpath must be provided here.
            self.net.load_state_dict(torch.load(netpath))
        else:
            # Fallback 3D multi-resolution U-Net (fresh weights).
            self.net = MulResUnet3D(
                num_input_channels=self.args.inputdepth,
                num_output_channels=self.outchannel,
                num_channels_down=self.args.filters,
                num_channels_up=self.args.filters,
                num_channels_skip=self.args.skip,
                upsample_mode=self.args.upsample,  # default is bilinear
                need_sigmoid=self.args.need_sigmoid,
                need_bias=True,
                act_fun=self.args.activation  # default is LeakyReLU
            )
    # Cast the whole network to the working tensor dtype.
    self.net = self.net.type(self.dtype)
    if self.args.net != 'load':
        # Freshly built networks get explicit weight initialization;
        # loaded checkpoints keep their trained weights.
        u.init_weights(self.net, self.args.inittype, self.args.initgain)
    self.parameters = u.get_params('net', self.net, self.input_)
    # Total number of parameters in the network.
    self.num_params = sum(
        np.prod(list(p.size())) for p in self.net.parameters())