Code example #1
    def load_single_model(self, trained_model_filepath):
        '''
        Load and instantiate the model saved at trained_model_filepath.

        Parameters
        ----------
        trained_model_filepath: str
            Path to a trained single-frame prediction model,
            e.g. '/home/All_12_Basic_model/Basic0/checkpoint.chk'

        Returns
        -------
        network: model
            The loaded and instantiated model
        '''
        import torch

        from leibniz.unet.base import UNet
        from leibniz.unet.residual import Basic
        from leibniz.nn.activation import CappingRelu

        network = UNet(in_channels=self.in_dim,
                       out_channels=self.single_out_dim,
                       normalizor='batch',
                       spatial=(256, 256),
                       layers=4,
                       ratio=0,
                       vblks=[2, 2, 2, 2],
                       hblks=[0, 0, 0, 0],
                       scales=[-1, -1, -1, -1],
                       factors=[1, 1, 1, 1],
                       block=Basic,
                       relu=CappingRelu(),
                       final_normalized=False)

        checkpoint = torch.load(trained_model_filepath)
        network.load_state_dict(checkpoint['net'])

        return network.to(self.device)
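To make the expected tensor shapes concrete, here is a minimal, self-contained sketch that builds the same UNet configuration and runs a dummy forward pass. The channel counts (in_dim = 31, single_out_dim = 1) and the batch size are assumptions chosen for illustration; the UNet hyperparameters are copied from the example above.

# Hedged sketch: in_dim and single_out_dim are assumed values, not taken from the source.
import torch
from leibniz.unet.base import UNet
from leibniz.unet.residual import Basic
from leibniz.nn.activation import CappingRelu

in_dim, single_out_dim = 31, 1  # assumed channel counts

network = UNet(in_channels=in_dim,
               out_channels=single_out_dim,
               normalizor='batch',
               spatial=(256, 256),
               layers=4,
               ratio=0,
               vblks=[2, 2, 2, 2],
               hblks=[0, 0, 0, 0],
               scales=[-1, -1, -1, -1],
               factors=[1, 1, 1, 1],
               block=Basic,
               relu=CappingRelu(),
               final_normalized=False)

network.eval()
with torch.no_grad():
    x = torch.randn(1, in_dim, 256, 256)  # one sample on the 256x256 grid
    y = network(x)                        # expected shape: (1, single_out_dim, 256, 256)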
Code example #2
 def __init__(self):
     super().__init__()
     self.relu = nn.ReLU()
     self.enc = resunet(10,
                        62,
                        block=HyperBottleneck,
                        relu=nn.ReLU(inplace=True),
                        layers=6,
                        ratio=-2,
                        vblks=[1, 1, 1, 1, 1, 1],
                        hblks=[3, 3, 3, 3, 3, 3],
                        scales=[-1, -1, -1, -1, -1, -1],
                        factors=[1, 1, 1, 1, 1, 1],
                        spatial=(64, 64))
     self.dec = resunet(10,
                        10,
                        block=HyperBottleneck,
                        relu=CappingRelu(),
                        layers=6,
                        ratio=-3,
                        vblks=[1, 1, 1, 1, 1, 1],
                        hblks=[0, 0, 0, 0, 0, 0],
                        scales=[-1, -1, -1, -1, -1, -1],
                        factors=[1, 1, 1, 1, 1, 1],
                        spatial=(64, 64),
                        final_normalized=True)
Code example #3
File: t850d3bg_rasp.py  Project: caiyunapp/wxbtool
 def __init__(self, setting):
     super().__init__(setting)
     self.name = 't850d3bg-rasp'
     self.resunet = resunet(setting.input_span * len(setting.vars_in) + self.constant_size + 2, 1,
                            spatial=(32, 64 + 2), layers=5, ratio=-2,
                            vblks=[2, 2, 2, 2, 2], hblks=[1, 1, 1, 1, 1],
                            scales=[-1, -1, -1, -1, -1], factors=[1, 1, 1, 1, 1],
                            block=HyperBottleneck, relu=CappingRelu(), final_normalized=False)
Code example #4
File: train_tent.py  Project: mountain/suan-demo
 def __init__(self):
     super().__init__()
     self.unet = UNet(2,
                      10,
                      normalizor='batch',
                      spatial=(32, 32),
                      layers=5,
                      ratio=0,
                      vblks=[2, 2, 2, 2, 2],
                      hblks=[2, 2, 2, 2, 2],
                      scales=[-1, -1, -1, -1, -1],
                      factors=[1, 1, 1, 1, 1],
                      block=DirectBlocks,
                      relu=CappingRelu(),
                      final_normalized=True)
Code example #5
 def __init__(self):
     super().__init__()
     self.tube = hyptub(10,
                        25,
                        10,
                        encoder=resunet,
                        decoder=resunet,
                        block=HyperBottleneck,
                        relu=CappingRelu(),
                        ratio=-1.5,
                        layers=6,
                        vblks=[1, 1, 1, 1, 1, 1],
                        hblks=[1, 1, 1, 1, 1, 1],
                        scales=[-1, -1, -1, -1, -1, -1],
                        factors=[1, 1, 1, 1, 1, 1],
                        spatial=(64, 64))
Code example #6
 def __init__(self, setting):
     super().__init__(setting)
     tube = hyptub(1664, 832, 1664, encoder=linear, decoder=linear)
     self.resunet = resunet(setting.input_span * (len(setting.vars) + 2) +
                            self.constant_size + 2,
                            1,
                            spatial=(32, 64 + 2),
                            layers=5,
                            ratio=-1,
                            vblks=[2, 2, 2, 2, 2],
                            hblks=[1, 1, 1, 1, 1],
                            scales=[-1, -1, -1, -1, -1],
                            factors=[1, 1, 1, 1, 1],
                            block=HyperBottleneck,
                            relu=CappingRelu(),
                            enhencer=tube,
                            final_normalized=False)
Code example #7
 def __init__(self):
     super().__init__()
     if configs.add_constants:
         self.constants = get_constants()
     self.unet = UNet(configs.input_dim,
                      configs.output_dim,
                      block=HyperBottleneck,
                      relu=CappingRelu(),
                      layers=4,
                      ratio=0,
                      vblks=[2, 2, 2, 2],
                      hblks=[0, 0, 0, 0],
                      scales=[-1, -1, -1, -1],
                      factors=[1, 1, 1, 1],
                      spatial=(32, 64),
                      normalizor='batch',
                      padding=None,
                      final_normalized=False)
Code example #8
 def __init__(self):
     super().__init__()
     if configs.add_constants:
         self.constants = get_constants()
     self.unet = ReUNet(configs.input_dim,
                        configs.output_dim,
                        block=WarpBottleneck,
                        relu=CappingRelu(),
                        layers=4,
                        ratio=0,
                        vblks=[2, 2, 2, 2],
                        hblks=[0, 0, 0, 0],
                        scales=[s, s, s, s],
                        factors=[1, 1, 1, 1],
                        spatial=(6, 48, 48),
                        normalizor='batch',
                        padding=CubeSpherePadding2D(1),
                        final_normalized=False)
Code example #9
else:
    model_configs['out_channels'] = dataset_configs['output_frames']

model_configs['normalizor'] = 'batch'
model_configs['spatial'] = dataset_configs['aim_size']
model_configs['layers'] = 4
model_configs['ratio'] = 0
model_configs['vblks'] = [2] * model_configs['layers']
model_configs['hblks'] = [0] * model_configs['layers']
model_configs['scales'] = [-1] * model_configs['layers']
model_configs['factors'] = [1] * model_configs['layers']
# model_configs['block'] = wp.WarpBottleneck
model_configs['block'] = Basic
# model_configs['block'] = HyperBottleneck

model_configs['relu'] = CappingRelu()
model_configs['final_normalized'] = False

network = UNet(**model_configs)

#%%
## Loss function and its parameter configuration
loss_configs = {}
loss_configs['a'] = 6
loss_configs['b'] = 0.8
loss_configs['mse_w'] = 1
loss_configs['mae_w'] = 1

loss_fn = BLoss.BEXPMSAELoss(**loss_configs)

loss_configs['loss_name'] = loss_fn.__str__()[:-2]
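As a rough illustration of how the configured network and loss_fn might be exercised, below is a hedged sketch of a single optimization step. The optimizer choice, batch size, the 'in_channels' key in model_configs, and the assumption that BEXPMSAELoss is callable as loss_fn(prediction, target) and returns a scalar are all illustrative guesses, not taken from the source.

# Hedged sketch of one training step. Assumes model_configs['in_channels'] was
# set earlier, model_configs['spatial'] is an (H, W) tuple, and BEXPMSAELoss
# follows the usual loss_fn(prediction, target) convention.
import torch

optimizer = torch.optim.Adam(network.parameters(), lr=1e-3)

x = torch.randn(4, model_configs['in_channels'], *model_configs['spatial'])
y_true = torch.randn(4, model_configs['out_channels'], *model_configs['spatial'])

optimizer.zero_grad()
y_pred = network(x)
loss = loss_fn(y_pred, y_true)  # assumed (prediction, target) signature
loss.backward()
optimizer.step()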