Code Example #1
def vismodel():
    import torch
    import tensorwatch as tw
    from networks import ResnetConditionHR  # project-local module

    # Build the network and render its graph
    netM = ResnetConditionHR(input_nc=(3, 3, 1, 4),
                             output_nc=4,
                             n_blocks1=7,
                             n_blocks2=3)
    tw.draw_model(netM, [1, 3, 512, 512])
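In a Jupyter notebook the drawing returned by tw.draw_model renders inline, but in a plain script it is silently discarded. A minimal sketch of persisting it instead (the netM.png filename is illustrative):

drawing = tw.draw_model(netM, [1, 3, 512, 512])
drawing.save('netM.png')  # illustrative output path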
Code Example #2
File: test_func.py  Project: ray0809/pytorch
import torch
import tensorwatch as tw
# build_vgg_ssd and cfg come from the surrounding project; the import path
# below is a hypothetical placeholder, adjust it to the repo's layout.
from ssd import build_vgg_ssd, cfg


def test_build_ssd():
    # Verify that the SSD model builds and runs a forward pass
    ssd = build_vgg_ssd(cfg)
    ssd = ssd.eval()

    inputs = torch.randn(1, 3, 300, 300)

    conf, loc, _ = ssd(inputs)
    print('conf size: {}, loc size: {}'.format(conf.size(), loc.size()))
    g = tw.draw_model(ssd, [1, 3, 300, 300])  # already in eval mode
    g.save('ssd_network.png')
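Usage note: tw.draw_model builds its diagram via Graphviz under the hood, so saving the PNG above typically assumes a working Graphviz installation on the machine; the test itself does not check for it.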
Code Example #3
File: nnUNetTrainer.py  Project: 975150313/nnunet-1
    def initialize_network_optimizer_and_scheduler(self):
        """
        This is specific to the U-Net and must be adapted for other network architectures
        :return:
        """
        #self.print_to_log_file(self.net_num_pool_op_kernel_sizes)
        #self.print_to_log_file(self.net_conv_kernel_sizes)
        print('Entered initialize_network_optimizer_and_scheduler')
        net_numpool = len(self.net_num_pool_op_kernel_sizes)

        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.InstanceNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.InstanceNorm2d

        norm_op_kwargs = {'eps': 1e-5, 'affine': True}
        dropout_op_kwargs = {'p': 0, 'inplace': True}
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        torch.cuda.set_device(0)

        self.do_supervision = False
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, net_numpool,
                                    2, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
                                    net_nonlin, net_nonlin_kwargs, self.do_supervision, False, lambda x: x, InitWeights_He(1e-2),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)

        # print('self.network is:', self.network)
        # To train data-parallel across GPUs, the network could be wrapped:
        # self.network = nn.DataParallel(self.network, device_ids=[0, 1, 2, 3])
        g = tw.draw_model(self.network, [8, 1, 128, 128, 128])
        g.save('/cache/WenshuaiZhao/ProjectFiles/NNUnet/nnunet/model_structure.png')
        print('draw_model finished; figure saved')

        self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr,
                                          weight_decay=self.weight_decay, amsgrad=True)
        # The optimizer would likewise need special handling for multi-GPU training:
        # self.optimizer = nn.DataParallel(self.optimizer, device_ids=[0, 1, 2, 3])

        self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2, patience=self.lr_scheduler_patience,
                                                           verbose=True, threshold=self.lr_scheduler_eps, threshold_mode="abs")
        self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
Code Example #4
def draw_visual_net(net, input, filename="", method="torchviz"):
    # Draw `net` with one of three visualization backends
    if method == "torchviz":
        from torchviz import make_dot
        output = net(input)
        g = make_dot(output)
        # g = make_dot(output, params=dict(backbone.named_parameters()))
        # g = make_dot(y, params=dict(list(model.named_parameters()) + [('x', x)]))
        # g.view()  # generates and opens a Digraph.gv.pdf file
        # Saves '<filename>_net.pdf'; view=True would open the PDF automatically
        g.render(filename + '_net', view=False)

    elif method == "tensorboard":
        from torch.utils.tensorboard import SummaryWriter
        with SummaryWriter(comment='mnt') as w:
            w.add_graph(net, input)

    elif method == "tensorwatch":
        import tensorwatch as tw
        # Note: tw.draw_model expects an input *shape* (e.g. [1, 3, 224, 224]),
        # not a tensor, so `input` should be a shape list on this path
        img = tw.draw_model(net, input)
        img.save(r'./alexnet.jpg')
Code Example #5
 def __init__(self,
              params,
              net,
              datasets,
              criterion,
              optimizer,
              scheduler,
              sets=['train', 'val', 'test'],
              verbose=100,
              stat=False,
              eval_func=compute_errors,
              disp_func=display_figure):
     self.time = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
     self.params = params
     self.verbose = verbose
     self.eval_func = eval_func
     self.disp_func = disp_func
     # Init directories (params.workdir is assumed to be set by the caller)
     if params.workdir is not None:
         workdir = os.path.expanduser(params.workdir)
     if params.logdir is None:
         logdir = os.path.join(
             workdir, 'log_{}_{}'.format(params.encoder + params.decoder,
                                         params.dataset))
     else:
         logdir = os.path.join(workdir, params.logdir)
     resdir = None
     if self.params.mode == 'test':
         if params.resdir is None:
             resdir = os.path.join(logdir, 'res')
         else:
             resdir = os.path.join(logdir, params.resdir)
     # Call the constructor of the parent class (Trainer)
     super().__init__(net,
                      datasets,
                      optimizer,
                      scheduler,
                      criterion,
                      batch_size=params.batch,
                      batch_size_val=params.batch_val,
                      max_epochs=params.epochs,
                      threads=params.threads,
                      eval_freq=params.eval_freq,
                      use_gpu=params.gpu,
                      resume=params.resume,
                      mode=params.mode,
                      sets=sets,
                      workdir=workdir,
                      logdir=logdir,
                      resdir=resdir)
     self.params.logdir = self.logdir
     self.params.resdir = self.resdir
     # Dump the run parameters to a timestamped JSON file
     if self.params.mode == 'train':
         with open(
                 os.path.join(self.logdir,
                              'params_{}.json'.format(self.time)),
                 'w') as f:
             json.dump(vars(self.params), f)
     # Pass stat=True to display model complexity statistics, then exit
     if stat:
         from torchstat import stat
         import tensorwatch as tw
         #net_copy = deepcopy(self.net)
         stat(self.net, (3, *self.datasets[sets[0]].input_size))
         tw.draw_model(self.net, (1, 3, *self.datasets[sets[0]].input_size))
         #del net_copy
         exit()
     self.print('###### Experiment Parameters ######')
     for k, v in vars(self.params).items():
         self.print('{0:<22s} : {1:}'.format(k, v))
Code Example #6
def vis_model(model, input_size=[1, 3, 224, 224]):
    # Render the model graph and save it; input_size is now actually
    # used instead of a hard-coded shape
    import tensorwatch as tw
    drawing = tw.draw_model(model, input_size)
    drawing.save('model_distillation.pdf')
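A hedged usage sketch for the helper above (torchvision's resnet18 is just an illustrative stand-in; any model accepting the given input shape works):

import torchvision.models

vis_model(torchvision.models.resnet18(), input_size=[1, 3, 224, 224])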
Code Example #7
File: network_visual.py  Project: FMsunyh/SiamDW
#!/usr/bin/python3
"""
Copyright 2018-2019  Firmin.Sun ([email protected])

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -----------------------------------------------------
# @Time    : 6/18/2019 11:36 AM
# @Author  : Firmin.Sun ([email protected])
# @Software: ZJ_AI
# -----------------------------------------------------
# -*- coding: utf-8 -*-
import torch
import tensorwatch as tw
import torchvision.models

alexnet_model = torchvision.models.alexnet()
# In a notebook both calls render inline; in a script, capture the results
drawing = tw.draw_model(alexnet_model, [1, 3, 224, 224])
stats = tw.model_stats(alexnet_model, [1, 3, 224, 224])  # per-layer params/MAdd summary
Code Example #8
        seg1 = self.classifier1(c2_b)
        edge, edge_fea = self.edge(c2, c3)

        merge = torch.cat([c2_b, edge_fea], dim=1)
        seg2 = self.classifier2(merge)

        return [[seg1, seg2], [edge]]


def FaceParseNet101(num_classes=19, url=None, pretrained=True):
    model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes)
    if pretrained:
        # `url` must point to a loadable checkpoint; torch.load(None) would fail
        pretrained_dict = torch.load(url)
        model_dict = model.state_dict().copy()
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if k in model_dict
        }
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    return model


if __name__ == '__main__':
    from torchstat import stat
    import tensorwatch as tw

    net = FaceParseNet101(pretrained=False)
    # stat(net, (3, 512, 512))
    tw.draw_model(net, [1, 3, 512, 512])
Code Example #9
import torch
from torchvision.models import AlexNet
from torchviz import make_dot
import hiddenlayer as h

x = torch.rand(1, 3, 256, 256)
model = AlexNet()
vis_graph = h.build_graph(model, torch.zeros([1, 3, 256, 256]))  # build the graph object to draw
vis_graph.theme = h.graph.THEMES["blue"].copy()  # set the theme color
vis_graph.save("./demo1.png")  # path where the image is saved
# y = model(x)

# # Any of these three forms works:
# g = make_dot(y)
# # g = make_dot(y, params=dict(model.named_parameters()))
# # g = make_dot(y, params=dict(list(model.named_parameters()) + [('x', x)]))

# # Either of these two methods works:
# # g.view()  # generates and opens a Digraph.gv.pdf file
# g.render('espnet_model', view=False)  # saves espnet_model.pdf; view=True would open it automatically


import torch
import tensorwatch as tw
from torchvision.models import AlexNet
# from lanenet_model.blocks import ESPNet_Encoder  # a custom network from the original author

# It really comes down to two lines:
model = AlexNet()
tw.draw_model(model, [1, 3, 512, 256])
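As with the other notebook-style snippets, the final call renders inline under Jupyter but is discarded in a script; a sketch of capturing and saving it (the filename is illustrative):

drawing = tw.draw_model(model, [1, 3, 512, 256])
drawing.save('alexnet_graph.png')  # illustrative filename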
Code Example #10
File: draw_model.py  Project: zhuikonger/tensorwatch
import torch
import torchvision.models
import tensorwatch as tw

vgg16_model = torchvision.models.vgg16()

drawing = tw.draw_model(vgg16_model, [1, 3, 224, 224])
drawing.save('abc.png')

input("Press any key")
Code Example #11
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import tensorwatch as tw
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms

writer = SummaryWriter()


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.layer = nn.Sequential(nn.Conv2d(3, 64, 7, padding=1, stride=1),
                                   nn.ReLU(), nn.MaxPool2d(1, 2),
                                   nn.BatchNorm2d(64), nn.AvgPool2d(1, 1),
                                   nn.Dropout(0.5), nn.Linear(110, 30))

    def forward(self, x):
        return self.layer(x)


net = Net()
args = torch.ones([1, 3, 224, 224])
# writer.add_graph(net, args)
# writer.close()

#vgg16_model = torchvision.models.vgg16()

drawing = tw.draw_model(net, [1, 3, 224, 224])
drawing.save('abc2.png')

input("Press any key")
Code Example #12
import os
import time
from glob import glob

import torch
import tensorwatch as tw
from torch.autograd import Variable
# pwc_dc_net comes from the surrounding project; this import path is a
# hypothetical placeholder, adjust it to the repo's layout.
from models import pwc_dc_net


def find_NewFile(path):
    # Return the most recently modified .tar checkpoint under `path`
    lists = glob(os.path.join(path, '*.tar'))
    # Sort the matched files by modification time
    lists.sort(key=lambda x: os.path.getmtime(x))
    # The newest file is last (glob already returns joined paths)
    file_new = lists[-1]
    return file_new

model = pwc_dc_net()
model.cuda()
model.eval()

from torchviz import make_dot
# x = Variable(torch.randn(2, 1, 3, 256, 256).cuda())
# vis_graph = make_dot(model(x), params=dict(model.named_parameters()))
# vis_graph.view()
tw.draw_model(model, [2, 1, 3, 224, 224])

def main():
    weight_path = find_NewFile('logname')
    # weight_path = 'logname/finetune_60.tar'
    model = pwc_dc_net(weight_path)
    model.cuda()
    model.eval()
    x1 = Variable(torch.zeros((1, 3, 128, 128))).cuda()
    x2 = Variable(torch.zeros((1, 3, 128, 128))).cuda()
    for i in range(10):
        t_s = time.time()
        model((x1, x2))
        t_d = time.time()
        print((t_d - t_s) * 1000)  # per-forward latency in milliseconds
    # summary(model, input_size=(1, 1, 256, 256))
Code Example #13
File: model.py  Project: GitStardust/pytorch-openpose
        self.model4 = blocks['block4']
        self.model5 = blocks['block5']
        self.model6 = blocks['block6']

    def forward(self, x):
        out1_0 = self.model1_0(x)
        out1_1 = self.model1_1(out1_0)
        concat_stage2 = torch.cat([out1_1, out1_0], 1)
        out_stage2 = self.model2(concat_stage2)
        concat_stage3 = torch.cat([out_stage2, out1_0], 1)
        out_stage3 = self.model3(concat_stage3)
        concat_stage4 = torch.cat([out_stage3, out1_0], 1)
        out_stage4 = self.model4(concat_stage4)
        concat_stage5 = torch.cat([out_stage4, out1_0], 1)
        out_stage5 = self.model5(concat_stage5)
        concat_stage6 = torch.cat([out_stage5, out1_0], 1)
        out_stage6 = self.model6(concat_stage6)
        return out_stage6


if __name__ == "__main__":

    import tensorwatch as tw
    import torchvision.models

    # model = torchvision.models.alexnet()
    # tw.draw_model(model, [1, 3, 224, 224])
    model = bodypose_model()
    drawing = tw.draw_model(model, [1, 3, 224, 224])
    drawing.save('bodypose_model.png')
    input("Press any key")