Example #1
def __debug_nuse_nn_fcn__():
    import torch
    import torchstat
    m = FCN()
    torchstat.stat(m, (3, 64, 64))
    x = torch.zeros(1, 3, 64, 64)
    assert x.size(2) == m(x).size(2)
    assert x.size(3) == m(x).size(3)
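
# Note: torchstat.stat takes the input size as (C, H, W) without the batch
# dimension and prints a per-layer table of parameters, memory and MAdd/FLOPs;
# the asserts above check that FCN preserves the input's spatial resolution.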
Example #2
def test():
    net = PreActResNet50(num_classes=1000)
    y = net((torch.randn(1, 3, 224, 224)))
    print(y.size())

    from torchstat import stat
    stat(net, (3, 224, 224))
Example #3
def main():
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # if args.gpu_mode and not torch.cuda.is_available():
    #     raise Exception("No GPU found, please run without --gpu_mode=False")

    # print 'scale factor = ', scale_factor, \
    #     '\ntest_dir =', args.test_dataset,\

    from network import Net as net
    #from network import Net as net
    model = net(num_channels=1, scale_factor=4, d=32, s=5, m=1)
    #model = net(num_channels=1, scale_factor=4, d=32, s=5, m=5)
    #model.load_state_dict(torch.load(args.pretrained_model, map_location = torch.device('cpu')))
    #In GPU type
    model.load_state_dict(torch.load(args.pretrained_model,
                                     map_location='cpu'))

    # for param_tensor in model.state_dict():
    #     #print(param_tensor)
    #     #print(model.state_dict()[param_tensor].size())
    #     if 'act' in param_tensor:
    #         print(param_tensor)
    #         print(model.state_dict()[param_tensor])

    # print(model)
    x = torch.randn(1, 1, 1024, 768, requires_grad=False).type(torch.float)
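    # note: x is not passed to stat(); torchstat generates its own input tensor
    # from the (1, 1024, 768) size given below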
    stat(model, (1, 1024, 768))
Example #4
def demo():
    net = resnet50(num_classes=100)
    y = net(torch.randn(2,3,224,224))
    print(y.size())

    from torchstat import stat
    stat(net, (3, 224, 224))

# demo()
Example #5
def demo():
    from torchstat import stat
    st = time.perf_counter()
    for i in range(1):
        net = adaptive_learn_resnet50(num_classes=365)
        y = net(torch.randn(2, 3, 224, 224))
        print(y.size())
    # print("CPU time: {}".format(time.perf_counter() - st))
    stat(net, (3, 224, 224))
Example #6
def demo():
    net = wide_resnet50_2()
    y = net(torch.randn(2, 3, 224, 224))
    print(y.size())
    from torchstat import stat
    stat(net, (3, 224, 224))


# demo()
Example #7
def main():
  database = RetrievalModel(args=args)
  logging.info("db param size = %fMB", utils.count_parameters_in_MB(database))
  logging.info(stat(database,(3,224,224)))
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, args.code_size, args.layers, args.auxiliary, genotype)
  model.drop_path_prob = args.drop_path_prob
  logging.info("query param size = %fMB", utils.count_parameters_in_MB(model))
  logging.info(stat(model,(3,224,224)))
Example #8
def demo():
    from torchstat import stat
    for i in range(1):
        net = resnext50(4, 64)
        y = net(torch.ones(2, 3, 224, 224))
        print(y.size())
    stat(net, (3, 224, 224))


# demo()
Example #9
def main():
    m = torch.load(
        '../../../work_dirs/fabric/defectnet_inverse_cascade_rcnn_r50_fpn_1x/epoch_12.pth'
    )
    print(get_parameter_number(m['state_dict']))

    model = init_detector(
        '../../../configs/fabric/defectnet_inverse_cascade_rcnn_r50_fpn_1x.py',
        '../../../work_dirs/fabric/defectnet_inverse_cascade_rcnn_r50_fpn_1x/epoch_12.pth'
    )

    # count_params(model, (3, 800, 1333))
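    # fake_data presumably builds the image/meta inputs the mmdetection-style
    # detector expects; passing prepared data (rather than a (C, H, W) tuple)
    # relies on a stat() variant that accepts it (assumption).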
    stat(model, fake_data(model, '../../../demo/normal_demo.jpg'))
    stat(model, fake_data(model, '../../../demo/defective_demo.jpg'))
Example #10
def main():
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    #start_epochs = 0
    model.drop_path_prob = 0
    stat(model, (3, 224, 224))

    genotype = eval("genotypes.%s" % "MY_DARTS")
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)

    model.drop_path_prob = 0
    stat(model, (3, 224, 224))
Example #11
def main():
    args = arg()
    try:
        spec = importlib.util.spec_from_file_location('models', args.file)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        model = getattr(module, args.model)()
    except Exception:
        import traceback
        print(f'Tried to import {args.model} from {args.file}, but failed.')
        traceback.print_exc()

        import sys
        sys.exit()

    input_size = tuple(int(x) for x in args.size.split('x'))
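    # args.size is assumed to be of the form "CxHxW" (e.g. "3x224x224"),
    # matching the 'x'-separated parsing above; query_granularity is forwarded
    # to torchstat's report.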
    stat(model, input_size, query_granularity=1)
Example #12
def main():
    # compute statistics for the neural networks
    args = parser.parse_args()

    model = SENet.se_resnet50(num_classes=365)
    stat(model, (3, 224, 224))
    model = SENet.se_resnet152(num_classes=365)
    stat(model, (3, 224, 224))
    '''
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    '''

    print(time.asctime(time.localtime(time.time())))
Example #13
def get_model_statics(model_name):
    compress_rate = [
        0.95, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.9, 0.9, 0.9, 0.9, 0.8, 0.8
    ]
    model_state = torch.load('D:\\datasets\\saved_model\\hrprine\\' +
                             model_name,
                             map_location=device)
    net = VGG(num_classes=10,
              init_weights=False,
              cfg=None,
              compress_rate=compress_rate)
    if 'state_dict' in model_state.keys():
        net.load_state_dict(model_state['state_dict'])
    else:
        net.load_state_dict(model_state)

    print(stat(net, (3, 32, 32)))
Example #14
def main():
    global args, best_prec1
    global viz, train_lot, test_lot
    args = parser.parse_args()
    print("args", args)

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    random.seed(args.seed)

    # create model
    if args.arch == "resnet":
        model = ResidualNet5('ImageNet', args.depth, 1000, args.att_type)

    # define loss function (criterion) and optimizer
    if 0:
        from torchstat import stat
        stat(model, (3, 224, 224))
        exit()
    # model = model.cuda()
    # print ("model")
    # print (model)
    criterion = nn.CrossEntropyLoss().cuda()
    if args.loss_type == 'mse':
        criterion2 = cam_loss_mse_topk().cuda()
    elif args.loss_type == 'kd':
        criterion2 = cam_loss_kd_topk().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    model = torch.nn.DataParallel(model, device_ids=list(range(args.ngpu)))
    #model = torch.nn.DataParallel(model).cuda()

    model = model.cuda()
    print("model")
    print(model)

    # get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            if 'optimizer' in checkpoint:
                optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # import pdb
    # pdb.set_trace()
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, criterion2, 0)
        return

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            # transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        prec1_train, loss_train = train(train_loader, model, criterion,
                                        criterion2, optimizer, epoch,
                                        args.weight)

        # evaluate on validation set
        prec1, loss_test = validate(val_loader, model, criterion, criterion2,
                                    epoch, args.weight)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.prefix)

    print("best_prec1: ", best_prec1)
Example #15
    # test the effect of the components
    model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes)
    if pretrained:
        # pretrained_dict = torch.load(url)
        pretrained_dict = load_url("https://download.pytorch.org/models/resnet18-5c106cde.pth")
        model_dict = model.state_dict().copy()
        pretrained_dict = {k: v for k,
                                    v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    return model

    # def EHANet34(num_classes=19, url=None, pretrained=True):
    #     model = ResNet(BasicBlock, [3, 4, 6, 3], num_classes)
    #     if pretrained:
    #         pretrained_dict = load_url("https://download.pytorch.org/models/resnet34-333f7ec4.pth")
    #         # pretrained_dict = torch.load(url)
    #         model_dict = model.state_dict().copy()
    #         pretrained_dict = {k: v for k,
    #                                     v in pretrained_dict.items() if k in model_dict}
    #         model_dict.update(pretrained_dict)
    #         model.load_state_dict(model_dict)
    #     return model


if __name__ == '__main__':
    from torchstat import stat

    net = EHANet18(pretrained=False)
    stat(net, (3, 256, 256))
Example #16
def stat(self):
    stat(self.net, (3, self.config.ss_size, self.config.ss_size))
Example #17
                            bn.running_mean.data.numpy().tofile(f)
                            bn.running_var.data.numpy().tofile(f)
                            conv.weight.data.numpy().tofile(f)
                    else:
                        conv = model[0]
                        if conv.bias.is_cuda:
                            convert2cpu(conv.bias.data).numpy().tofile(f)
                            convert2cpu(conv.weight.data).numpy().tofile(f)
                        else:
                            conv.bias.data.numpy().tofile(f)
                            conv.weight.data.numpy().tofile(f)

                elif self.blocks[i+1]['type'] == 'shortcut':
                    pass
                elif self.blocks[i+1]['type'] == 'route':
                    pass
                elif self.blocks[i+1]['type'] == 'upsample':
                    pass
                elif self.blocks[i+1]['type'] == 'yolo':
                    pass
                else:
                    print("Unknown layer type:{}".format(self.blocks[i+1]['type']))


if __name__=="__main__":
    darknet = Darknet("./data/yolo_v3.cfg")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    from torchstat import stat
    stat(darknet, (3, 416, 416))
    # summary(darknet, (3, 416, 416))
Example #18
from transformers import BertModel
from torchstat import stat

if __name__ == '__main__':
    bert = BertModel.from_pretrained(
        "/Volumes/PortableSSD/bert模型/bert-base-chinese")
    print(stat(bert, (
        64,
        500,
    )))
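# Note: torchstat builds a float input tensor of the given size, while
# BertModel.forward expects integer token ids, so this call may fail or report
# incomplete statistics for the transformer layers.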
Example #19
        x1 = self.up1(x1)
        x1 = self.Conv_out1(x1)
        return x1

def count_param(model):
    param_count = 0
    for param in model.parameters():
        param_count += param.view(-1).size()[0]
    return param_count


if __name__ == "__main__":
    # A full forward pass
    from torchstat import stat
    # from torchsummary import summary
    from thop import profile

    device = torch.device('cuda')
    # image_size = 128
    # out = None
    # x = torch.rand((1, 1, 64, 64), device=device)
    # print("x size: {}".format(x.size()))
    model = CleanU_Net(4, 2,70)
    # flops, params = profile(model, inputs=(x, out))
    # print("***********")
    # print(flops, params)
    # print("***********")
    # print(count_param(model))
    # summary(model, input_size=(1, 256, 256), batch_size=1, device="cuda")
    stat(model, (1, 512, 512))
Example #20
        return locs, cls_scores


from thop import profile, clever_format
from torchstat import stat

if __name__ == '__main__':

    #M = MobileNet()
    #for index, feat in enumerate(M.layers):
    #    print(index)
    #    print(feat)
    '''
    model = SSD(class_num=7, backbone='VGG', device='cpu')
    print(model)
    image = torch.randn(3, 3, 300, 300)
    model(image)
    '''
    #torch.save(model.state_dict(), 'temp.pth')
    '''
    image = torch.randn(1, 3, 300, 300)
    model = SSD(class_num=6, backbone='MobileNetV1', device='cpu')
    flops, params = profile(model, inputs=(image,))
    flops, params = clever_format([flops, params], "%.3f")
    print(flops, params)
    '''

    model = SSD512(class_num=7, backbone='MobileNetV3_Small', device='cpu')
    stat(model, (3, 512, 512))
Example #21
                                          nums=2)
        self.fc = nn.Linear(2048, 10)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, sqrt(2. / n))

    def make_layer(self, block, in_channels, out_channels, stride, nums):
        layers = []
        for i in range(nums - 1):
            layers.append(block(in_channels, in_channels, 1))
        layers.append(block(in_channels, out_channels, stride))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.input(x)
        x = self.res_block1(x)
        x = self.res_block2(x)
        x = self.res_block3(x)
        x = self.res_block4(x)
        x = self.avepool(x)
        x = x.view((x.shape[0], -1))
        return self.fc(x)


if __name__ == '__main__':
    net = resnet18()
    from torchstat import stat
    stat(net, (3, 32, 32))
Example #22
    Implementation of the OCR module:
    We aggregate the global object representation to update the representation for each pixel.
    """

    def __init__(self, in_channels, key_channels, out_channels, scale=1, dropout=0.1, bn_type=None):
        super(SpatialOCR_Module, self).__init__()

        self.object_context_block = ObjectAttentionBlock(in_channels, key_channels, scale, bn_type)
        _in_channels = 2 * in_channels

        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(_in_channels, out_channels, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout)
        )

    def forward(self, feats, proxy_feats):
        context = self.object_context_block(feats, proxy_feats)

        output = self.conv_bn_dropout(torch.cat([context, feats], 1))

        return output


if __name__ == '__main__':
    net = SpatialOCR_Module(2048, 256, 256)
    from torchstat import stat
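    # two input sizes below because SpatialOCR_Module.forward takes
    # (feats, proxy_feats); stock torchstat builds a single input tensor, so
    # this form may rely on a fork that supports multiple inputs (assumption)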

    stat(net, [(2048, 32, 32), (2048, 32, 32)])
Example #23
        gt_landmark = gt[idx, 5:]
        loss_landmark = self.mse(pred_landmark, gt_landmark)
        loss_landmark = torch.mean(loss_landmark, dim=1)
        # ohem
        n_keep = int(self.ohem * loss_landmark.shape[0])
        loss_landmark = torch.mean(torch.topk(loss_landmark, n_keep)[0])

        ## total loss
        loss_total = self.cls * loss_cls + self.bbox * loss_bbox + self.landmark * loss_landmark

        return loss_total, loss_cls, loss_bbox, loss_landmark


loss_coef = {
    'PNet': [1.0, 0.5, 0.5],
    'RNet': [1.0, 0.5, 0.5],
    'ONet': [1.0, 0.5, 1.0],
}

if __name__ == "__main__":

    from torchstat import stat

    stat(PNet(False), (3, 16, 16))
    stat(RNet(False), (3, 24, 24))
    stat(ONet(False), (3, 48, 48))

    PNet()
    RNet()
    ONet()
Example #24
        self.branch3 = nn.Conv2d(192, out_channels, 1, 1)

    def forward(self, x1, x2, x3):
        y1 = self.branch1(x1)
        y2 = self.branch2(x2)
        y3 = self.branch3(x3)

        return y1, y2, y3


class BaseModel(nn.Module):
    def __init__(self, num_classes, is_mobile=True):
        super(BaseModel, self).__init__()

        # box + obj + number of classes
        out_channels = (4 + 1 + num_classes) * 3

        self.body = Body(is_mobile=is_mobile)
        self.head = Head(out_channels)

    def forward(self, x):
        y1, y2, y3 = self.body(x)
        y1, y2, y3 = self.head(y1, y2, y3)
        return y1, y2, y3


if __name__ == '__main__':
    from torchstat import stat
    model = BaseModel(2, True)
    stat(model, (3, 255, 255))
Example #25
        self.stage_block_cpm = stage_block_CPM(channel_scale, channel_scale)
        self.stage_block_paf = stage_block_PAF(channel_scale, channel_scale)
        self.stage_last_cpm = stage_last_CPM(channel_scale, channel_scale)
        self.stage_last_paf = stage_last_PAF(channel_scale, channel_scale)

    def forward(self, input):
        y = self.stage_first(input)
        cpm_1 = self.stage_block_cpm(y)
        paf_1 = self.stage_block_paf(y)
        y = torch.cat((y, cpm_1, paf_1), 1)
        cpm = self.stage_last_cpm(y)
        paf = self.stage_last_paf(y)
        # y = torch.cat((self.stage_last_cpm(y), self.stage_last_paf(y)), 1)
        return cpm_1, paf_1, cpm, paf


if __name__ == '__main__':
    import torchstat
    # compute model size and FLOPs
    model = RTNet_Half()
    model = RTNet()
    torchstat.stat(model, (3, 368, 368))

    # plot model graph
    # x = torch.randn(1, 3, 368, 368).requires_grad_(True)
    # y = model(x)
    # cc = make_dot(y, params=dict(list(model.named_parameters()) + [('x', x)]))

    # compute model params size
    # summary(model, (3, 368, 368), device='cpu')
Example #26
        self.aspp_18 = _DenseASPPConv(in_channels + inter_channels2 * 3,
                                      inter_channels1, inter_channels2, 18,
                                      0.1, norm_layer, norm_kwargs)
        self.aspp_24 = _DenseASPPConv(in_channels + inter_channels2 * 4,
                                      inter_channels1, inter_channels2, 24,
                                      0.1, norm_layer, norm_kwargs)

    def forward(self, x):
        aspp3 = self.aspp_3(x)
        x = torch.cat([aspp3, x], dim=1)

        aspp6 = self.aspp_6(x)
        x = torch.cat([aspp6, x], dim=1)

        aspp12 = self.aspp_12(x)
        x = torch.cat([aspp12, x], dim=1)

        aspp18 = self.aspp_18(x)
        x = torch.cat([aspp18, x], dim=1)

        aspp24 = self.aspp_24(x)
        x = torch.cat([aspp24, x], dim=1)

        return x


model = PAmodule(19)
# model = ASPP()
# model = _DenseASPPHead(2048, 19)
stat(model, (2048, 54, 54))
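# stat also works on sub-modules: the "input" here is a 2048-channel feature
# map rather than an RGB image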
Example #27
            Upsampler(scale, n_feats, act=None, group=group),
            nn.Conv2d(
                n_feats, n_colors, kernel_size=3, padding=kernel_size // 2)
        ])

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)

        b1 = self.b1(x)
        c1 = torch.cat([x, b1], dim=1)
        o1 = self.act(self.c1(c1))

        b2 = self.b2(o1)
        c2 = torch.cat([c1, b2], dim=1)
        o2 = self.act(self.c2(c2))

        b3 = self.b3(o2)
        c3 = torch.cat([c2, b3], dim=1)
        o3 = self.act(self.c3(c3))

        x = self.tail(o3)
        x = self.add_mean(x)
        return x


from torchstat import stat

net = E_CARN()
stat(net, (3, 10, 10))
Example #28
File: mdmnet.py  Project: LouisNUST/CMNet
        d4_ = F.interpolate(d4,
                            size=(img_h, img_w),
                            mode='bilinear',
                            align_corners=False)
        d3_ = F.interpolate(d3,
                            size=(img_h, img_w),
                            mode='bilinear',
                            align_corners=False)
        d2_ = F.interpolate(d2,
                            size=(img_h, img_w),
                            mode='bilinear',
                            align_corners=False)

        side_4 = self.side_4(d4_)
        side_3 = self.side_3(d3_)
        side_2 = self.side_2(d2_)

        d = self.fu1 * d1 + self.fu2 * side_2 + self.fu3 * side_3 + self.fu4 * side_4

        # train d branch and test only d1 branch
        return [torch.sigmoid(d), torch.sigmoid(d1), torch.sigmoid(side_2), torch.sigmoid(side_3), \
            torch.sigmoid(side_4)]


if __name__ == '__main__':
    from torchstat import stat
    input_size = (784, 1168)

    net = MDMNet(input_size=input_size)
    stat(net, (3, ) + input_size)
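    # (3, ) + input_size -> (3, 784, 1168): the channel dimension is prepended
    # to the spatial size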
Example #29
def demo():
    from torchstat import stat
    net = resnet50(num_classes=365)
    y = net(torch.randn(1, 3, 224, 224))
    print(y.size())
    stat(net, (3, 224, 224))
Example #30
def torch_stat(self):
    stat(self.encoder.to('cpu'), input_size=(3, self.feed_width, self.feed_height))
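    # the encoder is moved to the CPU before profiling, presumably because
    # torchstat builds its probe input on the CPU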