Esempio n. 1
0
    def __init__(self, db):
        """Build the network, wrap it for multi-GPU data parallelism and
        create the optimizer selected by ``system_configs.opt_algo``.

        Args:
            db: dataset object forwarded to the model constructor
                (presumably a sampling-DB instance — confirm with caller).

        Raises:
            ValueError: if ``system_configs.opt_algo`` is neither "adam"
                nor "sgd".
        """
        super(NetworkFactory, self).__init__()

        # The model definition lives at models/<snapshot_name>.py and must
        # expose a `model` class and a `loss` object.
        module_file = "models.{}".format(system_configs.snapshot_name)
        print("module_file: {}".format(module_file))
        nnet_module = importlib.import_module(module_file)

        self.model = DummyModule(nnet_module.model(db))
        self.loss = nnet_module.loss
        self.network = Network(self.model, self.loss)
        self.network = DataParallel(self.network,
                                    chunk_sizes=system_configs.chunk_sizes)

        # Tensor.numel() already returns the product of all dimensions,
        # replacing the manual size() loop.
        total_params = sum(params.numel()
                           for params in self.model.parameters())
        print("total parameters: {}".format(total_params))

        # Only parameters with requires_grad=True are optimized.
        if system_configs.opt_algo == "adam":
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters()))
        elif system_configs.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                                    self.model.parameters()),
                                             lr=system_configs.learning_rate,
                                             momentum=0.9,
                                             weight_decay=0.0001)
        else:
            raise ValueError("unknown optimizer")
Esempio n. 2
0
    def __init__(self, flag=False):
        """Instantiate the LSTR model, its loss, and the data-parallel
        wrapper, remembering *flag* for later use.
        """
        super(NetworkFactory, self).__init__()

        # The model definition is hard-wired to models/LSTR.py here.
        lstr_module = importlib.import_module('models.LSTR')

        self.model = DummyModule(lstr_module.model(flag=flag))
        self.loss = lstr_module.loss()
        self.network = DataParallel(Network(self.model, self.loss),
                                    chunk_sizes=[16])
        self.flag = flag
Esempio n. 3
0
    def __init__(self, db):
        """Build the CenterNet model/loss pair, parallelize it across GPUs
        and create the optimizer chosen by ``system_configs.opt_algo``.

        Args:
            db: an MSCOCO instance; images and the tl/br/center heatmaps
                and regression targets are already prepared as torch.Tensor.

        Raises:
            ValueError: if ``system_configs.opt_algo`` is neither "adam"
                nor "sgd".
        """
        super(NetworkFactory, self).__init__()

        # snapshot_name is None in config.py and is set by train.py (e.g.
        # "CenterNet-104"), so module_file resolves to
        # "models.CenterNet-104" -> models/CenterNet-104.py.
        module_file = "models.{}".format(system_configs.snapshot_name)
        print("module_file: {}".format(module_file))
        nnet_module = importlib.import_module(module_file)

        # `model` in CenterNet-104.py inherits from kp (kp.py); `loss` is
        # AELoss(pull_weight=1e-1, push_weight=1e-1, focal_loss=_neg_loss)
        # defined in kp.py.
        self.model   = DummyModule(nnet_module.model(db))
        self.loss    = nnet_module.loss

        self.network = Network(self.model, self.loss)
        # Project-local DataParallel: splits each batch across the GPUs
        # according to chunk_sizes.
        self.network = DataParallel(self.network, chunk_sizes=system_configs.chunk_sizes).cuda()

        # Tensor.numel() already returns the product of all dimensions,
        # replacing the manual size() loop.
        total_params = sum(params.numel()
                           for params in self.model.parameters())
        print("total parameters: {}".format(total_params))

        # Only parameters with requires_grad=True are optimized.
        if system_configs.opt_algo == "adam":
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters())
            )
        elif system_configs.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=system_configs.learning_rate,
                momentum=0.9, weight_decay=0.0001
            )
        else:
            raise ValueError("unknown optimizer")
Esempio n. 4
0
    def __init__(self, flag=False):
        """Build the model/loss pair, report parameter and MAC counts, and
        create the optimizer chosen by ``system_configs.opt_algo``.

        Args:
            flag: forwarded to the model constructor and stored on the
                instance (semantics defined by the model — confirm there).

        Raises:
            ValueError: if ``system_configs.opt_algo`` is not one of
                "adam", "sgd" or "adamW".
        """
        super(NetworkFactory, self).__init__()

        # e.g. snapshot_name "CornerNet" -> module "models.CornerNet".
        module_file = "models.{}".format(system_configs.snapshot_name)
        nnet_module = importlib.import_module(module_file)

        self.model = DummyModule(nnet_module.model(flag=flag))
        self.loss = nnet_module.loss()
        self.network = Network(self.model, self.loss)
        self.network = DataParallel(self.network,
                                    chunk_sizes=system_configs.chunk_sizes)
        self.flag = flag

        # Count total parameters: numel() already returns the product of
        # all dimensions, replacing the manual size() loop.
        total_params = sum(params.numel()
                           for params in self.model.parameters())
        print("Total parameters: {}".format(total_params))

        # Count MACs when input is 360 x 640 x 3 (requires a CUDA device).
        input_test = torch.randn(1, 3, 360, 640).cuda()
        input_mask = torch.randn(1, 3, 360, 640).cuda()
        macs, params = profile(self.model,
                               inputs=(input_test, input_mask),
                               verbose=False)
        # clever_format returns human-readable strings; the formatted
        # parameter count is intentionally discarded (already printed above).
        macs, _ = clever_format([macs, params], "%.3f")
        print('MACs: {}'.format(macs))

        # Only parameters with requires_grad=True are optimized.
        if system_configs.opt_algo == "adam":
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters()))
        elif system_configs.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                                    self.model.parameters()),
                                             lr=system_configs.learning_rate,
                                             momentum=0.9,
                                             weight_decay=0.0001)
        elif system_configs.opt_algo == 'adamW':
            self.optimizer = torch.optim.AdamW(filter(
                lambda p: p.requires_grad, self.model.parameters()),
                                               lr=system_configs.learning_rate,
                                               weight_decay=1e-4)
        else:
            raise ValueError("unknown optimizer")
    def __init__(self, db):
        """Build the CenterNet-52 model/loss pair, parallelize it across
        GPUs and create the optimizer chosen by ``system_configs.opt_algo``.

        Args:
            db: dataset object forwarded to the model constructor.

        Raises:
            ValueError: if ``system_configs.opt_algo`` is neither "adam"
                nor "sgd".
        """
        super(NetworkFactory, self).__init__()

        # snapshot_name comes from train.py
        # (configs["system"]["snapshot_name"] = args.cfg_file), so this
        # resolves to e.g. "models.CenterNet-52".
        module_file = "models.{}".format(system_configs.snapshot_name)
        print("module_file: {}".format(module_file))
        # `model` and `loss` both come from CenterNet-52.py: class model(kp)
        # inherits kp.py's class kp(nn.Module), and loss is kp.py's
        # AELoss(nn.Module) — the main framework lives in class kp.
        # At test time (see kp_decode in test/coco.py) the input is wrapped
        # as a one-element list [images], which makes the model run _test.
        nnet_module = importlib.import_module(module_file)

        self.model = DummyModule(nnet_module.model(db))
        self.loss = nnet_module.loss
        self.network = Network(self.model, self.loss)
        # Project-local DataParallel: module-level data parallelism that
        # splits each batch across the given GPUs according to chunk_sizes.
        self.network = DataParallel(
            self.network, chunk_sizes=system_configs.chunk_sizes).cuda()

        # Tensor.numel() already returns the product of all dimensions,
        # replacing the manual size() loop.
        total_params = sum(params.numel()
                           for params in self.model.parameters())
        print("total parameters: {}".format(total_params))

        # Only parameters with requires_grad=True are optimized.
        if system_configs.opt_algo == "adam":
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters())
            )
        elif system_configs.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                                    self.model.parameters()),
                                             lr=system_configs.learning_rate,
                                             momentum=0.9,
                                             weight_decay=0.0001)
        else:
            raise ValueError("unknown optimizer")
Esempio n. 6
0
    def __init__(self, db, cuda_flag):
        """Build the model/loss pair, optionally wrap it for multi-GPU
        execution, and create the optimizer chosen by
        ``system_configs.opt_algo``.

        Args:
            db: dataset object forwarded to the model constructor.
            cuda_flag: when True, wrap the network in the project's
                DataParallel; when False the plain Network is kept (CPU path).

        Raises:
            ValueError: if ``system_configs.opt_algo`` is neither "adam"
                nor "sgd".
        """
        super(NetworkFactory, self).__init__()

        # e.g. snapshot_name "medical_ExtremeNet" ->
        # module "models.medical_ExtremeNet".
        module_file = "models.{}".format(system_configs.snapshot_name)
        nnet_module = importlib.import_module(module_file)

        self.model   = DummyModule(nnet_module.model(db))
        self.loss    = nnet_module.loss  # exported at the end of models/ExtremeNet.py
        self.network = Network(self.model, self.loss)
        self.cuda_flag = cuda_flag
        if self.cuda_flag:
            self.network = DataParallel(self.network,
                                        chunk_sizes=system_configs.chunk_sizes)

        # Tensor.numel() already returns the product of all dimensions,
        # replacing the manual size() loop.
        total_params = sum(params.numel()
                           for params in self.model.parameters())
        print("total parameters: {}".format(total_params))

        # Only parameters with requires_grad=True are optimized.
        if system_configs.opt_algo == "adam":
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters())
            )
        elif system_configs.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=system_configs.learning_rate,
                momentum=0.9, weight_decay=0.0001
            )
        else:
            raise ValueError("unknown optimizer")