Example #1
    def __init__(self):
        # prepare the model
        self.net = Net(num_class=2, is_first_bn=True)  # load the network architecture
        model_path = 'model_A_color_48/checkpoint/global_min_acer_model.pth'  # path to the trained weights
        if torch.cuda.is_available():
            state_dict = torch.load(model_path, map_location='cuda')
        else:
            state_dict = torch.load(model_path, map_location='cpu')
        new_state_dict = OrderedDict()

        for k, v in state_dict.items():
            name = k[7:]  # strip the 'module.' prefix added by nn.DataParallel
            new_state_dict[name] = v
        self.net.load_state_dict(new_state_dict)
        if torch.cuda.is_available():
            self.net = self.net.cuda()
        self.net.eval()  # freeze BatchNorm statistics and disable dropout for inference
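The renaming loop above strips the 'module.' prefix that torch.nn.DataParallel adds to parameter names when a wrapped model is saved. A minimal equivalent sketch of the same step, assuming such a checkpoint; model_path and net stand in for the objects created in the example:

import torch

state_dict = torch.load(model_path, map_location='cpu')
# drop the leading 'module.' (7 characters) only where it is actually present
cleaned = {k[7:] if k.startswith('module.') else k: v for k, v in state_dict.items()}
net.load_state_dict(cleaned)
net.eval()  # inference mode: BatchNorm uses running statistics, dropout is disabled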
Example #2
    def __init__(self):
        from model.FaceBagNet_model_A import Net
        self.net = Net(num_class=2, is_first_bn=True)
        model_pth = "./global_min_acer_model.pth"
        if torch.cuda.is_available():
            state_dict = torch.load(model_pth, map_location='cuda')
        else:
            state_dict = torch.load(model_pth, map_location='cpu')
        new_state_dict = OrderedDict()

        for k, v in state_dict.items():
            name = k[7:]  # strip the 'module.' prefix added by nn.DataParallel
            new_state_dict[name] = v
        self.net.load_state_dict(new_state_dict)

        if torch.cuda.is_available():
            self.net = self.net.cuda()
Example #3
    def __init__(self, model_path, patch_size=48, torch_device="cpu"):

        # TODO: bn, id_class?
        self.model_path = model_path
        self.patch_size = patch_size

        self.neural_net = Net(num_class=2, id_class=300, is_first_bn=True)
        self.neural_net.load_pretrain(self.model_path)

        self.neural_net = torch.nn.DataParallel(self.neural_net)

        self.neural_net.to(torch_device)

        self.torch_device = torch_device

        # TODO: this line
        self.neural_net.eval()

        self.augmentor = color_augumentor
Example #4
    def __init__(self, num_class=2):
        super(FusionNet,self).__init__()
        self.inplanes = 256
        self.color_moudle = Net(num_class=num_class, is_first_bn=True)
        self.depth_moudle = Net(num_class=num_class, is_first_bn=True)
        self.ir_moudle = Net(num_class=num_class, is_first_bn=True)

        self.color_SE = SEModule(256, reduction=16)
        self.depth_SE = SEModule(256, reduction=16)
        self.ir_SE = SEModule(256, reduction=16)

        self.bottleneck = nn.Sequential(nn.Conv2d(256 * 3, 256, kernel_size=1, padding=0),
                                        nn.BatchNorm2d(256),
                                        nn.ReLU(inplace=True))

        # self.res_0 = self._make_layer(BasicBlock, 256, 512, 2, stride=2)
        # self.res_1 = self._make_layer(BasicBlock, 512, 1024, 2, stride=2)
        self.res_0 = self._make_layer(
            SEResNeXtBottleneck,
            planes=256,
            blocks=2,
            stride=2,
            groups=32,
            reduction=16,
            downsample_kernel_size=1,
            downsample_padding=0
        )
        self.res_1 = self._make_layer(
            SEResNeXtBottleneck,
            planes=512,
            blocks=2,
            stride=2,
            groups=32,
            reduction=16,
            downsample_kernel_size=1,
            downsample_padding=0
        )


        self.fc = nn.Sequential(nn.Dropout(0.5),
                                nn.Linear(1024, 256),
                                nn.ReLU(inplace=True),
                                nn.Linear(256, num_class))
Example #5
    def __init__(self, num_class=2):
        super(FusionNet, self).__init__()

        self.color_moudle = Net(num_class=num_class, is_first_bn=True)
        self.depth_moudle = Net(num_class=num_class, is_first_bn=True)
        self.ir_moudle = Net(num_class=num_class, is_first_bn=True)

        self.color_SE = SEModule(512, reduction=16)
        self.depth_SE = SEModule(512, reduction=16)
        self.ir_SE = SEModule(512, reduction=16)

        self.bottleneck = nn.Sequential(
            nn.Conv2d(512 * 3, 128 * 3, kernel_size=1, padding=0),
            nn.BatchNorm2d(128 * 3), nn.ReLU(inplace=True))

        self.res_0 = self._make_layer(BasicBlock, 128 * 3, 256, 2, stride=2)
        self.res_1 = self._make_layer(BasicBlock, 256, 512, 2, stride=2)

        self.fc = nn.Sequential(nn.Dropout(0.5), nn.Linear(512, 256),
                                nn.ReLU(inplace=True),
                                nn.Linear(256, num_class))
Example #6
class FusionNet(nn.Module):
    def load_pretrain(self, pretrain_file):
        # copy every parameter of this model from the pretrained checkpoint by name
        pretrain_state_dict = torch.load(pretrain_file)
        state_dict = self.state_dict()
        keys = list(state_dict.keys())
        for key in keys:
            state_dict[key] = pretrain_state_dict[key]

        self.load_state_dict(state_dict)
        print('')


    def __init__(self, num_class=2):
        super(FusionNet,self).__init__()

        self.color_moudle = Net(num_class=num_class, is_first_bn=True)
        self.depth_moudle = Net(num_class=num_class, is_first_bn=True)
        self.ir_moudle = Net(num_class=num_class, is_first_bn=True)

        self.color_SE = SEModule(512, reduction=16)
        self.depth_SE = SEModule(512, reduction=16)
        self.ir_SE = SEModule(512, reduction=16)

        self.bottleneck = nn.Sequential(nn.Conv2d(512 * 3, 128 * 3, kernel_size=1, padding=0),
                                        nn.BatchNorm2d(128 * 3),
                                        nn.ReLU(inplace=True))

        self.res_0 = self._make_layer(BasicBlock, 128*3, 256, 2, stride=2)
        self.res_1 = self._make_layer(BasicBlock, 256, 512, 2, stride=2)

        self.fc = nn.Sequential(nn.Dropout(0.5),
                                nn.Linear(512, 256),
                                nn.ReLU(inplace=True),
                                nn.Linear(256, num_class))

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        downsample = None
        if stride != 1:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),)

        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)


    def forward(self, x):
        batch_size, C, H, W = x.shape

        color = x[:, 0:3, :, :]
        depth = x[:, 3:6, :, :]
        ir = x[:, 6:9, :, :]

        color_feas = self.color_moudle.forward_res3(color)
        depth_feas = self.depth_moudle.forward_res3(depth)
        ir_feas = self.ir_moudle.forward_res3(ir)

        color_feas = self.color_SE(color_feas)
        depth_feas = self.depth_SE(depth_feas)
        ir_feas = self.ir_SE(ir_feas)

        fea = torch.cat([color_feas, depth_feas, ir_feas], dim=1)
        fea = self.bottleneck(fea)

        x = self.res_0(fea)
        x = self.res_1(x)
        x = F.adaptive_avg_pool2d(x, output_size=1).view(batch_size, -1)
        x = self.fc(x)
        return x, None, None

    def set_mode(self, mode, is_freeze_bn=False):
        self.mode = mode
        if mode in ['eval', 'valid', 'test']:
            self.eval()
        elif mode in ['backup']:
            self.train()
            if is_freeze_bn:  # freeze BatchNorm layers
                for m in self.modules():
                    if isinstance(m, BatchNorm2d):
                        m.eval()
                        m.weight.requires_grad = False
                        m.bias.requires_grad = False
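FusionNet.forward above expects the color, depth and IR images stacked along the channel axis of a single tensor. A minimal sketch of assembling such an input, assuming three preprocessed float tensors of shape (B, 3, H, W) and an already constructed, eval()-ed net:

import torch
import torch.nn.functional as F

# color_t, depth_t, ir_t: float tensors of shape (B, 3, H, W), values scaled to [0, 1]
x = torch.cat([color_t, depth_t, ir_t], dim=1)  # (B, 9, H, W), matching the slicing in forward()
with torch.no_grad():
    logit, _, _ = net(x)            # forward() returns (logit, None, None)
    prob = F.softmax(logit, dim=1)  # per-class probabilities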
Example #7
def run_check_net():
    num_class = 2
    net = Net(num_class)
    print(net)
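A slightly longer sanity check in the same spirit, assuming the Net signature used in the other examples (num_class=2, is_first_bn=True), a forward pass that returns a (logit, ...) tuple, and 48x48 RGB patches as in Example #9; these are assumptions, not the confirmed API:

def run_check_net():
    num_class = 2
    net = Net(num_class=num_class, is_first_bn=True)
    net.eval()
    dummy = torch.randn(2, 3, 48, 48)   # two random 48x48 RGB patches
    with torch.no_grad():
        logit, _, _ = net(dummy)
    print(net)
    print('logit shape:', logit.shape)  # expected: (2, num_class)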
Example #8
class RealFace:
    def __init__(self):
        from model.FaceBagNet_model_A import Net
        self.net = Net(num_class=2, is_first_bn=True)
        model_pth = "./global_min_acer_model.pth"
        if torch.cuda.is_available():
            state_dict = torch.load(model_pth, map_location='cuda')
        else:
            state_dict = torch.load(model_pth, map_location='cpu')
        new_state_dict = OrderedDict()

        for k, v in state_dict.items():
            name = k[7:]  # strip the 'module.' prefix added by nn.DataParallel
            new_state_dict[name] = v
        self.net.load_state_dict(new_state_dict)

        if torch.cuda.is_available():
            self.net = self.net.cuda()

    def classify(self, color):
        return self.detect(color)

    def detect(self, color):
        color = cv2.resize(color, (RESIZE_SIZE, RESIZE_SIZE))

        def color_augmentor(image, target_shape=(64, 64, 3), is_infer=False):
            if is_infer:
                # identity augmentation (Fliplr(0) never flips), kept for interface parity
                augment_img = iaa.Sequential([
                    iaa.Fliplr(0),
                ])
                image = augment_img.augment_image(image)

            image = TTA_36_cropps(image, target_shape)

            return image

        color = color_augmentor(color, target_shape=(64, 64, 3), is_infer=True)
        n = len(color)

        color = np.concatenate(color, axis=0)

        image = color
        image = np.transpose(image, (0, 3, 1, 2))  # NHWC -> NCHW for PyTorch
        image = image.astype(np.float32)
        image = image / 255.0
        input_image = torch.FloatTensor(image)

        if len(input_image.size()) == 4:
            input_image = input_image.unsqueeze(0)  # add a batch dimension for the TTA crops

        b, n, c, w, h = input_image.size()
        input_image = input_image.view(b * n, c, w, h)

        if torch.cuda.is_available():
            input_image = input_image.cuda()

        with torch.no_grad():
            logit, _, _ = self.net(input_image)
            logit = logit.view(b, n, 2)
            logit = torch.mean(logit, dim=1, keepdim=False)
            prob = F.softmax(logit, 1)

        prediction = int(np.argmax(prob.detach().cpu().numpy()))
        print('probability: ', prob)
        print('predict: ', prediction)
        return prediction
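A hypothetical usage of the class above, assuming RESIZE_SIZE and TTA_36_cropps are defined in the same module and that the input is a cropped face loaded with OpenCV; following Example #9, class index 1 is treated as the genuine face:

import cv2

classifier = RealFace()
frame = cv2.imread('face.jpg')      # placeholder path to a cropped face image
label = classifier.classify(frame)  # index of the predicted class
print('genuine' if label == 1 else 'spoof')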
Example #9
class FaceBagNet:
    def __init__(self, model_path, patch_size=48, torch_device="cpu"):

        # TODO: bn, id_class?
        self.model_path = model_path
        self.patch_size = patch_size

        self.neural_net = Net(num_class=2, id_class=300, is_first_bn=True)
        self.neural_net.load_pretrain(self.model_path)

        self.neural_net = torch.nn.DataParallel(self.neural_net)

        self.neural_net.to(torch_device)

        self.torch_device = torch_device

        # TODO: this line
        self.neural_net.eval()

        self.augmentor = color_augumentor

    # returns the probability that the image is genuine (not a presentation attack)
    def predict(self, full_size_image):

        image = deepcopy(full_size_image)  # TODO: remove copying

        image = self.augmentor(image,
                               target_shape=(self.patch_size, self.patch_size,
                                             3),
                               is_infer=True)

        n = len(image)
        image = np.concatenate(image, axis=0)
        image = np.transpose(image, (0, 3, 1, 2))
        image = image.astype(np.float32)
        image = image.reshape([n, 3, self.patch_size, self.patch_size])
        image = np.array([image])
        image = image / 255.0

        input_tensor = torch.FloatTensor(image)

        shape = input_tensor.shape
        b, n, c, w, h = shape
        # print(b, n, c, w, h)

        input_tensor = input_tensor.view(b * n, c, w, h)

        # inpt = inpt.cuda() if torch.cuda.is_available() else inpt.cpu()
        input_tensor = input_tensor.to(self.torch_device)

        # print(input_tensor)

        with torch.no_grad():
            logit, _, _ = self.neural_net(input_tensor)
            logit = logit.view(b, n, 2)
            logit = torch.mean(logit, dim=1, keepdim=False)
            prob = F.softmax(logit, 1)

        is_real = list(prob.data.cpu().numpy()[:, 1])[0]

        return is_real
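A hypothetical usage of FaceBagNet, with a placeholder checkpoint path and an image read with OpenCV as in Example #8:

import cv2

model = FaceBagNet('./global_min_acer_model.pth', patch_size=48, torch_device='cpu')
face = cv2.imread('face.jpg')  # placeholder path to a cropped face image
score = model.predict(face)    # probability that the face is genuine
print('genuine probability:', score)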