Code Example #1
def test():
    # Load the model
    net = KFSGNet()
    net.float().cuda()
    net.eval()
    if (config['checkout'] != ''):
        net.load_state_dict(torch.load(config['checkout']))

    dataset = KFDataset(config)
    dataset.load()
    dataLoader = DataLoader(dataset, 32)
    all_result = []
    lookup_df = pd.read_csv(config['lookup'])
    num = len(dataset)
    for i, (images, ids) in enumerate(dataLoader):
        print('{} / {}'.format(i, num))
        images = Variable(images).float().cuda()
        ids = ids.numpy()
        pred_heatmaps = net.forward(images)
        """
        Visualize the predictions
        demo_img = images[0].cpu().data.numpy()[0]
        demo_img = (demo_img * 255.).astype(np.uint8)
        demo_heatmaps = pred_heatmaps[0].cpu().data.numpy()[np.newaxis,...]
        demo_pred_poins = get_peak_points(demo_heatmaps)[0] # (15,2)
        plt.imshow(demo_img,cmap='gray')
        plt.scatter(demo_pred_poins[:,0],demo_pred_poins[:,1])
        plt.show()
        """

        pred_points = get_peak_points(
            pred_heatmaps.cpu().data.numpy())  #(N,15,2)
        pred_points = pred_points.reshape((pred_points.shape[0], -1))  #(N,30)

        # Select the lookup rows (requested features) for each image
        for idx, img_id in enumerate(ids):
            result_img = lookup_df[lookup_df['ImageId'] == img_id].copy()
            # Map feature names to ids
            fea_names = result_img['FeatureName'].values  # .as_matrix() was removed in recent pandas
            fea_ids = [config['featurename2id'][name] for name in fea_names]
            pred_values = pred_points[idx][fea_ids]
            result_img['Location'] = pred_values
            all_result.append(result_img)

        # loss = get_mse(demo_pred_poins[np.newaxis,...],gts)
    result_df = pd.concat(all_result)
    result_df = result_df.drop(columns=['ImageId', 'FeatureName'])
    result_df.to_csv('data/result_909.csv', index=False)
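
Every snippet in this listing decodes keypoint coordinates from the predicted heatmaps with get_peak_points, whose implementation is not included here. A minimal sketch of what such a helper typically does, assuming it takes an (N, C, H, W) heatmap array and returns the per-channel peak as (N, C, 2) (x, y) coordinates, matching the (N,15,2) / (N,4,2) shape comments above:

import numpy as np

def get_peak_points(heatmaps):
    """Hypothetical decoder: (N, C, H, W) heatmaps -> (N, C, 2) peak (x, y) coords."""
    N, C, H, W = heatmaps.shape
    # Flatten each heatmap, take the argmax, then convert it back to 2-D indices.
    flat_idx = heatmaps.reshape(N, C, -1).argmax(axis=-1)   # (N, C)
    ys, xs = np.unravel_index(flat_idx, (H, W))             # each (N, C)
    return np.stack([xs, ys], axis=-1).astype(np.float32)   # (N, C, 2)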
Code Example #2
File: test.py  Project: lyffly/KeyPointsDetection
def test():
    # Load the model
    net = KFSGNet()
    net.float().cuda()
    net.eval()
    if (config['checkout'] != ''):
        net.load_state_dict(torch.load(config['checkout']))

    all_result = []

    camera = cv2.VideoCapture(0)

    if not camera.isOpened():
        print("camera is not ready !!!!!")
        exit(0)

    while True:
        ret, frame = camera.read()
        if not ret:  # read() returns a bool, not None, on failure
            break
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        height, width = image.shape[:2]

        t0 = time.time()

        image = image[0:height, 0:height]

        image_resized = cv2.resize(image, (256, 256))

        image1 = Variable(toTensor(image_resized)).cuda()
        pred_heatmaps = net(image1)
        #print(pred_heatmaps.shape)

        #cv2.imshow("heatmap",pred_heatmaps.cpu().data.numpy()[0][0])

        pred_points = get_peak_points(
            pred_heatmaps.cpu().data.numpy())  #(N,4,2)
        pred_points = pred_points.reshape((pred_points.shape[0], -1))  #(N,8)

        print(pred_points)

        image_resized = cv2.cvtColor(image_resized, cv2.COLOR_RGB2BGR)

        cv2.imshow("result", image_resized)
        cv2.waitKey(1)
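
The webcam loop above computes pred_points but displays only the raw resized frame. A hypothetical overlay step that could be placed just before the cv2.imshow call, under the assumption that the predicted coordinates are already in the 256x256 pixel space of image_resized (if the network outputs smaller heatmaps, the points would need to be rescaled first):

        # Draw each predicted keypoint as a small filled circle (assumes the
        # coordinates are in the same 256x256 space as image_resized).
        for x, y in pred_points.reshape(-1, 2):
            cv2.circle(image_resized, (int(x), int(y)), 3, (0, 0, 255), -1)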
Code Example #3
def evaluate():
    # Load the model
    net = KFSGNet()
    net.float().cuda()
    net.eval()
    #if (config['checkout'] != ''):
    #    net.load_state_dict(torch.load(config['checkout']))

    net.load_state_dict(torch.load("kd_epoch_519_model.ckpt"))
    dataset = KFDataset(config)

    dataLoader = DataLoader(dataset, 1)
    for i, (images, _, gts) in enumerate(dataLoader):
        images = Variable(images).float().cuda()

        pred_heatmaps = net.forward(images)
        print(pred_heatmaps.cpu().data.numpy())
        demo_img = images[0].cpu().data.numpy().transpose(1, 2, 0)  # (C, H, W) -> (H, W, C); reshape would scramble the channels
        demo_img = (demo_img * 255.).astype(np.uint8)
        demo_heatmaps = pred_heatmaps[0].cpu().data.numpy()[np.newaxis, ...]
        demo_pred_poins = get_peak_points(demo_heatmaps)[0]  # (4,2)

        plt.figure(1)
        plt.subplot(2, 2, 1)
        plt.imshow(demo_heatmaps[0][0], alpha=.5)
        plt.subplot(2, 2, 2)
        plt.imshow(demo_heatmaps[0][1], alpha=.5)
        plt.subplot(2, 2, 3)
        plt.imshow(demo_heatmaps[0][2], alpha=.5)
        plt.subplot(2, 2, 4)
        plt.imshow(demo_heatmaps[0][3], alpha=.5)

        plt.figure(2)
        plt.imshow(demo_img, cmap='gray')
        plt.scatter(demo_pred_poins[:, 0], demo_pred_poins[:, 1])
        plt.text(demo_pred_poins[0, 0], demo_pred_poins[0, 1], "P1", color='r')
        plt.text(demo_pred_poins[1, 0], demo_pred_poins[1, 1], "P2", color='r')
        plt.text(demo_pred_poins[2, 0], demo_pred_poins[2, 1], "P3", color='r')
        plt.text(demo_pred_poins[3, 0], demo_pred_poins[3, 1], "P4", color='r')

        plt.show()
Code Example #4
def evaluate():
    # Load the model
    net = KFSGNet()
    net.float().cuda()
    net.eval()
    if (config['checkout'] != ''):
        net.load_state_dict(torch.load(config['checkout']))

    dataset = KFDataset(config)
    dataset.load()
    dataLoader = DataLoader(dataset, 1)
    for i, (images, _, gts) in enumerate(dataLoader):
        images = Variable(images).float().cuda()

        pred_heatmaps = net.forward(images)
        demo_img = images[0].cpu().data.numpy()[0]
        demo_img = (demo_img * 255.).astype(np.uint8)
        demo_heatmaps = pred_heatmaps[0].cpu().data.numpy()[np.newaxis, ...]
        demo_pred_poins = get_peak_points(demo_heatmaps)[0]  # (15,2)
        plt.imshow(demo_img, cmap='gray')
        plt.scatter(demo_pred_poins[:, 0], demo_pred_poins[:, 1])
        plt.show()
Code Example #5
                name = param_map[id(u)] if params is not None else ''
                node_name = '%s\n %s' % (name, size_to_str(u.size()))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if u[0] is not None:
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)

    add_nodes(var.grad_fn)
    return dot


if __name__ == '__main__':
    from models import KFSGNet
    from torch.autograd import Variable
    import torch

    net = KFSGNet()
    x = Variable(torch.randn((1, 1, 96, 96)))
    y = net(x)
    g = make_dot(y)
    g.view()
    pass
Code Example #6
    def __init__(self, scale=2, num_layers_res=2):
        super(NetSR, self).__init__()

        #----------input conv-------------------
        self.conv_input = nn.Conv2d(in_channels=3,
                                    out_channels=64,
                                    kernel_size=3,
                                    stride=1,
                                    padding=1,
                                    bias=False)
        self.bn_input = nn.BatchNorm2d(64)
        self.relu_input = nn.ReLU(inplace=True)

        #----------residual-------------------
        self.residual = nn.Sequential(
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64))
        self.conv_input_out1 = nn.Conv2d(in_channels=64,
                                         out_channels=3,
                                         kernel_size=3,
                                         stride=1,
                                         padding=1,
                                         bias=False)

        self.conv_input_1 = nn.Conv2d(in_channels=3,
                                      out_channels=64,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      bias=False)
        self.bn_input_1 = nn.BatchNorm2d(64)
        self.relu_input_1 = nn.ReLU(inplace=True)

        self.residual1 = nn.Sequential(
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64))

        self.conv_input_2 = nn.Conv2d(in_channels=64,
                                      out_channels=64,
                                      kernel_size=3,
                                      stride=1,
                                      padding=1,
                                      bias=False)
        self.bn_input_2 = nn.BatchNorm2d(64)
        self.relu_input_2 = nn.ReLU(inplace=True)

        self.conv_input_3 = nn.Conv2d(in_channels=75,
                                      out_channels=64,
                                      kernel_size=3,
                                      stride=1,
                                      padding=1,
                                      bias=False)
        self.bn_input_3 = nn.BatchNorm2d(64)
        self.relu_input_3 = nn.ReLU(inplace=True)

        self.conv_input_4 = nn.ConvTranspose2d(in_channels=64,
                                               out_channels=64,
                                               kernel_size=3,
                                               stride=2,
                                               padding=1,
                                               output_padding=1,
                                               bias=False)
        self.bn_input_4 = nn.BatchNorm2d(64)
        self.relu_input_4 = nn.ReLU(inplace=True)

        self.residual2 = nn.Sequential(
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64),
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=64))

        self.conv_input_out2 = nn.Conv2d(in_channels=64,
                                         out_channels=3,
                                         kernel_size=3,
                                         stride=1,
                                         padding=1,
                                         bias=False)

        self.conv_input_hg = nn.Conv2d(in_channels=3,
                                       out_channels=64,
                                       kernel_size=7,
                                       stride=2,
                                       padding=3,
                                       bias=False)
        self.bn_input_hg = nn.BatchNorm2d(64)
        self.relu_input_hg = nn.ReLU(inplace=True)

        self.residual3 = nn.Sequential(
            make_layer(_Residual_Block, num_layers_res, inc=64, outc=128),
            make_layer(_Residual_Block, num_layers_res, inc=128, outc=128),
            make_layer(_Residual_Block, num_layers_res, inc=128, outc=128))

        self.HG = KFSGNet()
Code Example #7
        for c in range(C):
            max_v = heatmaps_targets[n, c, :, :].max().data[0]
            if max_v != 0.0:
                N_idx.append(n)
                C_idx.append(c)
    mask = Variable(torch.zeros(heatmaps_targets.size()))
    mask[N_idx, C_idx, :, :] = 1.
    mask = mask.float().cuda()
    return mask, [N_idx, C_idx]


if __name__ == '__main__':
    pprint.pprint(config)
    torch.manual_seed(0)
    cudnn.benchmark = True
    net = KFSGNet()
    net.float().cuda()
    net.train()
    criterion = nn.MSELoss()
    # optimizer = optim.SGD(net.parameters(), lr=config['lr'], momentum=config['momentum'] , weight_decay=config['weight_decay'])
    optimizer = optim.Adam(net.parameters(), lr=config['lr'])
    trainDataset = KFDataset(config)
    trainDataset.load()
    trainDataLoader = DataLoader(trainDataset, config['batch_size'], True)
    sample_num = len(trainDataset)

    if (config['checkout'] != ''):
        net.load_state_dict(torch.load(config['checkout']))

    for epoch in range(config['start_epoch'],
                       config['epoch_num'] + config['start_epoch']):
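        # The original snippet is truncated at the epoch loop header. What follows
        # is a minimal sketch of an epoch body consistent with the setup above (MSE
        # criterion, Adam optimizer, masked heatmap targets); the helper name
        # get_mask and the (images, gts, heatmaps_targets) batch layout are
        # assumptions, not taken from the source.
        for i, (images, gts, heatmaps_targets) in enumerate(trainDataLoader):
            images = Variable(images).float().cuda()
            heatmaps_targets = Variable(heatmaps_targets).float().cuda()
            # Exclude channels whose target heatmap is empty (missing keypoints).
            mask, _ = get_mask(heatmaps_targets)

            optimizer.zero_grad()
            outputs = net(images)
            loss = criterion(outputs * mask, heatmaps_targets * mask)
            loss.backward()
            optimizer.step()

            if i % 10 == 0:
                print('epoch {} [{}/{}] loss: {:.6f}'.format(
                    epoch, i * config['batch_size'], sample_num, loss.data[0]))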