Example #1
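This snippet (and the two variants that follow) is a Graphonomy-style human-parsing inference function and relies on module-level imports that the excerpts omit. A plausible import block is sketched below; the exact module paths for the repository-specific helpers (graph, the custom transforms tr, read_img, img_transform, flip, flip_cihp, decode_labels, plus GuidedFilter and convertImg used further down) are assumptions.

import os
import pickle
import timeit

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms

# Repository-specific helpers; these module paths are illustrative guesses.
from utils import graph
from dataloaders import custom_transforms as tr
from utils.util import (read_img, img_transform, flip, flip_cihp,
                        decode_labels)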
def inference(net,
              img_path='',
              output_path='./',
              output_name='f',
              use_gpu=True):
    '''Run multi-scale inference on one image and save the parsing results.

    :param net: trained parsing network; its forward pass takes the image
        batch plus three graph adjacency tensors
    :param img_path: path to the input image
    :param output_path: directory where the outputs are written
    :param output_name: base name for the output files
    :param use_gpu: whether to run the forward pass on CUDA
    :return: None; saves a color visualization and a gray-scale label map
    '''
    # Graph adjacency matrices for the graph-reasoning branches. Note the
    # naming swap: adj1_test ends up holding the 20x20 CIHP adjacency and
    # adj3_test the 7x7 PASCAL one.
    adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
    adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7,
                                                       20).cuda().transpose(
                                                           2, 3)

    adj1_ = Variable(
        torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
    adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()

    cihp_adj = graph.preprocess_adj(graph.cihp_graph)
    adj3_ = Variable(torch.from_numpy(cihp_adj).float())
    adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()

    # multi-scale
    scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]
    img = read_img(img_path)
    testloader_list = []
    testloader_flip_list = []
    for pv in scale_list:
        composed_transforms_ts = transforms.Compose([
            tr.Scale_only_img(pv),
            tr.Normalize_xception_tf_only_img(),
            tr.ToTensor_only_img()
        ])

        composed_transforms_ts_flip = transforms.Compose([
            tr.Scale_only_img(pv),
            tr.HorizontalFlip_only_img(),
            tr.Normalize_xception_tf_only_img(),
            tr.ToTensor_only_img()
        ])

        testloader_list.append(img_transform(img, composed_transforms_ts))
        testloader_flip_list.append(
            img_transform(img, composed_transforms_ts_flip))
    start_time = timeit.default_timer()
    # One testing pass over all scales
    net.eval()
    # Scales 1, 0.5, 0.75, 1.25, 1.5, 1.75, each paired with its horizontal flip.

    for iii, sample_batched in enumerate(
            zip(testloader_list, testloader_flip_list)):
        inputs, labels = sample_batched[0]['image'], sample_batched[0]['label']
        inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label']
        inputs = inputs.unsqueeze(0)
        inputs_f = inputs_f.unsqueeze(0)
        inputs = torch.cat((inputs, inputs_f), dim=0)
        if iii == 0:
            _, _, h, w = inputs.size()
        # assert inputs.size() == inputs_f.size()

        # Forward pass of the mini-batch
        inputs = Variable(inputs, requires_grad=False)

        with torch.no_grad():
            if use_gpu:
                inputs = inputs.cuda()
            outputs = net(inputs, adj1_test.cuda(), adj3_test.cuda(),
                          adj2_test.cuda())
            # Average the plain prediction with the re-flipped, left/right
            # channel-remapped prediction of the flipped input.
            outputs = (outputs[0] + flip(flip_cihp(outputs[1]), dim=-1)) / 2
            outputs = outputs.unsqueeze(0)

            if iii > 0:
                outputs = F.interpolate(outputs,
                                        size=(h, w),
                                        mode='bilinear',
                                        align_corners=True)
                outputs_final = outputs_final + outputs
            else:
                outputs_final = outputs.clone()
    # Save the fused multi-scale prediction
    predictions = torch.max(outputs_final, 1)[1]
    results = predictions.cpu().numpy()
    vis_res = decode_labels(results)

    parsing_im = Image.fromarray(vis_res[0])
    parsing_im.save(os.path.join(output_path, '{}.png'.format(output_name)))
    cv2.imwrite(os.path.join(output_path, '{}_gray.png'.format(output_name)),
                results[0, :, :])

    end_time = timeit.default_timer()
    print('Time used for the multi-scale image inference: ' +
          str(end_time - start_time))
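A minimal way to call this variant might look like the following; the model class, constructor arguments, and checkpoint path are assumptions modeled on typical Graphonomy setups, not taken from the snippet itself.

# Hypothetical driver; model class and checkpoint path are assumed.
from networks import deeplab_xception_transfer

net = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(
    n_classes=20, hidden_layers=128, source_classes=7)
net.load_state_dict(torch.load('data/pretrained_model/inference.pth'))
net.cuda()
inference(net, img_path='img/example.jpg', output_path='./output',
          output_name='example', use_gpu=True)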
Example #2
def inference(net, img_path="", output_path="./", use_gpu=True):
    """

    :param net:
    :param img_path:
    :param output_path:
    :return:
    """

    # Graph adjacency matrices for the graph-reasoning branches. Note the
    # naming swap: adj1_test ends up holding the 20x20 CIHP adjacency and
    # adj3_test the 7x7 PASCAL one.
    adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
    adj2_test = (adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7,
                                                        20).cuda().transpose(
                                                            2, 3))

    adj1_ = Variable(
        torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
    adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()

    cihp_adj = graph.preprocess_adj(graph.cihp_graph)
    adj3_ = Variable(torch.from_numpy(cihp_adj).float())
    adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()

    # multi-scale
    scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]
    real_img = read_img(img_path)

    # Parse a 256x256 copy; real_img is reused later as the guidance image.
    img = real_img.resize((256, 256))
    testloader_list = []
    testloader_flip_list = []
    for pv in scale_list:
        composed_transforms_ts = transforms.Compose([
            tr.Scale_only_img(pv),
            tr.Normalize_xception_tf_only_img(),
            tr.ToTensor_only_img(),
        ])

        composed_transforms_ts_flip = transforms.Compose([
            tr.Scale_only_img(pv),
            tr.HorizontalFlip_only_img(),
            tr.Normalize_xception_tf_only_img(),
            tr.ToTensor_only_img(),
        ])

        testloader_list.append(img_transform(img, composed_transforms_ts))
        testloader_flip_list.append(
            img_transform(img, composed_transforms_ts_flip))
    start_time = timeit.default_timer()
    # One testing pass over all scales
    net.eval()
    # Scales 1, 0.5, 0.75, 1.25, 1.5, 1.75, each paired with its horizontal flip.

    for iii, sample_batched in enumerate(
            zip(testloader_list, testloader_flip_list)):
        inputs, labels = sample_batched[0]["image"], sample_batched[0]["label"]
        inputs_f, _ = sample_batched[1]["image"], sample_batched[1]["label"]
        inputs = inputs.unsqueeze(0)
        inputs_f = inputs_f.unsqueeze(0)
        inputs = torch.cat((inputs, inputs_f), dim=0)
        if iii == 0:
            _, _, h, w = inputs.size()
        # assert inputs.size() == inputs_f.size()

        # Forward pass of the mini-batch
        inputs = Variable(inputs, requires_grad=False)

        with torch.no_grad():
            if use_gpu:
                inputs = inputs.cuda()
            outputs = net(inputs, adj1_test.cuda(), adj3_test.cuda(),
                          adj2_test.cuda())
            # Average the plain prediction with the re-flipped, left/right
            # channel-remapped prediction of the flipped input.
            outputs = (outputs[0] + flip(flip_cihp(outputs[1]), dim=-1)) / 2
            outputs = outputs.unsqueeze(0)

            if iii > 0:
                outputs = F.interpolate(outputs,
                                        size=(h, w),
                                        mode="bilinear",
                                        align_corners=True)
                outputs_final = outputs_final + outputs
            else:
                outputs_final = outputs.clone()

    # Save the fused multi-scale prediction
    predictions = torch.max(outputs_final, 1)[1]
    results = predictions.cpu().numpy()
    vis_res = decode_labels(results)

    parsing_im = Image.fromarray(vis_res[0])
    parsing_im.save(os.path.join(output_path, 'real_mask.png'))

    end_time = timeit.default_timer()
    print("time used for the multi-scale image inference" + " is :" +
          str(end_time - start_time))

    # Guided-filter refinement of the parsing soft masks
    guidance_map = real_img.resize((512, 512))
    guidance_map = transforms.ToTensor()(guidance_map)
    guidance_map = guidance_map.unsqueeze(0).cuda()

    guide = GuidedFilter(
        r=16,
        eps=1e-3,
        downsample_stride=4,
    ).cuda()
    guide.conv_mean_weights = guide.conv_mean_weights.cuda()

    predictions = F.interpolate(outputs_final,
                                size=guidance_map.shape[-2:],
                                mode='bilinear',
                                align_corners=False)

    # get guided predictions;
    # guidance_map: [1, 3, 512, 512], predictions: [1, 20, 512, 512]
    # Filter each class channel separately against the RGB guidance image.
    for idx in range(predictions.shape[1]):
        current_channel = predictions[:, idx, :, :]
        current_channel = torch.unsqueeze(current_channel,
                                          dim=0)  # [1, 1, 512, 512]
        if idx == 0:
            guided_predictions = guide(guidance_map,
                                       current_channel)  # [1, 1, 512, 512]
        else:
            guided_predictions = torch.cat(
                (guided_predictions, guide(guidance_map, current_channel)),
                dim=1)
    # guided_predictions: [1, 20, 512, 512]
    guided_predictions = F.softmax(guided_predictions, dim=1)
    foreground_predictions = guided_predictions[
        0, 1:, ...]  # [19, 512, 512], 0 is background
    foreground_soft_mask = torch.sum(foreground_predictions, dim=0)  # [H, W]
    foreground_soft_mask = torch.unsqueeze(foreground_soft_mask,
                                           dim=-1)  # [H, W, 1]
    foreground_soft_mask = torch.cat(
        (foreground_soft_mask, foreground_soft_mask, foreground_soft_mask),
        dim=-1)
    foreground_soft_mask = foreground_soft_mask.cpu().numpy()
    foreground_soft_mask = (foreground_soft_mask * 255).astype(np.uint8)
    cv2.imwrite(os.path.join(output_path, 'guided_mask.png'),
                foreground_soft_mask)

    hair_predictions = guided_predictions[0, 2, :, :]  # CIHP channel 2: hair
    face_predictions = guided_predictions[0, 13, :, :]  # CIHP channel 13: face
    foreground_softmask = hair_predictions + face_predictions  # [512, 512]
    foreground_softmask = torch.unsqueeze(foreground_softmask, dim=-1)
    foreground_softmask = torch.cat(
        (foreground_softmask, foreground_softmask, foreground_softmask),
        dim=-1)
    foreground_softmask = foreground_softmask.cpu().numpy()
    with open(os.path.join(output_path, 'hair_face_softmask.pkl'),
              'wb') as handle:
        pickle.dump(foreground_softmask, handle)
    foreground_softmask = (foreground_softmask * 255).astype(np.uint8)
    cv2.imwrite(os.path.join(output_path, 'guided_hair_face_mask.png'),
                foreground_softmask)
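The GuidedFilter module used above is repository-specific. For reference, a minimal single-channel guided filter in the sense of He et al., the operation this refinement step builds on, could be sketched as follows; box_filter and the function signature are assumptions, with r and eps mirroring the call above, and the downsampled (fast) variant implied by downsample_stride omitted.

def box_filter(x, r):
    # Local mean over a (2r+1) x (2r+1) window; output keeps the input size.
    return F.avg_pool2d(x, kernel_size=2 * r + 1, stride=1, padding=r,
                        count_include_pad=False)

def guided_filter(I, p, r=16, eps=1e-3):
    # I: [N, 1, H, W] grayscale guide; p: [N, 1, H, W] soft mask to refine.
    mean_I = box_filter(I, r)
    mean_p = box_filter(p, r)
    cov_Ip = box_filter(I * p, r) - mean_I * mean_p
    var_I = box_filter(I * I, r) - mean_I * mean_I
    a = cov_Ip / (var_I + eps)  # per-pixel coefficients of the linear model
    b = mean_p - a * mean_I
    # Smooth the coefficients, then apply the locally linear model to I.
    return box_filter(a, r) * I + box_filter(b, r)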
Example #3
def inference(net, imgFile, opts):
    '''Run multi-scale inference on one file from opts.input_path and save
    the visualization and label map to opts.output_path.'''
    print('Processing file {}'.format(imgFile))
    im = Image.open(os.path.join(opts.input_path, imgFile))
    imgNp = np.array(im)

    adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
    adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda()

    adj1_ = Variable(
        torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
    adj1_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()

    cihp_adj = graph.preprocess_adj(graph.cihp_graph)
    adj3_ = Variable(torch.from_numpy(cihp_adj).float())
    adj3_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()

    net.eval()

    img = convertImg(imgNp)

    # multi-scale
    scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]
    testloader_list = []
    testloader_flip_list = []
    for pv in scale_list:
        composed_transforms_ts = transforms.Compose([
            tr.Scale_only_img(pv),
            tr.Normalize_xception_tf_only_img(),
            tr.ToTensor_only_img()
        ])

        composed_transforms_ts_flip = transforms.Compose([
            tr.Scale_only_img(pv),
            tr.HorizontalFlip_only_img(),
            tr.Normalize_xception_tf_only_img(),
            tr.ToTensor_only_img()
        ])

        testloader_list.append(img_transform(img, composed_transforms_ts))
        testloader_flip_list.append(
            img_transform(img, composed_transforms_ts_flip))

    for iii, sample_batched in enumerate(
            zip(testloader_list, testloader_flip_list)):
        inputs, labels = sample_batched[0]['image'], sample_batched[0]['label']
        inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label']
        inputs = inputs.unsqueeze(0)
        inputs_f = inputs_f.unsqueeze(0)
        inputs = torch.cat((inputs, inputs_f), dim=0)
        if iii == 0:
            _, _, h, w = inputs.size()
        # assert inputs.size() == inputs_f.size()

        # Forward pass of the mini-batch
        inputs = Variable(inputs, requires_grad=False)

        with torch.no_grad():
            inputs = inputs.cuda()
            outputs = net(inputs, adj1_test.cuda(), adj3_test.cuda(),
                          adj2_test.cuda())
            # Average the plain and flipped predictions (this variant skips
            # the left/right channel remap used in Examples #1 and #2).
            outputs = (outputs[0] + flip(outputs[1], dim=-1)) / 2
            outputs = outputs.unsqueeze(0)

            if iii > 0:
                outputs = F.interpolate(outputs,
                                        size=(h, w),
                                        mode='bilinear',
                                        align_corners=True)
                outputs_final = outputs_final + outputs
            else:
                outputs_final = outputs.clone()

    predictions = torch.max(outputs_final, 1)[1]
    prob_predictions = torch.max(outputs_final, 1)[0]
    results = predictions.cpu().numpy()
    prob_results = prob_predictions.cpu().numpy()
    vis_res = decode_labels(results)

    if not os.path.isdir(opts.output_path):
        os.makedirs(opts.output_path)
    parsing_im = Image.fromarray(vis_res[0])
    base_name = os.path.splitext(imgFile)[0]
    parsing_im.save(
        os.path.join(opts.output_path, '{}.vis.png'.format(base_name)))
    cv2.imwrite(os.path.join(opts.output_path, '{}.png'.format(base_name)),
                results[0, :, :])
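Example #3 reads opts.input_path and opts.output_path from an options object; a hypothetical driver (everything here beyond those two attribute names is an assumption) might be:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input_path', default='./input')
parser.add_argument('--output_path', default='./output')
opts = parser.parse_args()

# net is assumed to be a loaded Graphonomy-style model, as in Example #1.
for imgFile in sorted(os.listdir(opts.input_path)):
    inference(net, imgFile, opts)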