# Example 1
def main():
    """Run single-image inference with a pretrained human-parsing network.

    Loads the model weights named by ``args.restore_weight``, parses every
    image under ``args.input`` and writes, per image, a palette-indexed PNG
    label map to ``args.output`` (plus the argmax map as ``.npy`` when
    ``args.logits`` is set).
    """
    args = get_arguments()

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']

    model = network(num_classes=num_classes, pretrained=None).cuda()
    model = nn.DataParallel(model)
    state_dict = torch.load(args.restore_weight)
    model.load_state_dict(state_dict)
    model.eval()

    # NOTE(review): mean/std look like ImageNet stats in reversed (BGR?)
    # channel order — confirm against the training-time preprocessing.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229])
    ])
    dataset = SCHPDataset(root=args.input, input_size=input_size, transform=transform)
    dataloader = DataLoader(dataset)

    os.makedirs(args.output, exist_ok=True)

    palette = get_palette(num_classes)

    # Loop-invariant: the upsampler only depends on input_size, build it once.
    upsample = torch.nn.Upsample(size=input_size, mode='bilinear', align_corners=True)

    with torch.no_grad():
        for batch in dataloader:
            image, meta = batch
            img_name = meta['name'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
            upsample_output = upsample(output)
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            # Warp logits back into the original image frame, then take the
            # per-pixel argmax as the final class label map.
            logits_result = transform_logits(upsample_output.data.cpu().numpy(),
                                             c, s, w, h, input_size=input_size)
            parsing_result = np.argmax(logits_result, axis=2)

            parsing_result_path = os.path.join(args.output, img_name[:-4] + '.png')
            output_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8))
            output_img.putpalette(palette)
            output_img.save(parsing_result_path)

            if args.logits:
                np_result_path = os.path.join(args.output, img_name[:-4] + '.npy')
                np.save(np_result_path, parsing_result)
    return
# Example 2
def valid(model, valloader, input_size, num_samples, gpus):
    """Run validation inference and collect per-image parsing predictions.

    Returns ``(parsing_preds, scales, centers)``. Widths/heights are gathered
    for symmetry but not returned, preserving the existing caller contract.
    """
    model.eval()

    # One uint8 label map per sample, at the network input resolution.
    parsing_preds = np.zeros((num_samples, input_size[0], input_size[1]),
                             dtype=np.uint8)

    scales = np.zeros((num_samples, 2), dtype=np.float32)
    centers = np.zeros((num_samples, 2), dtype=np.int32)
    widths = np.zeros((num_samples), dtype=np.int32)
    heights = np.zeros((num_samples), dtype=np.int32)

    idx = 0
    interp = torch.nn.Upsample(size=(input_size[0], input_size[1]),
                               mode='bilinear', align_corners=True)
    with torch.no_grad():
        for index, batch in enumerate(valloader):
            image, _, _, meta, heatmap_params = batch
            num_images = image.size(0)
            if index % 10 == 0:
                print('%d  processd' % (index * num_images))

            c = meta['center'].numpy()
            s = meta['scale'].numpy()
            w = meta['width'].numpy()
            h = meta['height'].numpy()
            scales[idx:idx + num_images, :] = s[:, :]
            centers[idx:idx + num_images, :] = c[:, :]
            widths[idx:idx + num_images] = w
            heights[idx:idx + num_images] = h

            outputs = model(image.cuda(), heatmap_params)
            if gpus > 1:
                # Multi-GPU: `outputs` is a list of per-replica outputs.
                # BUGFIX: the previous branch called .data.cpu().numpy() on an
                # object that was already a numpy array (numpy has no .cpu()),
                # fed whole-batch logits into transform_logits with w/h
                # swapped, and stored multi-channel logits into a (H, W)
                # uint8 slot before argmax. Mirror the single-GPU path:
                # argmax over the channel axis per replica batch.
                for output in outputs:
                    parsing = output[0][-1]
                    nums = len(parsing)
                    parsing = interp(parsing).data.cpu().numpy()
                    parsing = parsing.transpose(0, 2, 3, 1)  # NCHW -> NHWC
                    parsing_preds[idx:idx + nums] = np.asarray(
                        np.argmax(parsing, axis=3), dtype=np.uint8)
                    idx += nums
            else:
                parsing = outputs[0][-1]
                parsing = interp(parsing).data.cpu().numpy()
                parsing = parsing.transpose(0, 2, 3, 1)  # NCHW -> NHWC
                parsing_preds[idx:idx + num_images, :, :] = np.asarray(
                    np.argmax(parsing, axis=3), dtype=np.uint8)

                idx += num_images

    parsing_preds = parsing_preds[:num_samples, :, :]

    return parsing_preds, scales, centers
def main():
    """Run inference and save, per image, a remapped grayscale label map and a
    palette-colored visualisation under ``args.root``.

    Gray maps go to ``args.root/args.output``, colored maps to
    ``args.root/args.output_vis``; raw logits are saved next to ``args.root``
    when ``args.logits`` is set.
    """
    args = get_arguments()

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']

    model = network(num_classes=num_classes, pretrained=None).cuda()
    model = nn.DataParallel(model)
    state_dict = torch.load(args.restore_weight)
    model.load_state_dict(state_dict)
    model.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    INPUT_DIR = os.path.join(args.root, args.input)
    dataset = SCHPDataset(root=INPUT_DIR,
                          input_size=input_size,
                          transform=transform)
    dataloader = DataLoader(dataset)

    # NOTE(review): palette size is hard-coded to 20 classes (LIP) instead of
    # num_classes — confirm this is intentional for non-LIP datasets.
    palette = get_palette(20)

    # Loop-invariant work hoisted out of the batch loop: output directories
    # and the upsampler never change between iterations.
    OUTDIR_GRAY = os.path.join(args.root, args.output)
    OUTDIR_VIS = os.path.join(args.root, args.output_vis)
    os.makedirs(OUTDIR_GRAY, exist_ok=True)
    os.makedirs(OUTDIR_VIS, exist_ok=True)
    upsample = torch.nn.Upsample(size=input_size,
                                 mode='bilinear',
                                 align_corners=True)

    with torch.no_grad():
        for batch in dataloader:

            image, meta = batch
            img_name = meta['name'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
            upsample_output = upsample(output)
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            # Warp logits back into the original image frame.
            logits_result = transform_logits(
                upsample_output.data.cpu().numpy(),
                c,
                s,
                w,
                h,
                input_size=input_size)
            parsing_result = np.argmax(logits_result, axis=2)

            parsing_result_path = os.path.join(OUTDIR_VIS,
                                               img_name[:-4] + '.png')
            parsing_result_path_gray = os.path.join(OUTDIR_GRAY,
                                                    img_name[:-4] + '.png')
            output_img = Image.fromarray(
                np.asarray(parsing_result, dtype=np.uint8))
            # Save the remapped gray label map first, then attach the palette
            # and save the colored visualisation of the same image.
            original_mask = map_pixel(output_img, img_name)
            original_mask.save(parsing_result_path_gray)
            original_mask.putpalette(palette)
            original_mask.save(parsing_result_path)

            if args.logits:
                logits_result_path = os.path.join(args.root,
                                                  img_name[:-4] + '.npy')
                np.save(logits_result_path, logits_result)
    return
# Example 4
def main():
    """Inference over a directory tree.

    Mirrors the relative sub-directory layout of ``args.input`` under
    ``args.output``, writing one palette PNG per image (and either the raw
    logits or the argmax map as ``.npy`` when ``args.logits`` is set).
    """
    args = get_arguments()

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']

    model = network(num_classes=num_classes, pretrained=None).cuda()
    model = nn.DataParallel(model)
    state_dict = torch.load(args.restore_weight)
    model.load_state_dict(state_dict)
    model.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    dataset = SCHPDataset(root=args.input,
                          input_size=input_size,
                          transform=transform)
    dataloader = DataLoader(dataset)

    palette = get_palette(num_classes)

    # Loop-invariant: build the upsampler once instead of per image.
    upsample = torch.nn.Upsample(size=input_size,
                                 mode='bilinear',
                                 align_corners=True)

    print(f"Found {len(dataloader)} files")
    with torch.no_grad():
        for batch in tqdm(dataloader, total=len(dataloader)):

            image, meta = batch
            img_name = meta['name'][0]
            img_path = meta['path'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
            upsample_output = upsample(output)
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            logits_result = transform_logits(
                upsample_output.data.cpu().numpy(),
                c,
                s,
                w,
                h,
                input_size=input_size)

            parsing_result = np.argmax(logits_result, axis=2)

            # Reproduce the input's relative path under the output root.
            img_subpath = img_path.replace(args.input, "").lstrip("/")
            parsing_result_path = os.path.join(
                args.output, img_subpath[:-4] + args.postfix_filename + '.png')
            os.makedirs(os.path.dirname(parsing_result_path), exist_ok=True)

            output_img = Image.fromarray(
                np.asarray(parsing_result, dtype=np.uint8))
            output_img.putpalette(palette)  # color-index the label map
            output_img.save(parsing_result_path)
            if args.logits:
                logits_result_path = os.path.join(
                    args.output,
                    img_subpath[:-4] + args.postfix_filename + '.npy')
                if args.argmax_logits:
                    # NOTE(review): np.save appends '.npy' when the filename
                    # lacks it, so this '.npyc' path becomes '*.npyc.npy' on
                    # disk — confirm the reader expects that name.
                    logits_result_path += "c"  # c for compressed
                    result = parsing_result
                else:
                    result = logits_result
                np.save(logits_result_path, result)
    return
def main():
    """For every immediate sub-directory of ``args.input``, run inference and
    write per-image label maps, cloth masks, and white-background cloth crops.

    Outputs go to ``test_label`` / ``test_edge`` / ``test_color`` under
    ``args.input``; the mask/crop outputs are only produced for images whose
    name contains ``-p-``.
    """
    args = get_arguments()

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']

    model = network(num_classes=num_classes, pretrained=None).cuda()
    model = nn.DataParallel(model)
    state_dict = torch.load(args.restore_weight)
    model.load_state_dict(state_dict)
    model.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])

    # Loop-invariant setup hoisted out of the per-subdir/per-image loops:
    # output directories, the mask palette, and the upsampler never change.
    PARSING_DIR = os.path.join(args.input, 'test_label')
    CLOTHING_DIR = os.path.join(args.input, 'test_color')
    CLOTHMASK_DIR = os.path.join(args.input, 'test_edge')
    for out_dir in (PARSING_DIR, CLOTHING_DIR, CLOTHMASK_DIR):
        os.makedirs(out_dir, exist_ok=True)
    mask_palette = get_palette_mask(num_classes)
    upsample = torch.nn.Upsample(size=input_size,
                                 mode='bilinear',
                                 align_corners=True)

    # Immediate sub-directories only (top level of the walk).
    for subdir in next(os.walk(args.input))[1]:
        DIR = os.path.join(args.input, subdir)
        dataset = SCHPDataset(root=DIR,
                              input_size=input_size,
                              transform=transform)
        dataloader = DataLoader(dataset)

        with torch.no_grad():
            for batch in dataloader:

                image, meta = batch
                img_name = meta['name'][0]
                c = meta['center'].numpy()[0]
                s = meta['scale'].numpy()[0]
                w = meta['width'].numpy()[0]
                h = meta['height'].numpy()[0]

                output = model(image.cuda())
                upsample_output = upsample(output)
                upsample_output = upsample_output.squeeze()
                upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

                logits_result = transform_logits(
                    upsample_output.data.cpu().numpy(),
                    c,
                    s,
                    w,
                    h,
                    input_size=input_size)
                parsing_result = np.argmax(logits_result, axis=2)

                parsing_result_path = os.path.join(PARSING_DIR,
                                                   img_name[:-4] + '.png')
                cloth_mask_path = os.path.join(CLOTHMASK_DIR,
                                               img_name[:-4] + '.jpg')
                cloth_path = os.path.join(CLOTHING_DIR, img_name[:-4] + '.png')
                output_img = Image.fromarray(
                    np.asarray(parsing_result, dtype=np.uint8))
                # Raw (un-paletted) all-class label map.
                output_img.save(parsing_result_path)

                # Only product ('-p-') images get mask/crop outputs.
                if not '-p-' in img_name: continue
                # Mask keeping only the upper-cloth classes — presumably what
                # get_palette_mask encodes; verify against its definition.
                new_mask = output_img
                new_mask.putpalette(mask_palette)
                RGB_mask = new_mask.convert('RGB')
                L_mask = new_mask.convert('L')
                original_image_path = os.path.join(DIR, img_name)
                print('original_image_path is ', original_image_path)
                original_image = Image.open(original_image_path)
                masked_image = ImageChops.multiply(original_image, RGB_mask)
                # Composite the masked cloth onto a white background.
                # Image.paste works in place and returns None, so the old
                # `white_masked_image = bg_image.paste(...)` binding was dead.
                bg_image = Image.new("RGBA", masked_image.size,
                                     (255, 255, 255, 255))
                bg_image.paste(masked_image.convert('RGBA'), (0, 0), L_mask)
                L_mask.save(cloth_mask_path)
                bg_image.save(cloth_path)

                if args.logits:
                    logits_result_path = os.path.join(args.output,
                                                      img_name[:-4] + '.npy')
                    np.save(logits_result_path, logits_result)
    return
# Example 6
def main():
    """Batched inference over the LIP validation split.

    Runs the model on batches sized ``args.batch_size * len(gpus)`` and
    writes one palette PNG per image to ``args.output`` (plus raw logits as
    ``.npy`` when ``args.logits`` is set).
    """
    args = get_arguments()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpus = [int(i) for i in args.gpu.split(',')]

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']

    model = network(num_classes=num_classes,
                    pretrained=None,
                    with_my_bn=args.with_my_bn,
                    batch_size=args.batch_size).cuda()
    model = nn.DataParallel(model)
    state_dict = torch.load(args.restore_weight)
    model.load_state_dict(state_dict)
    model.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    # Augmentation disabled for evaluation (flip_prob < 0 presumably means
    # "never flip" — TODO confirm against LIPDataSet).
    lip_dataset = LIPDataSet(args.data_dir,
                             'val',
                             crop_size=input_size,
                             transform=transform,
                             rotation_factor=0,
                             flip_prob=-0.1,
                             scale_factor=0)
    dataloader = DataLoader(lip_dataset,
                            batch_size=args.batch_size * len(gpus),
                            drop_last=True)

    os.makedirs(args.output, exist_ok=True)

    palette = get_palette(num_classes)

    # Loop-invariant: build the upsampler once instead of per batch.
    upsample = torch.nn.Upsample(size=input_size,
                                 mode='bilinear',
                                 align_corners=True)

    with torch.no_grad():
        for batch in dataloader:

            image, meta, heatmaps = batch

            img_names = meta['name']
            cs = meta['center'].numpy()
            ss = meta['scale'].numpy()
            ws = meta['width'].numpy()
            hs = meta['height'].numpy()

            output = model(image.cuda(), heatmaps)
            # NOTE(review): output[0][1] is taken as the parsing logits —
            # indexing convention inferred from the original code; confirm
            # against the model's forward() return structure.
            output = output[0][1]
            upsample_outputs = upsample(output)
            upsample_outputs = upsample_outputs.squeeze()
            for upsample_output, img_name, c, s, w, h in zip(
                    upsample_outputs, img_names, cs, ss, ws, hs):
                upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

                logits_result = transform_logits(
                    upsample_output.data.cpu().numpy(),
                    c,
                    s,
                    w,
                    h,
                    input_size=input_size)
                parsing_result = np.argmax(logits_result, axis=2)

                parsing_result_path = os.path.join(args.output,
                                                   img_name[:-4] + '.png')
                output_img = Image.fromarray(
                    np.asarray(parsing_result, dtype=np.uint8))
                output_img.putpalette(palette)
                output_img.save(parsing_result_path)
                if args.logits:
                    logits_result_path = os.path.join(args.output,
                                                      img_name[:-4] + '.npy')
                    np.save(logits_result_path, logits_result)
    return