def process_image():
    """Run human-parsing inference on every image under the saved-image
    folder and write one palette PNG segmentation map per input image.

    Relies on module-level globals: `model` (CUDA, eval mode),
    `num_classes`, `image_saved_path`, `process_image_path`.
    """
    input_size = [512, 512]
    transform = transforms.Compose([
        transforms.ToTensor(),
        # NOTE(review): these are the ImageNet stats in reversed (BGR-like)
        # channel order — presumably intentional for this model; confirm.
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    dataset = SimpleFolderDataset(root=os.getcwd() + image_saved_path,
                                  input_size=input_size,
                                  transform=transform)
    dataloader = DataLoader(dataset)

    palette = get_palette(num_classes)

    # Loop-invariant: build the upsampler once instead of once per batch.
    upsample = torch.nn.Upsample(size=input_size,
                                 mode='bilinear',
                                 align_corners=True)

    with torch.no_grad():
        for batch in tqdm(dataloader):
            image, meta = batch
            img_name = meta['name'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
            # output[0][-1][0]: last-stage logits of the first (only) sample.
            upsample_output = upsample(output[0][-1][0].unsqueeze(0))
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            # Map logits back to the original image geometry, then take the
            # per-pixel argmax as the class label.
            logits_result = transform_logits(
                upsample_output.data.cpu().numpy(),
                c, s, w, h,
                input_size=input_size)
            parsing_result = np.argmax(logits_result, axis=2)
            parsing_result_path = os.path.join(
                os.getcwd() + process_image_path, img_name[:-4] + '.png')
            output_img = Image.fromarray(
                np.asarray(parsing_result, dtype=np.uint8))
            output_img.putpalette(palette)
            output_img.save(parsing_result_path)
def main():
    """Entry point: load a resnet101 parsing checkpoint, segment every
    image in ``args.input_dir`` and save a palette PNG (plus optional raw
    logits as ``.npy``) per image into ``args.output_dir``.
    """
    args = get_arguments()

    gpus = [int(i) for i in args.gpu.split(',')]
    assert len(gpus) == 1  # single-GPU inference only
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']
    label = dataset_settings[args.dataset]['label']
    print("Evaluating total class number {} with {}".format(
        num_classes, label))

    model = networks.init_model('resnet101',
                                num_classes=num_classes,
                                pretrained=None)

    # The checkpoint was saved from a DataParallel wrapper; strip the
    # 'module.' prefix so the keys match the bare model.
    state_dict = torch.load(args.model_restore)['state_dict']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        new_state_dict[k[7:]] = v  # remove `module.`
    model.load_state_dict(new_state_dict)
    model.cuda()
    model.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        # NOTE(review): ImageNet stats in reversed (BGR-like) channel
        # order — presumably intentional for this model; confirm.
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    dataset = SimpleFolderDataset(root=args.input_dir,
                                  input_size=input_size,
                                  transform=transform)
    dataloader = DataLoader(dataset)

    # exist_ok avoids the check-then-create race of os.path.exists().
    os.makedirs(args.output_dir, exist_ok=True)

    palette = get_palette(num_classes)

    # Loop-invariant: build the upsampler once instead of once per batch.
    upsample = torch.nn.Upsample(size=input_size,
                                 mode='bilinear',
                                 align_corners=True)

    with torch.no_grad():
        for batch in tqdm(dataloader):
            image, meta = batch
            img_name = meta['name'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
            # output[0][-1][0]: last-stage logits of the first (only) sample.
            upsample_output = upsample(output[0][-1][0].unsqueeze(0))
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            # Map logits back to the original image geometry, then take the
            # per-pixel argmax as the class label.
            logits_result = transform_logits(
                upsample_output.data.cpu().numpy(),
                c, s, w, h,
                input_size=input_size)
            parsing_result = np.argmax(logits_result, axis=2)
            parsing_result_path = os.path.join(args.output_dir,
                                               img_name[:-4] + '.png')
            output_img = Image.fromarray(
                np.asarray(parsing_result, dtype=np.uint8))
            output_img.putpalette(palette)
            output_img.save(parsing_result_path)
            if args.logits:
                logits_result_path = os.path.join(args.output_dir,
                                                  img_name[:-4] + '.npy')
                np.save(logits_result_path, logits_result)
    return
# Example #3 (snippet-site separator in the original listing; score: 0)
    def Process_image(self):
        """Segment the captured front/side photos and compute body
        measurements; otherwise run a countdown prompting the user to
        capture images.

        Reads instance state: height_feet, height_inch, img_timer_flag,
        current_img_timer, image_timer_value, current_frame, return_point.
        Mutates: process_button, img_timer_flag, current_img_timer,
        current_frame.
        """
        # Both captured views must exist before processing can start.
        # (`and`, not bitwise `&`, is the idiomatic boolean connective.)
        if (os.path.isfile(images_folder_path + '/front.png')
                and os.path.isfile(images_folder_path + '/side.png')):

            input_size = [512, 512]
            transform = transforms.Compose([
                transforms.ToTensor(),
                # NOTE(review): ImageNet stats in reversed (BGR-like)
                # channel order — presumably intentional; confirm.
                transforms.Normalize(mean=[0.406, 0.456, 0.485],
                                     std=[0.225, 0.224, 0.229])
            ])
            dataset = SimpleFolderDataset(root=images_folder_path,
                                          input_size=input_size,
                                          transform=transform)
            dataloader = DataLoader(dataset)

            palette = get_palette(num_classes)

            # Loop-invariant: build the upsampler once, not per batch.
            upsample = torch.nn.Upsample(size=input_size,
                                         mode='bilinear',
                                         align_corners=True)

            with torch.no_grad():
                for batch in tqdm(dataloader):
                    image, meta = batch
                    img_name = meta['name'][0]
                    c = meta['center'].numpy()[0]
                    s = meta['scale'].numpy()[0]
                    w = meta['width'].numpy()[0]
                    h = meta['height'].numpy()[0]

                    output = model(image.cuda())
                    upsample_output = upsample(
                        output[0][-1][0].unsqueeze(0))
                    upsample_output = upsample_output.squeeze()
                    upsample_output = upsample_output.permute(
                        1, 2, 0)  # CHW -> HWC

                    logits_result = transform_logits(
                        upsample_output.data.cpu().numpy(),
                        c, s, w, h,
                        input_size=input_size)
                    parsing_result = np.argmax(logits_result, axis=2)
                    parsing_result_path = os.path.join(output_path,
                                                       img_name[:-4] + '.png')
                    output_img = Image.fromarray(
                        np.asarray(parsing_result, dtype=np.uint8))
                    output_img.putpalette(palette)
                    output_img.save(parsing_result_path)

            self.process_button = False

            # Height in cm (1 foot = 12 inches, 1 inch = 2.54 cm).
            # The original computed this twice; once is enough.
            actual_height = (12 * self.height_feet + self.height_inch) * 2.54

            # Original photos and their part-segmentation masks.
            i1 = cv2.imread(images_folder_path + '/front.png')
            m1 = cv2.imread(output_path + '/front.png')
            i2 = cv2.imread(images_folder_path + '/side.png')
            m2 = cv2.imread(output_path + '/side.png')

            get_measuremnets(i1, m1, i2, m2, actual_height, True)

        else:
            # Start the countdown the first time we land here.
            if not self.img_timer_flag:
                self.current_img_timer = time.time()
                self.img_timer_flag = True

            # np.int was removed in NumPy 1.24; the builtin int is the
            # correct replacement and truncates the same way.
            count_down = self.image_timer_value - int(
                time.time() - self.current_img_timer)

            if self.img_timer_flag and count_down <= self.image_timer_value:
                self.current_frame = write_data(self.current_frame,
                                                'Please save some images', 0.0,
                                                0.4, 1.0, 0.15, 0.18, 0.10, 1,
                                                2, (255, 255, 255))

            # Timer expired or user pressed "return": reset state.
            if count_down == 0 or self.button_selected(self.return_point):
                self.process_button = False
                self.img_timer_flag = False
def main():
    """Entry point: load a resnet101 parsing checkpoint, segment every
    image in ``args.input_dir``, and per image save a palette PNG, a
    green-screened JPG composite (non-clothing pixels replaced with pure
    green), and optionally the raw logits as ``.npy``.
    """
    args = get_arguments()

    gpus = [int(i) for i in args.gpu.split(',')]
    assert len(gpus) == 1  # single-GPU inference only
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']
    label = dataset_settings[args.dataset]['label']
    print("Evaluating total class number {} with {}".format(
        num_classes, label))

    model = networks.init_model('resnet101',
                                num_classes=num_classes,
                                pretrained=None)

    # The checkpoint was saved from a DataParallel wrapper; strip the
    # 'module.' prefix so the keys match the bare model.
    state_dict = torch.load(args.model_restore)['state_dict']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        new_state_dict[k[7:]] = v  # remove `module.`
    model.load_state_dict(new_state_dict)
    model.cuda()
    model.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        # NOTE(review): ImageNet stats in reversed (BGR-like) channel
        # order — presumably intentional for this model; confirm.
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    dataset = SimpleFolderDataset(root=args.input_dir,
                                  input_size=input_size,
                                  transform=transform)
    dataloader = DataLoader(dataset)

    # exist_ok avoids the check-then-create race of os.path.exists().
    os.makedirs(args.output_dir, exist_ok=True)

    palette = get_palette(num_classes)

    # Loop-invariant: build the upsampler once instead of once per batch.
    upsample = torch.nn.Upsample(size=input_size,
                                 mode='bilinear',
                                 align_corners=True)

    with torch.no_grad():
        for batch in tqdm(dataloader):
            image, meta = batch
            img_name = meta['name'][0]
            img_path = meta['img_path'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
            upsample_output = upsample(output[0][-1][0].unsqueeze(0))
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            logits_result = transform_logits(
                upsample_output.data.cpu().numpy(),
                c, s, w, h,
                input_size=input_size)
            parsing_result = np.argmax(logits_result, axis=2)

            output_img = Image.fromarray(
                np.asarray(parsing_result, dtype=np.uint8))
            output_img.putpalette(palette)
            png_path = os.path.join(args.output_dir, img_name[:-4] + '.png')
            output_img.save(png_path)

            # For the 'lip' label set, indices 0-4 are Background/Hat/Hair/
            # Glove/Sunglasses and 13 is Face; keep classes >= 5 except the
            # face as the foreground mask (255 = keep, 0 = drop).
            mask = (parsing_result >= 5) & (parsing_result != 13)
            parsing_result = mask.astype(int) * 255

            org_img = Image.open(img_path)
            alpha = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8))
            org_img.putalpha(alpha)
            org_img = np.array(org_img)

            # https://stackoverflow.com/a/55973647/1513627
            # Replace fully transparent pixels with opaque green before
            # flattening to RGB ("green screen" background).
            org_img[org_img[..., -1] == 0] = [0, 255, 0, 0]
            jpg_path = os.path.join(args.output_dir, img_name[:-4] + '.jpg')
            Image.fromarray(org_img).convert('RGB').save(jpg_path)

            if args.logits:
                logits_result_path = os.path.join(args.output_dir,
                                                  img_name[:-4] + '.npy')
                np.save(logits_result_path, logits_result)
    return
# Example #5 (snippet-site separator in the original listing; score: 0)
def process_image(feet, inch):
    """Segment the saved front/side photos and derive body measurements.

    Args:
        feet: whole feet of the subject's height.
        inch: remaining inches of the subject's height.

    Returns:
        ``(status, waist, chest, thigh, front_sleeve_in_cm, dis_in_cm,
        image, side_image)`` from ``get_measuremnets``, or
        ``(False, -1, -1, -1, -1, -1, None, None)`` when either input
        photo is missing.
    """
    # Hoist the repeated os.getcwd() + path concatenations.
    base_dir = os.getcwd() + image_saved_path
    out_dir = os.getcwd() + process_image_path

    # Guard clause: bail out early if either view is missing.
    if not (os.path.isfile(base_dir + '/front.png')
            and os.path.isfile(base_dir + '/side.png')):
        return False, -1, -1, -1, -1, -1, None, None

    input_size = [512, 512]
    transform = transforms.Compose([
        transforms.ToTensor(),
        # NOTE(review): ImageNet stats in reversed (BGR-like) channel
        # order — presumably intentional for this model; confirm.
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    dataset = SimpleFolderDataset(root=base_dir,
                                  input_size=input_size,
                                  transform=transform)
    dataloader = DataLoader(dataset)

    palette = get_palette(num_classes)

    # Loop-invariant: build the upsampler once instead of once per batch.
    upsample = torch.nn.Upsample(size=input_size,
                                 mode='bilinear',
                                 align_corners=True)

    with torch.no_grad():
        for batch in tqdm(dataloader):
            image, meta = batch
            img_name = meta['name'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
            upsample_output = upsample(output[0][-1][0].unsqueeze(0))
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            logits_result = transform_logits(
                upsample_output.data.cpu().numpy(),
                c, s, w, h,
                input_size=input_size)
            parsing_result = np.argmax(logits_result, axis=2)
            parsing_result_path = os.path.join(out_dir,
                                               img_name[:-4] + '.png')
            output_img = Image.fromarray(
                np.asarray(parsing_result, dtype=np.uint8))
            output_img.putpalette(palette)
            output_img.save(parsing_result_path)

    # Height in cm (1 foot = 12 inches, 1 inch = 2.54 cm).
    actual_height = (12 * feet + inch) * 2.54

    # Original photos and their part-segmentation masks.
    i1 = cv2.imread(base_dir + '/front.png')
    m1 = cv2.imread(out_dir + '/front.png')
    i2 = cv2.imread(base_dir + '/side.png')
    m2 = cv2.imread(out_dir + '/side.png')

    status, waist, chest, thigh, front_sleeve_in_cm, dis_in_cm, image, side_image = get_measuremnets(
        i1, m1, i2, m2, actual_height, True)
    return status, waist, chest, thigh, front_sleeve_in_cm, dis_in_cm, image, side_image