def process_image():
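    # NOTE: image_saved_path, process_image_path, num_classes and model are
    # assumed to be module-level globals in the original source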

    input_size = [512, 512]
    transform = transforms.Compose([
        transforms.ToTensor(),
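        # ImageNet mean/std, listed in BGR channel order (the reverse of the
        # usual RGB values)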
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    dataset = SimpleFolderDataset(root=os.getcwd() + image_saved_path,
                                  input_size=input_size,
                                  transform=transform)
    dataloader = DataLoader(dataset)

    palette = get_palette(num_classes)

    with torch.no_grad():
        for idx, batch in enumerate(tqdm(dataloader)):
            image, meta = batch
            img_name = meta['name'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
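            # output is a nested list of feature maps: output[0][-1] appears
            # to be the final (fusion) parsing logits, and [0] drops the batch
            # dim. The Upsample module below could be created once, outside
            # the loop.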
            upsample = torch.nn.Upsample(size=input_size,
                                         mode='bilinear',
                                         align_corners=True)
            upsample_output = upsample(output[0][-1][0].unsqueeze(0))
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            logits_result = transform_logits(
                upsample_output.data.cpu().numpy(),
                c,
                s,
                w,
                h,
                input_size=input_size)
            parsing_result = np.argmax(logits_result, axis=2)
            parsing_result_path = os.path.join(
                os.getcwd() + process_image_path, img_name[:-4] + '.png')
            output_img = Image.fromarray(
                np.asarray(parsing_result, dtype=np.uint8))
            output_img.putpalette(palette)
            output_img.save(parsing_result_path)
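
Note: get_palette is not defined in these snippets. A minimal sketch of the
de-facto standard VOC-style palette generator that human-parsing repos ship
(an assumption; check the repo's utils for the exact version):

def get_palette(num_cls):
    """Return a flat [r0, g0, b0, r1, ...] list for PIL's putpalette."""
    palette = [0] * (num_cls * 3)
    for j in range(num_cls):
        lab = j
        i = 0
        while lab:
            # Spread the label's bits across the three color channels
            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i += 1
            lab >>= 3
    return palette
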
Example #2
def main():
    args = get_arguments()

    gpus = [int(i) for i in args.gpu.split(',')]
    assert len(gpus) == 1
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']
    label = dataset_settings[args.dataset]['label']
    print("Evaluating total class number {} with {}".format(
        num_classes, label))

    model = networks.init_model('resnet101',
                                num_classes=num_classes,
                                pretrained=None)

    state_dict = torch.load(args.model_restore)['state_dict']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    model.cuda()
    model.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    dataset = SimpleFolderDataset(root=args.input_dir,
                                  input_size=input_size,
                                  transform=transform)
    dataloader = DataLoader(dataset)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    palette = get_palette(num_classes)
    with torch.no_grad():
        for idx, batch in enumerate(tqdm(dataloader)):
            image, meta = batch
            img_name = meta['name'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
            upsample = torch.nn.Upsample(size=input_size,
                                         mode='bilinear',
                                         align_corners=True)
            upsample_output = upsample(output[0][-1][0].unsqueeze(0))
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            logits_result = transform_logits(
                upsample_output.data.cpu().numpy(),
                c,
                s,
                w,
                h,
                input_size=input_size)
            parsing_result = np.argmax(logits_result, axis=2)
            parsing_result_path = os.path.join(args.output_dir,
                                               img_name[:-4] + '.png')
            output_img = Image.fromarray(
                np.asarray(parsing_result, dtype=np.uint8))
            output_img.putpalette(palette)
            output_img.save(parsing_result_path)
            if args.logits:
                logits_result_path = os.path.join(args.output_dir,
                                                  img_name[:-4] + '.npy')
                np.save(logits_result_path, logits_result)
    return
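
Note: transform_logits is also not defined in these snippets; it warps the
upsampled HWC logits from the network crop back to the original image. A
minimal sketch, assuming the SCHP-style get_affine_transform helper from the
repo's transform utilities (inv=1 builds the inverse crop-to-image matrix):

import cv2
import numpy as np

def transform_logits(logits, center, scale, width, height, input_size):
    """Warp each logit channel back to the original (width, height) image."""
    trans = get_affine_transform(center, scale, 0, input_size, inv=1)
    warped = [
        cv2.warpAffine(logits[:, :, i], trans, (int(width), int(height)),
                       flags=cv2.INTER_LINEAR)
        for i in range(logits.shape[2])
    ]
    return np.stack(warped, axis=2)
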
Example #3
    def Process_image(self):
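        # NOTE: images_folder_path, output_path, num_classes and model are
        # assumed to be module-level globals in the original source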

        if (os.path.isfile(images_folder_path + '/front.png')
                and os.path.isfile(images_folder_path + '/side.png')):

            input_size = [512, 512]
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.406, 0.456, 0.485],
                                     std=[0.225, 0.224, 0.229])
            ])
            dataset = SimpleFolderDataset(root=images_folder_path,
                                          input_size=input_size,
                                          transform=transform)
            dataloader = DataLoader(dataset)

            palette = get_palette(num_classes)

            with torch.no_grad():
                for idx, batch in enumerate(tqdm(dataloader)):
                    image, meta = batch
                    img_name = meta['name'][0]
                    c = meta['center'].numpy()[0]
                    s = meta['scale'].numpy()[0]
                    w = meta['width'].numpy()[0]
                    h = meta['height'].numpy()[0]

                    output = model(image.cuda())
                    upsample = torch.nn.Upsample(size=input_size,
                                                 mode='bilinear',
                                                 align_corners=True)
                    upsample_output = upsample(output[0][-1][0].unsqueeze(0))
                    upsample_output = upsample_output.squeeze()
                    upsample_output = upsample_output.permute(1, 2,
                                                              0)  # CHW -> HWC

                    logits_result = transform_logits(
                        upsample_output.data.cpu().numpy(),
                        c,
                        s,
                        w,
                        h,
                        input_size=input_size)
                    parsing_result = np.argmax(logits_result, axis=2)
                    parsing_result_path = os.path.join(output_path,
                                                       img_name[:-4] + '.png')
                    output_img = Image.fromarray(
                        np.asarray(parsing_result, dtype=np.uint8))
                    output_img.putpalette(palette)
                    output_img.save(parsing_result_path)

            self.process_button = False

            actual_height = (12 * self.height_feet + self.height_inch
                             ) * 2.54  # in cm, (1 inch = 2.54 cm)
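            # With both parsing masks saved, pair each raw photo with its mask
            # and estimate body measurements from the front and side views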

            f_path = images_folder_path + '/front.png'
            human_part_seg_f_path = output_path + '/front.png'
            i1 = cv2.imread(f_path)
            m1 = cv2.imread(human_part_seg_f_path)

            s_path = images_folder_path + '/side.png'
            human_part_seg_s_path = output_path + '/side.png'
            i2 = cv2.imread(s_path)
            m2 = cv2.imread(human_part_seg_s_path)

            get_measuremnets(i1, m1, i2, m2, actual_height, True)

        else:
            if not self.img_timer_flag:
                self.current_img_timer = time.time()
                self.img_timer_flag = True

            # np.int was removed in NumPy 1.24; use the builtin int instead
            count_down = self.image_timer_value - int(
                time.time() - self.current_img_timer)

            if self.img_timer_flag and count_down <= self.image_timer_value:
                self.current_frame = write_data(self.current_frame,
                                                'Please save some images', 0.0,
                                                0.4, 1.0, 0.15, 0.18, 0.10, 1,
                                                2, (255, 255, 255))

            if count_down == 0 or self.button_selected(self.return_point):
                self.process_button = False
                self.img_timer_flag = False
Example #4
def main():
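    # Variant: loads a checkpoint, optionally exports the model to ONNX, then
    # runs frame-by-frame video inference with the torch model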
    args = get_arguments()

    # gpus = [int(i) for i in args.gpu.split(',')]
    # assert len(gpus) == 1
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']
    label = dataset_settings[args.dataset]['label']
    print("Evaluating total class number {} with {}".format(
        num_classes, label))

    model = networks.init_model('resnet101',
                                num_classes=num_classes,
                                pretrained=None)

    state_dict = torch.load(args.model_restore)  #['state_dict']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    secretary = change_dict.dictModify(new_dict=new_state_dict,
                                       old_dict=state_dict)
    new_state_dict = secretary.arange()
    # for k, v in state_dict.items():
    #     name = k[7:]  # remove `module.`
    #     new_state_dict[name] = v

    model.load_state_dict(new_state_dict)
    # print(model)
    #  # model.cuda()
    # model = torch.load("log/entire_model.pth", map_location=torch.device('cpu'))
    model.eval()
    dummy_input = torch.randn(1, 3, 473, 473)  # renamed to avoid shadowing builtin input()
    ONNX_FILE_PATH = "pretrain_model/local.onnx"
    ONNX_SIM_FILE_PATH = "pretrain_model/sim_local.onnx"

    # torch.onnx.export(model, dummy_input, ONNX_FILE_PATH, input_names=['input'], output_names=['output'], opset_version=11) #, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
    # onnx_model = onnx.load(ONNX_FILE_PATH)
    # sim_model, check = onnxsim.simplify(onnx_model)
    # onnx.save(sim_model, ONNX_SIM_FILE_PATH)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    transformer = SimpleVideo(transforms=transform, input_size=[473, 473])

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    color_man = gray2color.makeColor(num_classes)  # maps label ids to colors
    VIDEO_PATH = "input.mp4"
    cap = cv2.VideoCapture(VIDEO_PATH)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter('outpy.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 24,
                          (frame_width, frame_height))
    with torch.no_grad():
        start = time.time()
        count = 0
        ret = True
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                frame, meta = transformer.get_item(frame)
                c = meta['center']
                s = meta['scale']
                w = meta['width']
                h = meta['height']
                # out_put = model(frame)
                output = model(frame)
                # output = model(image.cuda())
                upsample = torch.nn.Upsample(size=input_size,
                                             mode='bilinear',
                                             align_corners=True)
                upsample_output = upsample(output[0][-1][0].unsqueeze(0))
                upsample_output = upsample_output.squeeze()
                upsample_output = upsample_output.permute(1, 2,
                                                          0)  # CHW -> HWC

                logits_result = transform_logits(
                    upsample_output.data.cpu().numpy(),
                    c,
                    s,
                    w,
                    h,
                    input_size=input_size)
                parsing_result = np.argmax(logits_result, axis=2)
                output_img = Image.fromarray(
                    np.asarray(parsing_result, dtype=np.uint8))
                out_img = np.array(output_img)
                output_img = color_man.G2C(out_img)

                out.write(np.array(output_img))
                cv2.imshow("Tanned", np.array(output_img))
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                if args.logits:
                    # NOTE: the original referenced an undefined img_name
                    # here; a frame-indexed filename is assumed instead
                    logits_result_path = os.path.join(
                        args.output_dir, 'frame_{:06d}.npy'.format(count))
                    np.save(logits_result_path, logits_result)
                count += 1
            else:
                break
        end = time.time()
        cap.release()
        out.release()
        print(
            "Processed {} frames in {:.5} seconds ({:.5} seconds per frame on average)"
            .format(count, end - start,
                    (end - start) / (count + 0.1)))  # +0.1 avoids div-by-zero
    return
Example #5
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    multi_scales = [float(i) for i in args.multi_scales.split(',')]
    gpus = [int(i) for i in args.gpu.split(',')]
    assert len(gpus) == 1
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    cudnn.benchmark = True
    cudnn.enabled = True

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    model = networks.init_model(args.arch,
                                num_classes=args.num_classes,
                                pretrained=None)

    IMAGE_MEAN = model.mean
    IMAGE_STD = model.std
    INPUT_SPACE = model.input_space
    print('image mean: {}'.format(IMAGE_MEAN))
    print('image std: {}'.format(IMAGE_STD))
    print('input space: {}'.format(INPUT_SPACE))
    if INPUT_SPACE == 'BGR':
        print('BGR Transformation')
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=IMAGE_MEAN, std=IMAGE_STD),
        ])
    elif INPUT_SPACE == 'RGB':
        print('RGB Transformation')
        transform = transforms.Compose([
            transforms.ToTensor(),
            BGR2RGB_transform(),
            transforms.Normalize(mean=IMAGE_MEAN, std=IMAGE_STD),
        ])
    else:
        raise ValueError('Unsupported input space: {}'.format(INPUT_SPACE))

    # Data loader
    lip_test_dataset = CropDataValSet(args.data_dir,
                                      args.split_name,
                                      crop_size=input_size,
                                      transform=transform,
                                      flip=args.flip)
    num_samples = len(lip_test_dataset)
    print('Total testing samples: {}'.format(num_samples))
    testloader = data.DataLoader(lip_test_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 pin_memory=True)

    # Load model weight
    state_dict = torch.load(args.model_restore)
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    model.cuda()
    model.eval()

    sp_results_dir = os.path.join(args.log_dir, args.split_name + '_parsing')
    if not os.path.exists(sp_results_dir):
        os.makedirs(sp_results_dir)

    palette = get_palette(20)  # 20 classes: the LIP label set
    parsing_preds = []
    scales = np.zeros((num_samples, 2), dtype=np.float32)
    centers = np.zeros((num_samples, 2), dtype=np.int32)
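    # Per-image scales/centers are recorded so predictions can be mapped back
    # to original image coordinates during evaluation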
    with torch.no_grad():
        for idx, batch in enumerate(tqdm(testloader)):
            image, meta = batch
            if len(image.shape) > 4:
                image = image.squeeze()
            im_name = meta['name'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]
            scales[idx, :] = s
            centers[idx, :] = c
            parsing, logits = multi_scale_testing(model,
                                                  image.cuda(),
                                                  crop_size=input_size,
                                                  flip=args.flip,
                                                  multi_scales=multi_scales)
            if args.save_results:
                parsing_result = transform_parsing(parsing, c, s, w, h,
                                                   input_size)
                parsing_result_path = os.path.join(sp_results_dir,
                                                   im_name + '.png')
                output_im = PILImage.fromarray(
                    np.asarray(parsing_result, dtype=np.uint8))
                output_im.putpalette(palette)
                output_im.save(parsing_result_path)
                # save logits
                logits_result = transform_logits(logits, c, s, w, h,
                                                 input_size)
                logits_result_path = os.path.join(sp_results_dir,
                                                  im_name + '.npy')
                np.save(logits_result_path, logits_result)
    return
Example #6
def main():
    args = get_arguments()

    # gpus = [int(i) for i in args.gpu.split(',')]
    # assert len(gpus) == 1
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']
    label = dataset_settings[args.dataset]['label']
    # Model in torch init
    model = networks.init_model('resnet101',
                                num_classes=num_classes,
                                pretrained=None)

    state_dict = torch.load(args.model_restore)  #['state_dict']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    secretary = change_dict.dictModify(new_dict=new_state_dict,
                                       old_dict=state_dict)
    new_state_dict = secretary.arange()
    model.load_state_dict(new_state_dict)

    model.eval()

    print("Evaluating total class number {} with {}".format(
        num_classes, label))

    # Inference session over the simplified ONNX graph exported earlier
    sess = onnxruntime.InferenceSession("pretrain_model/sim_local.onnx")

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    transformer = SimpleVideo(transforms=transform, input_size=[473, 473])

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    color_man = gray2color.makeColor(num_classes)
    VIDEO_PATH = "input.mp4"
    cap = cv2.VideoCapture(VIDEO_PATH)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter('outpy.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 24,
                          (frame_width * 3, frame_height))
    with torch.no_grad():
        start = time.time()
        count = 0
        ret = True
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                # w, h, _ = frame.shape
                # c, s, w, h = cal_chwsr(0, 0, w, h)
                # frame = torch.tensor(frame)
                print("frame: {}".format(frame.shape))
                frame, meta = transformer.get_item(frame)
                c = meta['center']
                s = meta['scale']
                w = meta['width']
                h = meta['height']

                # out_put = model(frame)
                # input_name = sess.get_inputs()[0].name
                # print("input name:", input_name)
                # input_shape = sess.get_inputs()[0].shape
                # print("input shape:", input_shape)
                # input_type = sess.get_inputs()[0].type
                # print("input type:", input_type)
                input_name = sess.get_inputs()[0].name
                # output_name = sess.get_outputs()[0].name
                output_names = ['output', '1250', '1245']

                pre_output = sess.run(
                    output_names,
                    input_feed={input_name: np.array(frame)})
                output = [[pre_output[0], pre_output[1]], [pre_output[2]]]
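                # Run the torch model on the same frame to compare against
                # the ONNX output below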
                output_t = model(frame)
                # Post-process for output from onnx model
                upsample = torch.nn.Upsample(size=input_size,
                                             mode='bilinear',
                                             align_corners=True)
                upsample_output = upsample(
                    torch.tensor(output[0][-1][0]).unsqueeze(0))
                upsample_output = upsample_output.squeeze()
                upsample_output = upsample_output.permute(1, 2,
                                                          0)  # CHW -> HWC

                logits_result = transform_logits(
                    upsample_output.data.cpu().numpy(),
                    c,
                    s,
                    w,
                    h,
                    input_size=input_size)
                parsing_result = np.argmax(logits_result, axis=2)
                output_img = Image.fromarray(
                    np.asarray(parsing_result, dtype=np.uint8))
                out_img_o = np.array(output_img)
                output_img_o = color_man.G2C(out_img_o)
                # Post-process for torch model
                upsample_output = upsample(output_t[0][-1][0].unsqueeze(0))
                upsample_output = upsample_output.squeeze()
                upsample_output = upsample_output.permute(1, 2,
                                                          0)  # CHW -> HWC

                logits_result = transform_logits(
                    upsample_output.data.cpu().numpy(),
                    c,
                    s,
                    w,
                    h,
                    input_size=input_size)
                parsing_result = np.argmax(logits_result, axis=2)
                output_img = Image.fromarray(
                    np.asarray(parsing_result, dtype=np.uint8))
                out_img_t = np.array(output_img)
                output_img_t = color_man.G2C(out_img_t)

                # final = cv2.hconcat(output_img_t, out_img_t-out_img_o)
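                # Side-by-side: torch result | diff mask (scaled so small
                # disagreements are visible) | onnx result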
                diff = cv2.cvtColor(out_img_t - out_img_o,
                                    cv2.COLOR_GRAY2RGB) * 100
                final = cv2.hconcat([output_img_t, diff, output_img_o])

                out.write(np.array(final))
                cv2.imshow("Tanned", np.array(final))
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                if args.logits:
                    # NOTE: the original referenced an undefined img_name
                    # here; a frame-indexed filename is assumed instead
                    logits_result_path = os.path.join(
                        args.output_dir, 'frame_{:06d}.npy'.format(count))
                    np.save(logits_result_path, logits_result)
                count += 1
            else:
                break
        end = time.time()
        cap.release()
        out.release()
        print(
            "Processed {} frames in {:.5} seconds ({:.5} seconds per frame on average)"
            .format(count, end - start,
                    (end - start) / (count + 0.1)))  # +0.1 avoids div-by-zero
    return
Example #7
def main():
    args = get_arguments()

    gpus = [int(i) for i in args.gpu.split(',')]
    assert len(gpus) == 1
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    num_classes = dataset_settings[args.dataset]['num_classes']
    input_size = dataset_settings[args.dataset]['input_size']
    label = dataset_settings[args.dataset]['label']
    print("Evaluating total class number {} with {}".format(
        num_classes, label))

    model = networks.init_model('resnet101',
                                num_classes=num_classes,
                                pretrained=None)

    state_dict = torch.load(args.model_restore)['state_dict']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    model.cuda()
    model.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.406, 0.456, 0.485],
                             std=[0.225, 0.224, 0.229])
    ])
    dataset = SimpleFolderDataset(root=args.input_dir,
                                  input_size=input_size,
                                  transform=transform)
    dataloader = DataLoader(dataset)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    palette = get_palette(num_classes)
    with torch.no_grad():
        for idx, batch in enumerate(tqdm(dataloader)):
            image, meta = batch
            img_name = meta['name'][0]
            img_path = meta['img_path'][0]
            c = meta['center'].numpy()[0]
            s = meta['scale'].numpy()[0]
            w = meta['width'].numpy()[0]
            h = meta['height'].numpy()[0]

            output = model(image.cuda())
            upsample = torch.nn.Upsample(size=input_size,
                                         mode='bilinear',
                                         align_corners=True)
            upsample_output = upsample(output[0][-1][0].unsqueeze(0))
            upsample_output = upsample_output.squeeze()
            upsample_output = upsample_output.permute(1, 2, 0)  # CHW -> HWC

            logits_result = transform_logits(
                upsample_output.data.cpu().numpy(),
                c,
                s,
                w,
                h,
                input_size=input_size)
            parsing_result = np.argmax(logits_result, axis=2)

            output_img = Image.fromarray(
                np.asarray(parsing_result, dtype=np.uint8))
            output_img.putpalette(palette)
            png_path = os.path.join(args.output_dir, img_name[:-4] + '.png')
            output_img.save(png_path)
            # 'lip': {
            #     'input_size': [473, 473],
            #     'num_classes': 20,
            #     'label': ['Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat',
            #               'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm',
            #               'Left-leg', 'Right-leg', 'Left-shoe', 'Right-shoe']
            # },

            # Keep classes with index >= 5 (clothing, limbs, shoes) but drop
            # 13 (Face); see the commented 'lip' label list above
            parsing_result = (parsing_result >= 5) & (parsing_result != 13)
            parsing_result = parsing_result.astype(np.uint8) * 255

            org_img = Image.open(img_path)
            f = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8))
            org_img.putalpha(f)
            org_img = np.array(org_img)

            # https://stackoverflow.com/a/55973647/1513627
            # Alpha -> Green
            org_img[org_img[..., -1] == 0] = [0, 255, 0, 0]
            jpg_path = os.path.join(args.output_dir, img_name[:-4] + '.jpg')
            Image.fromarray(org_img).convert('RGB').save(jpg_path)

            if args.logits:
                logits_result_path = os.path.join(args.output_dir,
                                                  img_name[:-4] + '.npy')
                np.save(logits_result_path, logits_result)
    return
Example #8
def process_image(feet, inch):

    if (os.path.isfile(os.getcwd() + image_saved_path + '/front.png')
            and os.path.isfile(os.getcwd() + image_saved_path + '/side.png')):
        input_size = [512, 512]
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.406, 0.456, 0.485],
                                 std=[0.225, 0.224, 0.229])
        ])
        dataset = SimpleFolderDataset(root=os.getcwd() + image_saved_path,
                                      input_size=input_size,
                                      transform=transform)
        dataloader = DataLoader(dataset)

        palette = get_palette(num_classes)

        with torch.no_grad():
            for idx, batch in enumerate(tqdm(dataloader)):
                image, meta = batch
                img_name = meta['name'][0]
                c = meta['center'].numpy()[0]
                s = meta['scale'].numpy()[0]
                w = meta['width'].numpy()[0]
                h = meta['height'].numpy()[0]

                output = model(image.cuda())
                upsample = torch.nn.Upsample(size=input_size,
                                             mode='bilinear',
                                             align_corners=True)
                upsample_output = upsample(output[0][-1][0].unsqueeze(0))
                upsample_output = upsample_output.squeeze()
                upsample_output = upsample_output.permute(1, 2,
                                                          0)  # CHW -> HWC

                logits_result = transform_logits(
                    upsample_output.data.cpu().numpy(),
                    c,
                    s,
                    w,
                    h,
                    input_size=input_size)
                parsing_result = np.argmax(logits_result, axis=2)
                parsing_result_path = os.path.join(
                    os.getcwd() + process_image_path, img_name[:-4] + '.png')
                output_img = Image.fromarray(
                    np.asarray(parsing_result, dtype=np.uint8))
                output_img.putpalette(palette)
                output_img.save(parsing_result_path)

        actual_height = (12 * feet + inch) * 2.54  # in cm
        i1_path = os.getcwd() + image_saved_path + '/front.png'
        i1 = cv2.imread(i1_path)
        m1_path = os.getcwd() + process_image_path + '/front.png'
        m1 = cv2.imread(m1_path)
        i2_path = os.getcwd() + image_saved_path + '/side.png'
        i2 = cv2.imread(i2_path)
        m2_path = os.getcwd() + process_image_path + '/side.png'
        m2 = cv2.imread(m2_path)
        status, waist, chest, thigh, front_sleeve_in_cm, dis_in_cm, image, side_image = get_measuremnets(
            i1, m1, i2, m2, actual_height, True)

        return status, waist, chest, thigh, front_sleeve_in_cm, dis_in_cm, image, side_image

    else:
        return False, -1, -1, -1, -1, -1, None, None