Example #1

def main(args):
    # Select the hardware device to use for inference.
    if torch.cuda.is_available():
        device = torch.device('cuda', torch.cuda.current_device())
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')

    # Disable gradient calculations.
    torch.set_grad_enabled(False)

    pretrained = not args.model_file

    if pretrained:
        print(
            'No model weights file specified, using pretrained weights instead.'
        )

    # Create the model, downloading pretrained weights if necessary.
    if args.arch == 'hg1':
        model = hg1(pretrained=pretrained)
    elif args.arch == 'hg2':
        model = hg2(pretrained=pretrained)
    elif args.arch == 'hg8':
        model = hg8(pretrained=pretrained)
    else:
        raise Exception('unrecognised model architecture: ' + args.arch)
    model = model.to(device)

    if not pretrained:
        assert os.path.isfile(args.model_file)
        print('Loading model weights from file: {}'.format(args.model_file))
        checkpoint = torch.load(args.model_file, map_location=device)
        state_dict = checkpoint['state_dict']
        if sorted(state_dict.keys())[0].startswith('module.'):
            model = DataParallel(model)
        model.load_state_dict(state_dict)

    # Initialise the MPII validation set dataloader.
    # val_dataset = Mpii(args.image_path, is_train=False)
    # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
    #                         num_workers=args.workers, pin_memory=True)

    # Generate predictions for the validation set.
    # _, _, predictions = do_validation_epoch(val_loader, model, device, Mpii.DATA_INFO, args.flip)

    # Build the predictor from the model and device selected above.
    predictor = HumanPosePredictor(model, device=device)
    # my_image = image_loader("../inference-img/1.jpg")
    # joints = image_inference(predictor, image_path=None, my_image=my_image)
    # imshow(my_image, joints=joints)
    if args.camera:
        # Read frames from the default webcam.
        inference_video(predictor, 0)
    else:
        inference_video(predictor, "../inference-video/R6llTwEh07w.mp4")
Example #2

def main(args):
    # Select the hardware device to use for inference.
    if torch.cuda.is_available():
        device = torch.device('cuda', torch.cuda.current_device())
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')

    # Disable gradient calculations.
    torch.set_grad_enabled(False)

    pretrained = not args.model_file

    if pretrained:
        print(
            'No model weights file specified, using pretrained weights instead.'
        )

    # Create the model, downloading pretrained weights if necessary.
    if args.arch == 'hg1':
        model = hg1(pretrained=pretrained)
    elif args.arch == 'hg2':
        model = hg2(pretrained=pretrained)
    elif args.arch == 'hg8':
        model = hg8(pretrained=pretrained)
    else:
        raise Exception('unrecognised model architecture: ' + args.arch)
    model = model.to(device)

    if not pretrained:
        assert os.path.isfile(args.model_file)
        print('Loading model weights from file: {}'.format(args.model_file))
        checkpoint = torch.load(args.model_file, map_location=device)
        state_dict = checkpoint['state_dict']
        if sorted(state_dict.keys())[0].startswith('module.'):
            model = DataParallel(model)
        model.load_state_dict(state_dict)

    # Initialise the MPII validation set dataloader.
    val_dataset = Mpii(args.image_path, is_train=False)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)

    # Generate predictions for the validation set.
    _, _, predictions = do_validation_epoch(val_loader, model, device,
                                            Mpii.DATA_INFO, args.flip)

    # Report PCKh for the predictions.
    print('\nFinal validation PCKh scores:\n')
    print_mpii_validation_accuracy(predictions)
Example #3
from PIL import Image
import torch
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import time, cv2, json, os
from astropy.convolution import Gaussian1DKernel, convolve
import PIL.ExifTags as ExifTags
from stacked_hourglass import HumanPosePredictor, hg8
from predict_arm_angle import *

# ...load image of a person into a PyTorch tensor...

name = "IMG_4529"

predictor = HumanPosePredictor(hg8(pretrained=True), device='cuda')
angle_predictor = arm_angle_predict()
model = xgb.XGBClassifier(max_depth=3,
                          learning_rate=0.1,
                          n_estimators=100,
                          silent=True,
                          objective='binary:logistic',
                          booster='gbtree',
                          n_jobs=1,
                          nthread=None,
                          gamma=0,
                          min_child_weight=1,
                          max_delta_step=0,
                          subsample=1,
                          colsample_bytree=1,
                          colsample_bylevel=1,
                          reg_alpha=0,
                          reg_lambda=1,
                          scale_pos_weight=1,
                          base_score=0.5,
                          random_state=0,
                          seed=None,
                          missing=None)
Example #4
def main(args):
    """Train/cross-validate for data source = YogiDB."""
    # Create data loader. Generic's signature, for reference:
    # Generic(data.Dataset)(image_set, annotations,
    #                       is_train=True, inp_res=256, out_res=64, sigma=1,
    #                       scale_factor=0, rot_factor=0, label_type='Gaussian',
    #                       rgb_mean=RGB_MEAN, rgb_stddev=RGB_STDDEV)
    annotations_source = 'basic-thresholder'

    # Get the data from yogi
    db_obj = YogiDB(config.db_url)
    imageset = db_obj.get_filtered(ImageSet,
                                   name=args.image_set_name)
    annotations = db_obj.get_annotations(image_set_name=args.image_set_name,
                                         annotation_source=annotations_source)
    pts = torch.Tensor(annotations[0]['joint_self'])
    num_classes = pts.size(0)
    crop_size = 512
    if args.crop:
        crop_size = args.crop
        crop = True
    else:
        crop = False

    # Use the default RGB mean and std dev of 0
    RGB_MEAN = torch.as_tensor([0.0, 0.0, 0.0])
    RGB_STDDEV = torch.as_tensor([0.0, 0.0, 0.0])

    dataset = Generic(image_set=imageset,
                      inp_res=args.inp_res,
                      out_res=args.out_res,
                      annotations=annotations,
                      mode=args.mode,
                      crop=crop, crop_size=crop_size,
                      rgb_mean=RGB_MEAN, rgb_stddev=RGB_STDDEV)

    train_dataset = dataset
    train_dataset.is_train = True
    train_loader = DataLoader(train_dataset,
                              batch_size=args.train_batch, shuffle=True,
                              num_workers=args.workers, pin_memory=True)

    val_dataset = dataset
    val_dataset.is_train = False
    val_loader = DataLoader(val_dataset,
                            batch_size=args.test_batch, shuffle=False,
                            num_workers=args.workers, pin_memory=True)

    # Select the hardware device to use for inference.
    if torch.cuda.is_available():
        device = torch.device('cuda', torch.cuda.current_device())
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')

    # Disable gradient calculations by default.
    torch.set_grad_enabled(False)

    # create checkpoint dir
    os.makedirs(args.checkpoint, exist_ok=True)

    if args.arch == 'hg1':
        model = hg1(pretrained=False, num_classes=num_classes)
    elif args.arch == 'hg2':
        model = hg2(pretrained=False, num_classes=num_classes)
    elif args.arch == 'hg8':
        model = hg8(pretrained=False, num_classes=num_classes)
    else:
        raise Exception('unrecognised model architecture: ' + args.arch)

    model = DataParallel(model).to(device)

    if args.optimizer == "Adam":
        # Note: torch.optim.Adam takes no momentum argument (it uses betas).
        optimizer = Adam(model.parameters(),
                         lr=args.lr,
                         weight_decay=args.weight_decay)
    else:
        optimizer = RMSprop(model.parameters(),
                            lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
    best_acc = 0

    # optionally resume from a checkpoint
    title = args.data_identifier + ' ' + args.arch
    if args.resume:
        assert os.path.isfile(args.resume)
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})"
              .format(args.resume, checkpoint['epoch']))
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    # train and eval
    lr = args.lr
    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule, args.gamma)
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # train for one epoch
        train_loss, train_acc = do_training_epoch(train_loader, model, device, optimizer)

        # evaluate on validation set
        if args.debug == 1:
            valid_loss, valid_acc, predictions, validation_log = do_validation_epoch(
                val_loader, model, device, False, True,
                os.path.join(args.checkpoint, 'debug.csv'), epoch + 1)
        else:
            valid_loss, valid_acc, predictions, _ = do_validation_epoch(
                val_loader, model, device, False)

        # append logger file
        logger.append([epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])

        # remember best acc and save checkpoint
        is_best = valid_acc > best_acc
        best_acc = max(valid_acc, best_acc)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc': best_acc,
            'optimizer': optimizer.state_dict(),
        }, predictions, is_best, checkpoint=args.checkpoint, snapshot=args.snapshot)

    logger.close()
    logger.plot(['Train Acc', 'Val Acc'])
    savefig(os.path.join(args.checkpoint, 'log.eps'))
Example #5

def main(args):
    # Select the hardware device to use for inference.
    if torch.cuda.is_available():
        device = torch.device('cuda', torch.cuda.current_device())
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')

    # Disable gradient calculations by default.
    torch.set_grad_enabled(False)

    # create checkpoint dir
    os.makedirs(args.checkpoint, exist_ok=True)

    if args.arch == 'hg1':
        model = hg1(pretrained=False)
    elif args.arch == 'hg2':
        model = hg2(pretrained=False)
    elif args.arch == 'hg8':
        model = hg8(pretrained=False)
    else:
        raise Exception('unrecognised model architecture: ' + args.arch)

    model = DataParallel(model).to(device)

    optimizer = RMSprop(model.parameters(),
                        lr=args.lr,
                        momentum=args.momentum,
                        weight_decay=args.weight_decay)

    best_acc = 0

    # optionally resume from a checkpoint
    if args.resume:
        assert os.path.isfile(args.resume)
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'))
        logger.set_names(
            ['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    # create data loader
    train_dataset = Mpii(args.image_path, is_train=True)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.train_batch,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)

    val_dataset = Mpii(args.image_path, is_train=False)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.test_batch,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)

    # train and eval
    lr = args.lr
    for epoch in trange(args.start_epoch,
                        args.epochs,
                        desc='Overall',
                        ascii=True):
        lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule,
                                  args.gamma)

        # train for one epoch
        train_loss, train_acc = do_training_epoch(train_loader,
                                                  model,
                                                  device,
                                                  Mpii.DATA_INFO,
                                                  optimizer,
                                                  acc_joints=Mpii.ACC_JOINTS)

        # evaluate on validation set
        valid_loss, valid_acc, predictions = do_validation_epoch(
            val_loader,
            model,
            device,
            Mpii.DATA_INFO,
            False,
            acc_joints=Mpii.ACC_JOINTS)

        # print metrics
        tqdm.write(
            f'[{epoch + 1:3d}/{args.epochs:3d}] lr={lr:0.2e} '
            f'train_loss={train_loss:0.4f} train_acc={100 * train_acc:0.2f} '
            f'valid_loss={valid_loss:0.4f} valid_acc={100 * valid_acc:0.2f}')

        # append logger file
        logger.append(
            [epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])
        logger.plot_to_file(os.path.join(args.checkpoint, 'log.svg'),
                            ['Train Acc', 'Val Acc'])

        # remember best acc and save checkpoint
        is_best = valid_acc > best_acc
        best_acc = max(valid_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            predictions,
            is_best,
            checkpoint=args.checkpoint,
            snapshot=args.snapshot)

    logger.close()
Example #6
def main(args):
    """Predict for data source = YogiDB."""
    # Select the hardware device to use for inference.
    if torch.cuda.is_available():
        device = torch.device('cuda', torch.cuda.current_device())
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')

    # Disable gradient calculations.
    torch.set_grad_enabled(False)

    pretrained = not args.model_file

    if pretrained:
        print('No model weights file specified, exiting.')
        exit()

    # Get the data from yogi
    db_obj = YogiDB(config.db_url)
    imageset = db_obj.get_filtered(ImageSet, name=args.image_set_name)
    annotations_source = 'basic-thresholder'
    annotations = db_obj.get_annotations(annotation_source=annotations_source)
    pts = torch.Tensor(annotations[0]['joint_self'])
    num_classes = pts.size(0)
    # Initialise the Yogi validation set dataloader.
    val_dataset = Generic(image_set=imageset,
                          inp_res=args.inp_res,
                          out_res=args.out_res,
                          annotations=annotations,
                          mode=args.mode)

    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True)

    # Create the model, downloading pretrained weights if necessary.
    if args.arch == 'hg1':
        model = hg1(pretrained=False, num_classes=num_classes)
    elif args.arch == 'hg2':
        model = hg2(pretrained=False, num_classes=num_classes)
    elif args.arch == 'hg8':
        model = hg8(pretrained=False, num_classes=num_classes)
    else:
        raise Exception('unrecognised model architecture: ' + args.arch)
    model = model.to(device)

    # Load the specified model weights (the model is created untrained above,
    # so the checkpoint must be loaded before running inference).
    checkpoint = torch.load(args.model_file, map_location=device)
    state_dict = checkpoint['state_dict']
    if sorted(state_dict.keys())[0].startswith('module.'):
        model = DataParallel(model)
    model.load_state_dict(state_dict)

    # create output dir
    os.makedirs(args.output_location, exist_ok=True)
    title = args.image_set_name + ' ' + args.arch
    filename_pre = title + '_preds_' + datetime.now().strftime("%Y%m%dT%H%M%S")
    log_filepath = os.path.join(args.output_location,
                                filename_pre + '_log.txt')
    logger = Logger(log_filepath, title=title)
    logger.set_names(['Val Loss', 'Val Acc'])

    # Generate predictions for the validation set.
    valid_loss, valid_acc, predictions = do_validation_epoch(
        val_loader, model, device, args.flip)

    # append logger file
    logger.append([valid_loss, valid_acc])
    # TODO: Report PCKh for the predictions.
    # print('\nFinal validation PCKh scores:\n')
    # print_mpii_validation_accuracy(predictions)

    predictions = to_numpy(predictions)
    filepath = os.path.join(args.output_location, filename_pre + '.mat')
    scipy.io.savemat(filepath, mdict={'preds': predictions})
Example #7
from stacked_hourglass import HumanPosePredictor, hg2, hg8, hg1
from stacked_hourglass.utils.transforms import shufflelr, crop, color_normalize, fliplr, transform
from PIL import Image
import torch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import time, math
import PIL.ExifTags as ExifTags

# ...load image of a person into a PyTorch tensor...

model = hg8(pretrained=True)
predictor = HumanPosePredictor(model, device='cpu')

print("==model loaded==")

RGB_MEAN = torch.as_tensor([0.4404, 0.4440, 0.4327])
RGB_STDDEV = torch.as_tensor([0.2458, 0.2410, 0.2468])
#im = np.asarray(Image.open("./images/test14.jpeg"))
orgImg = Image.open("./dataset/up/44.png")
try:
    # Respect the EXIF orientation tag so the image is upright.
    for orientation in ExifTags.TAGS.keys():
        if ExifTags.TAGS[orientation] == 'Orientation':
            break
    exif = dict(orgImg._getexif().items())
    if exif[orientation] == 3:
        orgImg = orgImg.rotate(180, expand=True)
    elif exif[orientation] == 6:
        orgImg = orgImg.rotate(270, expand=True)
    elif exif[orientation] == 8:
        orgImg = orgImg.rotate(90, expand=True)
except (AttributeError, KeyError, IndexError):
    pass  # no usable EXIF orientation data
Example #8
    def __init__(self, fn):

        filename = fn  # store the file name

        # get video and slice into frame
        viddir = filename
        vidcap = cv2.VideoCapture(viddir)
        vidlen = len(viddir)
        except_filename = 0

        # Strip the [video name].mp4 part from the loaded path
        for i in range(vidlen - 1, -1, -1):
            if viddir[i] == '/':
                except_filename = i
                break
        viddir = viddir[0:except_filename + 1]

        # Directory where the extracted frame images will be stored
        paradir = viddir + 'tmp+img'  # viddir already ends with '/'
        self.createFolder(paradir)

        # Split the video into frames with the getFrame helper
        sec = 0
        frameRate = 0.1  # capture one frame every 0.1 seconds
        count = 100
        success = self.getFrame(sec, vidcap, viddir, count)
        while success:
            count = count + 1
            sec = sec + frameRate
            sec = round(sec, 2)
            success = self.getFrame(sec, vidcap, viddir, count)

        self.predictor = HumanPosePredictor(hg8(pretrained=True),
                                            device='cuda')
        print("==model loaded==")
        # ...load image of a person into a PyTorch tensor...

        name = ""
        predictor = self.predictor  # reuse the predictor loaded above
        model = xgb.XGBClassifier(max_depth=3,
                                  learning_rate=0.1,
                                  n_estimators=100,
                                  silent=True,
                                  objective='binary:logistic',
                                  booster='gbtree',
                                  n_jobs=1,
                                  nthread=None,
                                  gamma=0,
                                  min_child_weight=1,
                                  max_delta_step=0,
                                  subsample=1,
                                  colsample_bytree=1,
                                  colsample_bylevel=1,
                                  reg_alpha=0,
                                  reg_lambda=1,
                                  scale_pos_weight=1,
                                  base_score=0.5,
                                  random_state=0,
                                  seed=None,
                                  missing=None)
        model.load_model(viddir + 'xgboost.bst')  # load model
        g = Gaussian1DKernel(stddev=4)

        print("==model loaded==")

        RGB_MEAN = torch.as_tensor([0.4404, 0.4440, 0.4327])
        RGB_STDDEV = torch.as_tensor([0.2458, 0.2410, 0.2468])

        images = os.listdir("./" + name)
        print(images)
        images = sorted(images, key=lambda x: int(x.split(".")[0]))
        print("frames : ", len(images))

        result = {"name": name, "frames": dict()}

        threshold = 0.4
        img_array = list()
        testResult = list()
        for i in images:
            orgImg = Image.open("./" + name + i)
            #orgImg = orgImg.rotate(270, expand=True)
            print("!precessing " + str(i))

            try:
                for orientation in ExifTags.TAGS.keys():
                    if ExifTags.TAGS[orientation] == 'Orientation':
                        break
                exif = dict(orgImg._getexif().items())
                if exif[orientation] == 3:
                    orgImg = orgImg.rotate(180, expand=True)
                elif exif[orientation] == 6:
                    orgImg = orgImg.rotate(270, expand=True)
                elif exif[orientation] == 8:
                    orgImg = orgImg.rotate(90, expand=True)
            except (AttributeError, KeyError, IndexError):
                pass  # no usable EXIF orientation data

            im = np.asarray(orgImg)
            idx = int(i[:-4])

            img = torch.tensor(im).transpose(0, 2)
            img = color_normalize(img, RGB_MEAN, RGB_STDDEV)
            joints = predictor.estimate_joints(img, flip=True)
            xs, ys = list(joints[:, 0].numpy()), list(joints[:, 1].numpy())

            angles = self.predictFromjoints(joints)

            prob = model.predict_proba(np.asarray(angles).reshape(1, 4))[0][1]
            testResult.append(prob)
            smoothing = convolve(testResult[:], g)
            #smoothing = selfG.smoothListGaussian(testResult[:])

            # Count repetitions: one rep is complete each time the smoothed
            # probability rises through the threshold and then falls back
            # below it.
            ascending = True
            descending = False
            count = 0
            for p in range(1, len(smoothing) - 1):
                if ((smoothing[p] < threshold
                     and smoothing[p + 1] >= threshold) and ascending):
                    ascending = False
                    descending = True
                elif ((smoothing[p] > threshold
                       and smoothing[p + 1] <= threshold) and descending):
                    ascending = True
                    descending = False
                    count += 1

            orgImg = np.array(orgImg)
            height, width, layers = orgImg.shape
            size = (width, height)
            img_array.append(cv2.cvtColor(orgImg, cv2.COLOR_BGR2RGB))
            # orgImg = cv2.line(orgImg, (ys[0], xs[0]), (ys[1], xs[1]), (10, 255, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[1], xs[1]), (ys[2], xs[2]), (50, 160, 50), 2)
            # orgImg = cv2.line(orgImg, (ys[5], xs[5]), (ys[4], xs[4]), (50, 0, 255), 2)
            # orgImg = cv2.line(orgImg, (ys[4], xs[4]), (ys[3], xs[3]), (255, 0, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[3], xs[3]), (ys[6], xs[6]), (255, 0, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[6], xs[6]), (ys[2], xs[2]), (30, 30, 130), 2)
            orgImg = cv2.line(orgImg, (ys[6], xs[6]), (ys[7], xs[7]),
                              (153, 255, 255), 3)  # back
            # orgImg = cv2.line(orgImg, (ys[7], xs[7]), (ys[8], xs[8]), (255, 0, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[8], xs[8]), (ys[9], xs[9]), (255, 0, 0), 2)
            orgImg = cv2.line(orgImg, (ys[10], xs[10]), (ys[11], xs[11]),
                              (153, 255, 255), 3)  # left forearm
            orgImg = cv2.line(orgImg, (ys[11], xs[11]), (ys[12], xs[12]),
                              (153, 255, 255), 3)
            orgImg = cv2.line(orgImg, (ys[12], xs[12]), (ys[7], xs[7]),
                              (153, 255, 255), 3)  # left lat
            orgImg = cv2.line(orgImg, (ys[14], xs[14]), (ys[13], xs[13]),
                              (153, 255, 255), 3)  # right biceps
            orgImg = cv2.line(orgImg, (ys[7], xs[7]), (ys[13], xs[13]),
                              (153, 255, 255), 3)  # right lat
            orgImg = cv2.line(orgImg, (ys[14], xs[14]), (ys[15], xs[15]),
                              (153, 255, 255), 3)  # right forearm
            if len(smoothing) > 0:
                orgImg = cv2.putText(orgImg, str(smoothing[-1]), (30, 30),
                                     cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0),
                                     2, cv2.LINE_AA)
                orgImg = cv2.putText(orgImg, "count : " + str(count), (30, 75),
                                     cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0),
                                     2, cv2.LINE_AA)

            #cv2.imshow('image', cv2.cvtColor(orgImg, cv2.COLOR_BGR2RGB))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        out = cv2.VideoWriter('result.avi', cv2.VideoWriter_fourcc(*'mp4v'),
                              30, size)
        for i in range(len(img_array)):
            out.write(img_array[i])
        out.release()

        print("done!")

        cv2.destroyAllWindows()
Example #9

    def __init__(self):
        self.predictor = HumanPosePredictor(hg8(pretrained=True), device='cpu')
        print("==model loaded==")