Example #1
    def vp3d_model(self):
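        """Classify the exercise in ``self.clip_df`` and return its 3D keypoints.

        The 2D keypoints are interpolated, cleaned and rescaled, the pretrained
        classifier is run over the whole clip, the majority-vote class is stored
        in ``self.prediction``, and the flattened 3D keypoints plus the frame
        count are returned.
        """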
        
        clip_df = interpolate(self.clip_df, interpolate_feet=False)
        clip_df = delete_nans(clip_df)
        # Rescale 2D keypoints by 800/224, presumably mapping the 224-px detection
        # resolution back to the original frame scale.
        multiplier = round(800 / 224, 2)
        clip_df = rescale_keypoints(clip_df, multiplier)

        actions, poses = fetch_keypoints(clip_df)
        classes = 8

        chk_filename = os.path.join(CHECKPATH,"Recipe-2-epoch-19.pth")
        model = build_model(chk_filename, in_joints, in_dims, out_joints, filter_widths, True, channels, embedding_len,classes)
        
        # Fine-tuned weights, presumably replacing the checkpoint loaded by build_model above.
        pretrained = torch.load('../../virtual_trainer/Virtual_trainer/checkpoint/combinedlearning2-5.pth',
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(pretrained['model_state_dict'])

        with torch.no_grad():
            model.eval()
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            model = model.to(device)
            try:
                poses = np.concatenate(poses)
            except ValueError:
                # fetch_keypoints found no usable frames, i.e. no person was detected.
                self.prediction = "No human detected"
                return self
            # Edge-pad 54 leading frames, presumably to fill the causal receptive
            # field of the temporal convolutions.
            poses = np.pad(poses, ((54, 0), (0, 0), (0, 0)), 'edge')
            poses = torch.Tensor(np.expand_dims(poses, axis=0)).to(device)
            # print(f'Poses shape: {poses.shape}')
            embeds, preds = model(poses)
            kp_3d = model.transform.get_kp()
            n_frames = kp_3d.shape[1]
            kp_3d *= np.array([1,-1,1])
            kp_3d = kp_3d.reshape(-1)
            # Per-frame class probabilities, then per-frame argmax.
            softmax = torch.nn.Softmax(1)
            pred = softmax(preds)
            pred = pred.detach().cpu().numpy().squeeze()
            print(pred)
            preds = np.argmax(pred, axis=1)
            print(preds)
            values, counts = np.unique(preds, return_counts=True)
            # Majority vote across frames picks the predicted exercise.
            ind = np.argmax(counts)
            print(EXC_DICT[values[ind]])
            self.prediction = EXC_DICT[values[ind]]
            print(self.prediction)
            return kp_3d, n_frames
Example #2
    def vp3d_recipe2(self):
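        """Classify the exercise in ``self.clip_df`` with the Recipe-2 checkpoint.

        The clip is run through two model variants (``NaiveBaselineModel`` and the
        ``build_model`` wrapper); each takes a majority vote over per-frame
        predictions, and the second result is the one kept in ``self.prediction``.
        """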
        
        clip_df = interpolate(self.clip_df, interpolate_feet=False)
        clip_df = delete_nans(clip_df)
        multiplier = round(800 / 224, 2)
        clip_df = rescale_keypoints(clip_df, multiplier)

        actions, poses = fetch_keypoints(clip_df)
        classes = 8

        
        chk_filename = os.path.join(DATAPOINT,'BaseModels', 'epoch_45.bin')
        pretrained_weights = torch.load(chk_filename, map_location=lambda storage, loc: storage)


        model = NaiveBaselineModel(in_joints, in_dims, out_joints, filter_widths, pretrained_weights, embedding_len, classes,
                                    causal=True, dropout=0.25, channels=channels)
        receptive_field = model.base_model.receptive_field()
        pad = (receptive_field - 1) 
        causal_shift = pad
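        # pad / causal_shift are derived from the model's receptive field but are
        # not used further in this method.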
        chk_filename = os.path.join(CHECKPATH, "Recipe-2-epoch-19.pth")
        checkp = torch.load(chk_filename, map_location=lambda storage, loc: storage)
        model.load_state_dict(checkp['model_state_dict'])

        model_rank = SimpleRegression([128, 64, 32])
        chk_filename = os.path.join(CHECKPATH,"regressor-simple-regressor-grouped-512-750.pth")
        model_rank.load_state_dict(torch.load(chk_filename)['model_state_dict'])
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        with torch.no_grad():
            model.eval()
            model_rank.eval()
            model = model.to(device)
            model_rank = model_rank.to(device)
            try:
                poses = np.concatenate(poses)
            except ValueError:
                self.prediction = "No human detected"
                return self
            # Edge-pad 54 leading frames (presumably the causal receptive field).
            poses = np.pad(poses, ((54, 0), (0, 0), (0, 0)), 'edge')
            poses = torch.Tensor(np.expand_dims(poses, axis=0)).to(device)
            preds = model(poses)
            softmax = torch.nn.Softmax(1)
            pred = softmax(preds)
            pred = pred.detach().cpu().numpy().squeeze()
            print(pred)
            preds = np.argmax(pred, axis=0)
            print(preds)
            values, counts = np.unique(preds,return_counts=True)
            print(values)
            print(counts)
            ind = np.argmax(counts)
            print(values[ind])
            print(EXC_DICT[values[ind]])
            self.prediction = EXC_DICT[values[ind]]

        # Second pass: run the same clip through the build_model variant, which
        # also returns embeddings; its majority vote overwrites self.prediction.
        chk_filename = os.path.join(CHECKPATH, "Recipe-2-epoch-19.pth")
        model = build_model(chk_filename, in_joints, in_dims, out_joints, filter_widths, True, channels, embedding_len, classes)
        with torch.no_grad():
            model.eval()
            model = model.to(device)
            embeds, preds = model(poses)
            softmax = torch.nn.Softmax(1)
            pred = softmax(preds)
            pred = pred.detach().cpu().numpy().squeeze()
            print(pred)
            preds = np.argmax(pred, axis=1)
            print(preds)
            values, counts = np.unique(preds, return_counts=True)
            print(values)
            print(counts)
            ind = np.argmax(counts)
            print(EXC_DICT[values[ind]])
            self.prediction = EXC_DICT[values[ind]]

            # ratings = model_rank(embeds).detach().cpu().numpy()
            # self.rating = np.mean(ratings)
            return self
Example #3
    def vp3d_recipe2(self):
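        """Classify and rate the exercise in ``self.clip_df`` using the preloaded models.

        ``self.class_model`` gives per-frame class probabilities, ``self.model_embs``
        gives embeddings for the rating regressor ``self.model_rank``; the results
        are stored in ``self.prediction`` and ``self.rating``.
        """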
        clip_df = interpolate(self.clip_df, interpolate_feet=False)

        clip_df = delete_nans(clip_df)
        multiplier = round(800 / 224, 2)
        clip_df = rescale_keypoints(clip_df, multiplier)

        actions, poses = fetch_keypoints(clip_df)
        # classes = 8

        # chk_filename = os.path.join(DATAPOINT,'BaseModels', 'epoch_45.bin')
        # pretrained_weights = torch.load(chk_filename, map_location=lambda storage, loc: storage)

        # model = NaiveBaselineModel(in_joints, in_dims, out_joints, filter_widths, pretrained_weights, embedding_len, classes,
        #                             causal=True, dropout=0.25, channels=channels)
        # receptive_field = model.base_model.receptive_field()
        # pad = (receptive_field - 1)
        # causal_shift = pad
        # chk_filename = os.path.join(CHECKPATH,"Recipe-2-epoch-19.pth")
        # checkp = torch.load(chk_filename)
        # model.load_state_dict(checkp['model_state_dict'])

        # model_embs = HeadlessNet2(copy.deepcopy(model))
        # model_rank =  SimpleRegression([128,64,32])
        # chk_filename = os.path.join(CHECKPATH,"regressor-simple-regressor-grouped-recipe2-512-600.pth")
        # model_rank.load_state_dict(torch.load(chk_filename)['model_state_dict'])
        model = self.class_model
        model_embs = self.model_embs
        model_rank = self.model_rank
        with torch.no_grad():
            model.eval()
            model_rank.eval()
            model_embs.eval()
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            model = model.to(device)
            model_embs = model_embs.to(device)
            model_rank = model_rank.to(device)
            try:
                poses = np.concatenate(poses)
            except ValueError:
                self.prediction = "No human detected"
                # self.rating = [{'x':1,'y':9},
                #                 {'x':2,'y':8}]
                self.rating = None
                return self
            # Edge-pad 54 leading frames (presumably the model's causal receptive field).
            poses = np.pad(poses, ((54, 0), (0, 0), (0, 0)), 'edge')
            poses = torch.Tensor(np.expand_dims(poses, axis=0)).to(device)
            preds = model(poses)
            embeds = model_embs(poses)
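            # Permute embeddings to (batch, frames, features), presumably so the
            # rating regressor can score each frame independently.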
            embeds = embeds.permute(0, 2, 1)
            softmax = torch.nn.Softmax(1)
            pred = softmax(preds)
            pred = pred.detach().cpu().numpy().squeeze()
            preds = np.argmax(pred, axis=0)
            print(preds)
            values, counts = np.unique(preds, return_counts=True)
            clip_length = len(preds)
            ind = np.argmax(counts)
            # Fraction of frames predicted as class 7.
            class7_frac = float(counts[values == 7].sum()) / clip_length
            print(class7_frac)
            if class7_frac < 0.4 and values[ind] == 6:
                # Discard class 6 and re-take the majority vote over the rest.
                keep = values != 6
                if keep.any():
                    values = values[keep]
                    counts = counts[keep]
                    ind = np.argmax(counts)
            if class7_frac > 0.4:
                # A clip dominated by class 7 is treated as a clean and jerk.
                self.prediction = 'Cleanandjerk'
            else:
                self.prediction = EXC_DICT[values[ind]]
            self.img_q.put(self.prediction)
            ratings = model_rank(embeds).detach().cpu().numpy()
            self.rating = ratings.tolist()
            self.rating = [{
                'x': x,
                'y': y[0]
            } for x, y in enumerate(self.rating[0])]
            # print(self.rating)
            self.clip_df_tmp = pd.DataFrame()
            self.clip_df = pd.DataFrame()
            self.new_pred = True
            return self
Example #4
def main(url, max_duration=6):
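    """Run OpenPose over a downloaded clip and classify the performed exercise.

    Per-frame keypoints are collected into ``clip_df``, cleaned and rescaled, and
    the pretrained classifier's majority vote over frames is shown via ``msgbox``.
    """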
    if url.find('instagram.com/p/') > -1:
        print(url.split('/p/')[-1])
        url = ExerciseScraper.get_new_url(
            url.split('/p/')[-1].replace('/', ''))
    print(url)
    path = '/tmp/'
    weight_name = './openpose/weights/openpose_mpii_best.pth.tar'
    model = get_model('vgg19')
    model.load_state_dict(torch.load(weight_name)['state_dict'])
    model = torch.nn.DataParallel(model)
    model = model.cuda()
    model.float()
    dataset = VideosDataset.from_url(url, path, max_duration)

    clip_df = pd.DataFrame()

    with torch.no_grad():
        model.eval()
        batch = dataset[0]
        X_full, orig_images_full, _ = batch
        for ix in range(X_full.shape[0]):
            X = X_full[ix, :, :, :].unsqueeze(0).to(DEVICE)
            img = orig_images_full[ix, :, :, :]

            predicted_outputs, _ = model(X)

            output1, output2 = predicted_outputs[-2], predicted_outputs[-1]
            output = output2.detach().cpu().numpy()

            frame = img.copy()
            frameWidth = frame.shape[1]
            frameHeight = frame.shape[0]

            detected_keypoints = []
            keypoints_list = np.zeros((0, 3))
            keypoint_id = 0

            for part in range(NPOINTS):
                probMap = output[0, part, :, :]
                probMap = cv2.resize(probMap, (frame.shape[1], frame.shape[0]))

                keypoints = getKeypoints(probMap, THRESHOLD)
                keypoints_with_id = []
                for i in range(len(keypoints)):
                    keypoints_with_id.append(keypoints[i] + (keypoint_id, ))
                    keypoints_list = np.vstack([keypoints_list, keypoints[i]])
                    keypoint_id += 1

                detected_keypoints.append(keypoints_with_id)

            frameClone = frame.copy()

            valid_pairs, invalid_pairs = getValidPairs(
                np.expand_dims(output1[0].detach().cpu().numpy(), axis=0),
                detected_keypoints, frameWidth, frameHeight)
            personwiseKeypoints = getPersonwiseKeypoints(
                valid_pairs, invalid_pairs, keypoints_list)
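            # Keypoints are grouped into per-person skeletons above; the checks
            # below narrow multiple detections down to a single person in frame.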

            spotter = False
            if len(personwiseKeypoints) > 1:
                spotter = detect_spotters(personwiseKeypoints)

            if len(personwiseKeypoints) > 1:
                personwiseKeypoints = more_keypoints(personwiseKeypoints, 6)

            if len(personwiseKeypoints) > 1:
                personwiseKeypoints = bigger_person(personwiseKeypoints,
                                                    keypoints_list, frameWidth,
                                                    frameHeight)

            if len(personwiseKeypoints) > 1:
                personwiseKeypoints = head_middle(personwiseKeypoints,
                                                  keypoints_list, frameWidth,
                                                  frameHeight)

            if len(personwiseKeypoints) > 0:
                final_keypoints = {}
                num_keypoints = 0
                for k, x in enumerate(
                        personwiseKeypoints[0][:len(personwiseKeypoints[0]) -
                                               1]):
                    final_keypoints[
                        f"{k}_0"] = np.nan if x == -1 else keypoints_list[int(
                            x)][:2][0]
                    final_keypoints[
                        f"{k}_1"] = np.nan if x == -1 else keypoints_list[int(
                            x)][:2][1]
                    if x != -1:
                        num_keypoints += 1
            else:
                final_keypoints = {}
                for k in range(15):
                    final_keypoints[f"{k}_0"] = np.nan
                    final_keypoints[f"{k}_1"] = np.nan
                num_keypoints = 0

            df_dict = {
                "vid_nr": ix,
                "filename": url,
                "clip_id": 0,
                "target": 0,
                "num_keypoints": num_keypoints,
                "spotter": spotter
            }
            df_dict.update(final_keypoints)
            clip_df = pd.concat([clip_df, pd.DataFrame(df_dict, index=[0])],
                                ignore_index=True)

            pic_key = picture_keypoints(personwiseKeypoints, keypoints_list,
                                        frameClone)
            #    plt.imshow(pic_key)
            cv2.imshow('window', cv2.resize(pic_key, (640, 360)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
        clip_df = interpolate(clip_df, interpolate_feet=False)
    clip_df = delete_nans(clip_df)
    multiplier = round(800 / 224, 2)
    clip_df = rescale_keypoints(clip_df, multiplier)

    actions, poses = fetch_keypoints(clip_df)
    classes = 8
    # chk_filename is not defined at this point in the original; the base-model
    # checkpoint used elsewhere in this project is assumed here.
    chk_filename = os.path.join(DATAPOINT, 'BaseModels', 'epoch_45.bin')
    pretrained_weights = torch.load(chk_filename,
                                    map_location=lambda storage, loc: storage)
    eval_model = NaiveBaselineModel(in_joints,
                                    in_dims,
                                    out_joints,
                                    filter_widths,
                                    pretrained_weights,
                                    embedding_len,
                                    classes,
                                    causal=True,
                                    dropout=0.25,
                                    channels=channels)
    receptive_field = eval_model.base_model.receptive_field()
    pad = (receptive_field - 1)
    causal_shift = pad

    checkp = torch.load(
        '/home/artursil/Documents/virtual_trainer/Virtual_trainer/checkpoint/Recipe-2-epoch-19.pth'
    )
    # checkp = torch.load('/home/artursil/Documents/virtual_trainer/Virtual_trainer/checkpoint/model-6.pth')
    eval_model.load_state_dict(checkp['model_state_dict'])

    # generator = SimpleSequenceGenerator(batch_size,actions,poses,pad=pad,causal_shift=causal_shift,test_split=split_ratio)
    with torch.no_grad():
        eval_model.eval()
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        eval_model = eval_model.to(device)
        poses = np.concatenate(poses)
        # Edge-pad 54 leading frames, presumably the model's causal receptive field.
        poses = np.pad(poses, ((54, 0), (0, 0), (0, 0)), 'edge')
        poses = torch.Tensor(np.expand_dims(poses, axis=0)).to(device)
        pred = eval_model(poses)
        softmax = torch.nn.Softmax(1)
        pred = softmax(pred)
        pred = pred.detach().cpu().numpy().squeeze()
        print(pred)
        preds = np.argmax(pred, axis=0)
        print(preds)
        values, counts = np.unique(preds, return_counts=True)
        print(values)
        print(counts)
        ind = np.argmax(counts)
        print(EXC_DICT[values[ind]])
        msgbox(f'Predicted exercise: {EXC_DICT[values[ind]]}', 'Result')
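
# A minimal, hypothetical entry point for the script above; the command-line
# argument layout is an assumption, not part of the original code.
if __name__ == '__main__':
    import sys
    main(sys.argv[1])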
Example #5
def openpose_main(model):
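    """Fetch one frame from the camera endpoint, run OpenPose on it, and return
    the annotated frame as JPEG bytes."""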

    clip_df = pd.DataFrame()
    with torch.no_grad():
        model.eval()
        ix = 0
        while True:
            st = time.time()
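            # `url` is assumed to be defined at module level (e.g. a camera or
            # snapshot endpoint polled once per loop iteration).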
            img_resp = requests.get(url)
            print(time.time() - st)
            img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)
            img = cv2.imdecode(img_arr, -1)
            img, orig_img = VideoScraper.transform_single_frame(img, 220)

            X = torch.Tensor(img).unsqueeze(0).to(DEVICE)
            img = orig_img

            predicted_outputs, _ = model(X)

            output1, output2 = predicted_outputs[-2], predicted_outputs[-1]
            output = output2.detach().cpu().numpy()

            frame = img.copy()
            frameWidth = frame.shape[1]
            frameHeight = frame.shape[0]

            detected_keypoints = []
            keypoints_list = np.zeros((0, 3))
            keypoint_id = 0

            for part in range(NPOINTS):
                probMap = output[0, part, :, :]
                probMap = cv2.resize(probMap, (frame.shape[1], frame.shape[0]))

                keypoints = getKeypoints(probMap, THRESHOLD)
                keypoints_with_id = []
                for i in range(len(keypoints)):
                    keypoints_with_id.append(keypoints[i] + (keypoint_id, ))
                    keypoints_list = np.vstack([keypoints_list, keypoints[i]])
                    keypoint_id += 1

                detected_keypoints.append(keypoints_with_id)

            frameClone = frame.copy()

            valid_pairs, invalid_pairs = getValidPairs(
                np.expand_dims(output1[0].detach().cpu().numpy(), axis=0),
                detected_keypoints, frameWidth, frameHeight)
            personwiseKeypoints = getPersonwiseKeypoints(
                valid_pairs, invalid_pairs, keypoints_list)

            spotter = False
            if len(personwiseKeypoints) > 1:
                spotter = detect_spotters(personwiseKeypoints)

            if len(personwiseKeypoints) > 1:
                personwiseKeypoints = more_keypoints(personwiseKeypoints, 6)

            if len(personwiseKeypoints) > 1:
                personwiseKeypoints = bigger_person(personwiseKeypoints,
                                                    keypoints_list, frameWidth,
                                                    frameHeight)

            if len(personwiseKeypoints) > 1:
                personwiseKeypoints = head_middle(personwiseKeypoints,
                                                  keypoints_list, frameWidth,
                                                  frameHeight)

            if len(personwiseKeypoints) > 0:
                final_keypoints = {}
                num_keypoints = 0
                for k, x in enumerate(
                        personwiseKeypoints[0][:len(personwiseKeypoints[0]) -
                                               1]):
                    final_keypoints[
                        f"{k}_0"] = np.nan if x == -1 else keypoints_list[int(
                            x)][:2][0]
                    final_keypoints[
                        f"{k}_1"] = np.nan if x == -1 else keypoints_list[int(
                            x)][:2][1]
                    if x != -1:
                        num_keypoints += 1
            else:
                final_keypoints = {}
                for k in range(15):
                    final_keypoints[f"{k}_0"] = np.nan
                    final_keypoints[f"{k}_1"] = np.nan
                num_keypoints = 0

            df_dict = {
                "vid_nr": ix,
                "filename": url,
                "clip_id": 0,
                "target": 0,
                "num_keypoints": num_keypoints,
                "spotter": spotter
            }
            df_dict.update(final_keypoints)
            clip_df = pd.concat([clip_df, pd.DataFrame(df_dict, index=[0])],
                                ignore_index=True)

            pic_key = picture_keypoints(personwiseKeypoints, keypoints_list,
                                        frameClone)
            #    plt.imshow(pic_key)
            #cv2.imshow('window',cv2.resize(pic_key,(640,360)))
            ret, jpeg = cv2.imencode('.jpg', pic_key)
            # Returning here ends the function after a single frame; everything
            # below this point in the loop (and after it) is unreachable as written.
            return jpeg.tobytes()

            ix += 1
            print(ix)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
        clip_df = interpolate(clip_df, interpolate_feet=False)
    clip_df = delete_nans(clip_df)
    multiplier = round(800 / 224, 2)
    clip_df = rescale_keypoints(clip_df, multiplier)

    actions, poses = fetch_keypoints(clip_df)
    classes = 8
    # chk_filename is not defined at this point in the original; the base-model
    # checkpoint used elsewhere in this project is assumed here.
    chk_filename = os.path.join(DATAPOINT, 'BaseModels', 'epoch_45.bin')
    pretrained_weights = torch.load(chk_filename,
                                    map_location=lambda storage, loc: storage)
    eval_model = NaiveBaselineModel(in_joints,
                                    in_dims,
                                    out_joints,
                                    filter_widths,
                                    pretrained_weights,
                                    embedding_len,
                                    classes,
                                    causal=True,
                                    dropout=0.25,
                                    channels=channels)
    receptive_field = eval_model.base_model.receptive_field()
    pad = (receptive_field - 1)
    causal_shift = pad

    checkp = torch.load(
        '/home/artursil/Documents/virtual_trainer/Virtual_trainer/checkpoint/Recipe-2-epoch-19.pth'
    )
    # checkp = torch.load('/home/artursil/Documents/virtual_trainer/Virtual_trainer/checkpoint/model-6.pth')
    eval_model.load_state_dict(checkp['model_state_dict'])

    # generator = SimpleSequenceGenerator(batch_size,actions,poses,pad=pad,causal_shift=causal_shift,test_split=split_ratio)
    with torch.no_grad():
        eval_model.eval()
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        eval_model = eval_model.to(device)
        try:
            poses = np.concatenate(poses)
        except ValueError:
            msgbox('There is no human in the video!')
            raise ValueError(
                "clip_df is empty because OpenPose hasn't detected a person")

        poses = np.pad(poses, ((54, 0), (0, 0), (0, 0)), 'edge')
        poses = torch.Tensor(np.expand_dims(poses, axis=0)).to(device)
        pred = eval_model(poses)
        softmax = torch.nn.Softmax(1)
        pred = softmax(pred)
        pred = pred.detach().cpu().numpy().squeeze()
        print(pred)
        preds = np.argmax(pred, axis=0)
        print(preds)
        values, counts = np.unique(preds, return_counts=True)
        print(values)
        print(counts)
        ind = np.argmax(counts)
        print(EXC_DICT[values[ind]])
        msgbox(f'Predicted exercise: {EXC_DICT[values[ind]]}', 'Result')