if np.isfinite(chunk).all():
                        output[subject][name] = np.array(
                            chunk, dtype='float32') / 1000
                        output_2d[subject][name] = list(
                            np.array(all_chunks_2d[i],
                                     dtype='float32').transpose(1, 0, 2, 3))
                    chunk_indices.append(
                        (current_index, next_index, np.isfinite(chunk).all(),
                         split, name))
                    current_index = next_index
                assert current_index == index[subject][canonical_name][1]
                if canonical_name not in frame_mapping[subject_name]:
                    frame_mapping[subject_name][canonical_name] = []
                frame_mapping[subject_name][canonical_name] += chunk_indices

        metadata = suggest_metadata('humaneva' + str(num_joints))
        output_filename = 'data_3d_' + metadata['layout_name']
        output_prefix_2d = 'data_2d_' + metadata['layout_name'] + '_'

        if args.convert_3d:
            print('Saving...')
            np.savez_compressed(output_filename, positions_3d=output)
            np.savez_compressed(output_prefix_2d + 'gt',
                                positions_2d=output_2d,
                                metadata=metadata)
            print('Done.')

    else:
        print('Please specify the dataset source')
        exit(0)
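A minimal sketch of how the archives written above can be loaded back, assuming num_joints was 15 so that metadata['layout_name'] is 'humaneva15' (the file names below follow from that assumption). The nested dictionaries are stored as pickled object arrays, hence allow_pickle=True and .item():

import numpy as np

# File names assume metadata['layout_name'] == 'humaneva15'
positions_3d = np.load('data_3d_humaneva15.npz', allow_pickle=True)['positions_3d'].item()
archive_2d = np.load('data_2d_humaneva15_gt.npz', allow_pickle=True)
positions_2d = archive_2d['positions_2d'].item()
metadata_2d = archive_2d['metadata'].item()

for subject, actions in positions_3d.items():
    for action, poses in actions.items():
        print(subject, action, poses.shape)  # (num_frames, num_joints, 3), in meters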
Example #2
                        type=str,
                        metavar='PATH',
                        help='input path to 2D detections')
    parser.add_argument(
        '-o',
        '--output',
        default='detectron_pt_coco',
        type=str,
        metavar='PATH',
        help='output suffix for 2D detections (e.g. detectron_pt_coco)')

    args = parser.parse_args()

    if not args.input:
        print('Please specify the input directory')
        exit(0)

    # pick the pose importer and metadata that match the output name; we use detectron here
    import_func = suggest_pose_importer('detectron_pt_coco')
    metadata = suggest_metadata('detectron_pt_coco')

    print('Parsing 2D detections from', args.input)
    keypoints = import_func(args.input)

    output = keypoints.astype(np.float32)
    # the generated data is used later for 3D pose estimation
    np.savez_compressed(output_prefix_2d + 'test' + args.output,
                        positions_2d=output,
                        metadata=metadata)
    print('npz name is ', output_prefix_2d + 'test' + args.output)
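The fragment above relies on output_prefix_2d being defined earlier in the script (it is not shown here). As a quick sanity check, the saved archive can be loaded back; the file name below is only illustrative:

import numpy as np

# Illustrative name: replace with the actual output_prefix_2d + 'test' + args.output file
archive = np.load('data_2d_example_testdetectron_pt_coco.npz', allow_pickle=True)
keypoints = archive['positions_2d']
print(keypoints.shape, keypoints.dtype)  # e.g. (num_frames, num_joints, 2), float32
print(archive['metadata'].item())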
    parser = argparse.ArgumentParser(description='Custom dataset creator')
    parser.add_argument('-i', '--input', type=str, default='', metavar='PATH', help='detections directory')
    parser.add_argument('-o', '--output', type=str, default='', metavar='PATH', help='output suffix for 2D detections')
    args = parser.parse_args()
    
    if not args.input:
        print('Please specify the input directory')
        exit(0)
        
    if not args.output:
        print('Please specify an output suffix (e.g. detectron_pt_coco)')
        exit(0)
    
    print('Parsing 2D detections from', args.input)
    
    metadata = suggest_metadata('coco')
    metadata['video_metadata'] = {}
    
    output = {}
    file_list = glob(args.input + '/*.npz')
    for f in file_list:
        canonical_name = os.path.splitext(os.path.basename(f))[0]
        data, video_metadata = decode(f)
        output[canonical_name] = {}
        output[canonical_name]['custom'] = [data[0]['keypoints'].astype('float32')]
        metadata['video_metadata'][canonical_name] = video_metadata

    print('Saving...')
    np.savez_compressed(output_prefix_2d + args.output, positions_2d=output, metadata=metadata)
    print('Done.')
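decode is not shown in this snippet. A plausible sketch of it, under the assumption that each per-video .npz stores a keypoints array plus a small video-metadata dict (the key names here are assumptions, not necessarily the script's actual ones):

import numpy as np

def decode(filename):
    # Assumed layout: one archive per video with 'keypoints' and a pickled 'metadata' dict
    archive = np.load(filename, allow_pickle=True)
    keypoints = archive['keypoints']             # e.g. (num_frames, num_joints, 2+)
    video_metadata = archive['metadata'].item()  # e.g. {'w': 1920, 'h': 1080}
    return [{'keypoints': keypoints}], video_metadata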
    
    parser.add_argument('-i', '--input', default='', type=str, metavar='PATH', help='input path to 2D detections')
    parser.add_argument('-o', '--output', default='', type=str, metavar='PATH', help='output suffix for 2D detections (e.g. detectron_pt_coco)')
    
    args = parser.parse_args()
        
    if not args.input:
        print('Please specify the input directory')
        exit(0)
        
    if not args.output:
        print('Please specify an output suffix (e.g. detectron_pt_coco)')
        exit(0)

    import_func = suggest_pose_importer(args.output)
    metadata = suggest_metadata(args.output)

    print('Parsing 2D detections from', args.input)

    output = {}
    file_list = glob(args.input + '/S*/*.mp4.npz')
    for f in file_list:
        path, fname = os.path.split(f)
        subject = os.path.basename(path)
        assert subject.startswith('S'), subject + ' does not look like a subject directory'

        if '_ALL' in fname:
            continue
        
        m = re.search('(.*)\\.([0-9]+)\\.mp4\\.npz', fname)
        action = m.group(1)
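For illustration, this is how the pattern above splits a detection file name into the action and camera id (the concrete name below is made up):

import re

m = re.search(r'(.*)\.([0-9]+)\.mp4\.npz', 'Walking 1.55011271.mp4.npz')
print(m.group(1))  # 'Walking 1' -> action
print(m.group(2))  # '55011271'  -> camera id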
    
def main(args):
    #if they passed a directory, get all images
    if os.path.isdir(args.im_or_folder):
        im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)

    #otherwise just use the single file that was passed
    else:
        im_list = [args.im_or_folder]

    #gets some suggested COCO metadata like keypoint symmetries
    metadata = suggest_metadata('coco')
    '''NOTES
    {} defines an empty dict
    [] defines an empty list
    These are fundamentally different types: a dict is an associative array, while a list is a standard array with integer indices.'''

    #set video metadata to empty dict
    metadata['video_metadata'] = {}

    #initialize final output dict to empty dict
    output = {}

    #for each video passed
    for video_name in im_list:
        #get absolute path of output directory
        #out_name = os.path.join(args.output_dir, os.path.basename(video_name))
        out_name = os.path.join(args.output_dir, "data_2d_custom_myvideos")

        #print name of video that we're processing
        print('Processing {}'.format(video_name))

        #initialize arrays to empty
        boxes = []
        segments = []
        keypoints = []

        #iterate over each frame in the video;
        #enumerate pairs each decoded frame with its index frame_i
        for frame_i, im in enumerate(read_video(video_name)):
            #get the time
            t = time.time()

            #run the inference
            kps = posenet.estimate_pose(im)

            #print how long it took to process frame
            print('Frame {} processed in {:.3f}s'.format(
                frame_i,
                time.time() - t))

            has_bbox = False
            '''
            if kps.has('pred_boxes'):
                bbox_tensor = outputs.pred_boxes.tensor.numpy()
                if len(bbox_tensor) > 0:
                    has_bbox = True
                    scores = outputs.scores.numpy()[:, None]
                    bbox_tensor = np.concatenate((bbox_tensor, scores), axis=1)'''
            '''
            else:
                print("No keypts or boxes found in frame, setting keypts and boxes to empty array")
                kps = []
                bbox_tensor = []'''

            #add this frame's keypoint coordinates (dropping the confidence column) to the per-video list
            keypoints.append(kps[:, :2])

        #store video resolution in metadata
        this_vid_metadata = {
            'w': im.shape[1],
            'h': im.shape[0],
        }

        #get name of video we're processing
        canonical_name = video_name

        #initialize value keyed at [canonical_name] to empty dict
        output[canonical_name] = {}

        #add keypoint locations to output array
        output[canonical_name]['custom'] = [np.array(keypoints)]

        #add video resolution metadata to output array
        metadata['video_metadata'][canonical_name] = this_vid_metadata

        #print(keypoints)

        #export npz file containing all data retrieved by inference
        np.savez_compressed(out_name, positions_2d=output, metadata=metadata)
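main() reads im_or_folder, image_ext and output_dir from args, but the argument parser is not shown above. A minimal sketch of an entry point that would supply those fields (the flag names are assumptions):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='2D keypoint inference over videos')
    parser.add_argument('im_or_folder', type=str, help='video file or directory of videos')
    parser.add_argument('--image-ext', type=str, default='mp4', help='video file extension to glob for')
    parser.add_argument('--output-dir', type=str, default='.', help='directory for the exported .npz archive')
    main(parser.parse_args())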