示例#1
0
def ckpt_up_to_date(local_ckpt_prefix, bucket, folder):
    """Return True iff local_ckpt_prefix matches the most recent checkpoint
    prefix in the given s3 bucket/folder.

    local_ckpt_prefix: checkpoint prefix string currently held locally.
    bucket: s3 bucket name passed to FileManager.
    folder: folder (key prefix) within the bucket to list.
    """
    fm = FileManager(bucket)
    ckpt_names = fm.get_folder_list(folder)
    most_recent_prefix = most_recent_ckpt_from_list(ckpt_names)
    # Direct comparison replaces the former `if not a == b: return False`
    # double-negative; behavior is identical.
    return local_ckpt_prefix == most_recent_prefix
示例#2
0
def extract_training_data(s3_bucket_name, top_dir):
    """Collect (frame_path, matrix) training pairs from annotations + frames.

    Lists JSON annotation files under top_dir/annotations, parses a matrix
    from each via get_matrix_from_annotations, and pairs it (by basename)
    with a matching image under top_dir/frames.

    s3_bucket_name: bucket name, or None to read from the local filesystem.
        Path construction deliberately differs between the two cases --
        see the in-loop comments.
    top_dir: directory containing 'annotations' and 'frames' subfolders.
    returns: list of (frame_path, M) tuples; annotations whose matrix is
        None are skipped.
    """
    fm = FileManager(s3_bucket_name)

    annotation_dir = os.path.join(top_dir, 'annotations')
    frames_dir = os.path.join(top_dir, 'frames')
    
    annotations_names = fm.get_folder_list(annotation_dir, extension_filter='json')
    # keep only names of the form "<base>.<ext>" (exactly one dot)
    frame_names = [x for x in fm.get_folder_list(frames_dir) if len(x.split('.')) == 2]
    # every image extension present in the frames folder; used to probe
    # for a frame matching each annotation basename below
    frame_exts = set([x.split('.')[1] for x in frame_names])
    # convert to set for O(1) membership tests inside the loop
    frame_names = set(frame_names)
    
    data = []
    for filename in annotations_names:
        # if not json file, skip
        if not filename.split('.')[1] == 'json':
            continue
        # strange behavior with os.path.join
        # depending on whether or not looking in s3 bucket
        # NOTE(review): the s3 branch concatenates with NO separator --
        # presumably get_folder_list returns keys carrying a leading '/';
        # confirm against FileManager before changing.
        if s3_bucket_name is not None:
            annotation_filename = annotation_dir + filename
        else:
            annotation_filename = os.path.join(annotation_dir, filename)
        M = get_matrix_from_annotations(s3_bucket_name, annotation_filename)
        if M is not None:
            filename_noext = filename.split('.')[0]
            # try each known extension until a frame with this basename is found
            for ext in frame_exts:
                frame_name = filename_noext + '.' + ext
                if frame_name in frame_names:
                    # same strange behavior 
                    if s3_bucket_name is not None:
                        frame_path_from_bucket = top_dir + '/frames' + frame_name
                    else:
                        frame_path_from_bucket = os.path.join(top_dir, 'frames', frame_name)
                    data.append((frame_path_from_bucket, M))
    
    return data
示例#3
0
def download_ckpt_to_dir(bucket, folder, dest_dir):
    '''
    download most recent ckpt files (index, meta, data) from s3 bucket and directory.
    dest: local folder to put files into
    returns: path/to/ckpt_prefix
    '''
    manager = FileManager(bucket)
    # need .data and .index files (don't necessarily need meta, but will download)
    available = manager.get_folder_list(folder)
    prefix = most_recent_ckpt_from_list(available)
    print('Downloading ckpts from s3: {}'.format(prefix))

    # every file belonging to the newest checkpoint shares this prefix
    for name in [x for x in available if prefix in x]:
        manager.download_file(folder, name, dest_dir + '/' + name)

    return dest_dir + '/' + prefix
示例#4
0
from utils.display import Display
from utils.file_manager import FileManager
from utils.warp_tools import *
from utils.rink_specs import HockeyRink
import random
import cv2
# Script setup: s3-backed file access and a display window for visualization.
fm = FileManager('bsivisiondata')
d = Display()

# List annotation files (JSON only) for the PHI-PIT game segment.
annotations = fm.get_folder_list('PHI-PIT_6m-8m/annotations',
                                 extension_filter='json')
# random.shuffle(annotations)
for f in annotations:
    print f
    im_dict = fm.read_image_dict('PHI-PIT_6m-8m/annotations', f)
    if not 'warp' in im_dict:
        continue

    imname = f.split('.')[0] + '.png'
    im = fm.read_image_file('PHI-PIT_6m-8m/frames', imname)
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)

    H = np.array(im_dict['warp']['M'])
    hr = HockeyRink()

    scaled_H = scale_homography(H, 600, 300)
    H1280 = scale_homography(H, 1280, 720)
    '''
    NEEDED TO BE RESIZING IMAGES BEFORE CALLING WARP!!!
    '''