Example #1
    def __init__(self, root, verbose=True):
        """

        :param root: root location
        :param verbose: {boolean}
        """
        data_root = join(root, 'epfl_campus')
        if not isdir(data_root):
            makedirs(data_root)

        self.verbose = verbose
        self.data_root = data_root

        # download data:
        seq_root = join(data_root, 'CampusSeq1')
        self.seq_root = seq_root
        if not isdir(seq_root):
            seq_zip = join(data_root, 'CampusSeq1.zip')
            if not isfile(seq_zip):
                url = 'http://188.138.127.15:81/Datasets/CampusSeq1.zip'
                if verbose:
                    print('\ndownload ' + url)
                download.download(url, seq_zip)
            if verbose:
                print('\nunzip ' + seq_zip)
            unzip.unzip(seq_zip, data_root, verbose, del_after_unzip=True)

        # gt is taken from here: http://campar.in.tum.de/Chair/MultiHumanPose
        P0 = np.array([[439.06, 180.81, -26.946, 185.95],
                       [-5.3416, 88.523, -450.95, 1324],
                       [0.0060594, 0.99348, -0.11385, 5.227]])
        P1 = np.array([[162.36, -438.34, -17.508, 3347.4],
                       [73.3, -10.043, -443.34, 1373.5],
                       [0.99035, -0.047887, -0.13009, 6.6849]])
        P2 = np.array([[237.58, 679.93, -26.772, -1558.3],
                       [-43.114, 21.982, -713.6, 1962.8],
                       [-0.83557, 0.53325, -0.13216, 11.202]])
        self.Calib = [P0, P1, P2]

        # GT binary file
        actorsGTmat = join(seq_root, 'actorsGT.mat')
        assert isfile(actorsGTmat)
        M = loadmat(actorsGTmat)
        Actor3d = M['actor3D'][0]
        persons = []
        for pid in range(3):
            pts = []
            Person = Actor3d[pid]
            n = len(Person)
            for frame in range(n):
                pose = Person[frame][0]
                if len(pose) == 1:
                    pts.append(None)
                elif len(pose) == 14:
                    pts.append(pose)
                else:
                    raise ValueError("Unexpected pose length: " + str(len(pose)))

            persons.append(pts)
        self.Y = persons
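The hard-coded P0..P2 are 3x4 camera projection matrices for the three Campus views. As a minimal sketch of how they can be used (assuming each ground-truth pose stored in self.Y is an array of 3D joint coordinates, which the loader itself does not verify, and with a hypothetical class name), a pose can be projected into a camera image like this:

import numpy as np

def project(P, pts3d):
    # project (n, 3) world points with a 3x4 matrix P into pixel coordinates
    pts_h = np.hstack([pts3d, np.ones((len(pts3d), 1))])  # homogeneous (n, 4)
    uvw = pts_h @ P.T                                      # (n, 3)
    return uvw[:, :2] / uvw[:, 2:3]                        # divide by projective depth

# hypothetical usage with the loader above:
# ds = Campus('/tmp/data')
# pose = ds.Y[0][42]            # person 0, frame 42 (may be None)
# if pose is not None:
#     uv = project(ds.Calib[0], np.asarray(pose)[:, :3])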
Example #2
    def __init__(self, data_root, z_is_up=True, store_binary=False):
        """
        :param data_root: root location for data
        :param z_is_up: if True ensure that z points upwards
        :param store_binary: if True store the extracted video sequence as
            numpy binary for faster access
        """
        assert isdir(data_root)

        root = join(data_root, 'cmu_mocap')
        if not isdir(root):
            makedirs(root)

        subject_folder = join(root, 'all_asfamc/subjects')
        if not isdir(subject_folder):
            print("[CMU MoCap] download file")

            zip_files = ['allasfamc.zip']

            for zip_name in zip_files:
                url = 'http://mocap.cs.cmu.edu/' + zip_name
                zip_file = join(root, zip_name)
                if not isfile(zip_file):
                    print('\t[downloading] ', url)
                    download.download(url, zip_file)
                print('\t[unzipping] ', zip_file)
                unzip.unzip(zip_file, root)

        self.subjects = sorted(listdir(subject_folder))
        self.subject_folder = subject_folder
        self.z_is_up = z_is_up
        self.store_binary = store_binary
Example #3
    def __init__(self, root='/tmp', url=None, name=None):
        """
            create a new instance of the ReId network
        :param root:
        """
        if url is None:
            url = 'http://188.138.127.15:81/models/reid.h5'
        if name is None:
            name = 'reid.h5'
        if not isdir(root):
            makedirs(root)

        filepath = join(root, name)
        if not isfile(filepath):
            print('could not find model.. downloading it')
            dl.download(url, filepath)

        if keras.__version__.startswith('2.2'):
            warnings.warn(
                "This model only works properly with keras 2.1.3. Weights for other versions might not work properly"
            )

        # ------- build model -------
        seq = Sequential()
        xception = Xception(weights='imagenet',
                            input_shape=(221, 221, 3),
                            include_top=False,
                            pooling='avg')
        seq.add(xception)

        # freeze first layers in pre-trained model
        for layer in xception.layers[0:-20]:
            layer.trainable = False

        input_a = Input(shape=(221, 221, 3))
        input_b = Input(shape=(221, 221, 3))

        out_a = seq(input_a)
        out_b = seq(input_b)

        concatenated = concatenate([out_a, out_b])
        hidden1 = Dense(128, activation='relu', name='dense_1')(concatenated)
        hidden_drp1 = Dropout(0.7)(hidden1)
        hidden2 = Dense(32, activation='relu', name='dense_2')(hidden_drp1)
        hidden_drp2 = Dropout(0.1)(hidden2)
        out = Dense(1, activation='sigmoid', name='dense_3')(hidden_drp2)

        model = Model([input_a, input_b], out)
        print('fp:', filepath)
        model.load_weights(filepath)
        self.model = model
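Both 221x221x3 inputs share the same Xception branch, and the concatenated embeddings feed a small dense head with a sigmoid output, i.e. the model scores how likely two crops show the same person. A minimal usage sketch (the preprocessing via Xception's preprocess_input and the class name ReId are assumptions, not shown in the snippet):

import cv2
import numpy as np
from keras.applications.xception import preprocess_input

def prepare(img_bgr):
    # resize a crop to the expected 221x221 input and apply Xception scaling
    img = cv2.cvtColor(cv2.resize(img_bgr, (221, 221)), cv2.COLOR_BGR2RGB)
    return preprocess_input(np.expand_dims(img.astype(np.float32), axis=0))

# reid = ReId(root='/tmp')                               # hypothetical class name
# score = reid.model.predict([prepare(a), prepare(b)])[0, 0]
# same_person = score > 0.5                              # sigmoid output in [0, 1]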
Example #4
    def __init__(self, root):
        """ files
        :param root:
        """
        assert isdir(root)
        data_root = join(root, 'CAD_120')
        if not isdir(data_root):
            makedirs(data_root)
        self.data_root = data_root

        self.actions = sorted([
            'arranging_objects', 'cleaning_objects', 'having_meal',
            'making_cereal', 'microwaving_food', 'picking_objects',
            'stacking_objects', 'taking_food', 'taking_medicine',
            'unstacking_objects'
        ])

        base_url = 'http://pr.cs.cornell.edu/humanactivities/data/'

        self.subjects = [1, 3, 4, 5]

        # map skeleton representation to joint 3d locs only
        self.items = [0]
        for i in range(11, 155, 14):
            for j in range(0, 4):
                self.items.append(i + j)
        self.items = np.array(self.items + list(range(155, 171)))

        # map our reduced joint + conf to actual 3d data
        items3d = []
        for i in range(1, 61, 4):
            items3d += [i + j for j in range(3)]
        self.items3d = np.array(items3d)

        for pid in self.subjects:
            dir_name = 'Subject%01d_annotations' % pid
            dir_loc = join(data_root, dir_name)

            if not isdir(dir_loc):
                zip_loc = join(data_root, dir_name + '.tar.gz')

                if not isfile(zip_loc):
                    print('download ' + dir_name)
                    url = base_url + dir_name + '.tar.gz'
                    download.download(url, zip_loc)

                # unzip folder
                print('unzip ', zip_loc)
                unzip.unzip(zip_loc, dir_loc)
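The index arrays set up above map a raw skeleton line to 3D joint positions. Assuming the standard CAD-120 skeleton text layout (one frame id, then 11 joints with 9 orientation values + confidence + 3 position values + confidence, then 4 joints with 3 position values + confidence, i.e. 171 columns per line), self.items keeps the frame id plus (x, y, z, conf) per joint, and self.items3d then drops the id and the confidences. A minimal sketch of that mapping:

import numpy as np

# rebuild the same index arrays as in the constructor above
items = [0]
for i in range(11, 155, 14):
    items.extend(i + j for j in range(4))
items = np.array(items + list(range(155, 171)))   # 61 columns kept
items3d = np.array([i + j for i in range(1, 61, 4) for j in range(3)])  # 45 columns

row = np.zeros(171)                         # hypothetical skeleton line (frame id + joint data)
reduced = row[items]                        # frame id + (x, y, z, conf) per joint
joints3d = reduced[items3d].reshape(15, 3)  # 15 joints, 3D positions only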
Example #5
    def __init__(self, root='/tmp', url=None, name=None):
        """
            create a new instance of the ReId network
        :param root:
        """
        if url is None:
            url = 'http://188.138.127.15:81/models/model_heavy_89acc.h5'
        if name is None:
            name = 'model_heavy_89acc.h5'
        if not isdir(root):
            makedirs(root)

        filepath = join(root, name)
        if not isfile(filepath):
            print('could not find model.. downloading it')
            dl.download(url, filepath)

        self.model = load_model(filepath)
Example #6
def get(data_root, frame):
    """
    :param data_root: root location for data
    :param frame: frame index, starting at frame 0
    :return: images, 3D joint positions and affine camera calibrations for the frame
    """
    seq_zipname = 'player2sequence1.zip'
    seq_dir = 'Sequence 1'
    player = 2

    root = join(data_root, 'football2')
    root = join(root, 'player' + str(player))
    if not isdir(root):
        makedirs(root)

    seq_url = 'http://www.csc.kth.se/cvap/cvg/MultiViewFootballData/' + seq_zipname
    seq_dir = join(root, seq_dir)

    if not isdir(seq_dir):
        seq_zip = join(root, seq_zipname)
        if not isfile(seq_zip):
            print('downloading... ', seq_url)
            download(seq_url, seq_zip)

        print('unzipping... ', seq_zip)
        unzip(seq_zip, root)

    pos2d_file = join(seq_dir, 'positions2d.txt')
    pos2d = np.loadtxt(pos2d_file)
    N = 14  # number of joints
    C = 3  # number of cameras
    T = len(pos2d) / 2 / N / C
    assert floor(T) == ceil(T)
    T = int(T)

    pos2d_result = np.zeros((2, N, C, T))
    counter = 0
    for t in range(T):
        for c in range(C):
            for n in range(N):
                for i in range(2):
                    pos2d_result[i, n, c, t] = pos2d[counter]
                    counter += 1
    pos2d = pos2d_result

    # ~~~ pos3d ~~~
    pos3d_file = join(seq_dir, 'positions3d.txt')
    assert isfile(pos3d_file)
    pos3d = np.loadtxt(pos3d_file)
    pos3d_result = np.zeros((3, N, T))
    assert T == int(len(pos3d) / 3 / N)
    counter = 0
    for t in range(T):
        for n in range(N):
            for i in range(3):
                pos3d_result[i, n, t] = pos3d[counter]
                counter += 1
    pos3d = pos3d_result

    # ~~~ Cameras ~~~
    cam_file = join(seq_dir, 'cameras.txt')
    assert isfile(cam_file)
    cams = np.loadtxt(cam_file)
    cameras = np.zeros((2, 4, C, T))
    assert T == int(len(cams) / 2 / 4 / C)

    counter = 0
    for t in range(T):
        for c in range(C):
            for j in range(4):
                for i in range(2):
                    cameras[i, j, c, t] = cams[counter]
                    counter += 1

    Im = []
    h, w = -1, -1
    for cam in ['Camera 1', 'Camera 2', 'Camera 3']:
        im_dir = join(seq_dir, cam)
        assert isdir(im_dir)
        im_name = join(im_dir, "%05d.png" % (frame+1))
        assert isfile(im_name)
        im = cv2.cvtColor(cv2.imread(im_name), cv2.COLOR_BGR2RGB)
        Im.append(im)

        if w == -1 or h == -1:
            assert h == -1 and w == -1
            h, w, _ = im.shape
        else:
            h_, w_, _ = im.shape
            assert h_ == h and w_ == w

    Im = np.array(Im)

    Calib = []
    for cid in [0, 1, 2]:
        cam = np.zeros((3, 4))
        cam[0:2, :] = cameras[:, :, cid, frame]
        cam[2,3] = 1
        Calib.append(AffineCamera(cam, w, h))

    # 2D joints (2 x n_joints) per camera
    Pts2d = []
    for cid in [0, 1, 2]:
        d2d = pos2d[:,:,cid, frame]
        Pts2d.append(d2d)

    d3d = pos3d[:, :, frame]

    return Im, np.transpose(d3d), Calib
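The nested loops in get() unpack the flat text files in t-major order (frame index slowest, coordinate fastest). The same unpacking can be written with numpy as a reshape plus transpose; a sketch equivalent to the loops above, reusing the file names and T, C, N from the function:

pos2d = np.loadtxt(pos2d_file).reshape(T, C, N, 2).transpose(3, 2, 1, 0)  # (2, N, C, T)
pos3d = np.loadtxt(pos3d_file).reshape(T, N, 3).transpose(2, 1, 0)        # (3, N, T)
cams = np.loadtxt(cam_file).reshape(T, C, 4, 2).transpose(3, 2, 1, 0)     # (2, 4, C, T)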
Example #7
    def __init__(self, root, verbose=True):
        """

        :param root: root location
        :param verbose: {boolean}
        """
        if verbose:
            print('\n**Shelf dataset**')
        data_root = join(root, 'tum_shelf')
        if not isdir(data_root):
            makedirs(data_root)

        self.verbose = verbose
        self.data_root = data_root

        # download data
        url = 'http://campar.cs.tum.edu/files/belagian/multihuman/Shelf.tar.bz2'
        data_folder = join(data_root, 'Shelf')
        if not isdir(data_folder):
            zip_filename = join(data_root, 'Shelf.tar.bz2')
            if not isfile(zip_filename):
                if verbose:
                    print('\tdownload ' + url)
                download.download(url, zip_filename)

            if verbose:
                print('\nunzip ' + zip_filename)
            unzip.unzip(zip_filename, data_root, verbose)

        if verbose:
            print('\n')

        # load Calibration data
        seq_root = join(data_root, 'Shelf')
        self.seq_root = seq_root
        calibration_dir = join(seq_root, 'Calibration')
        assert isdir(calibration_dir)

        self.Calib = []
        for cam in ['P0.txt', 'P1.txt', 'P2.txt', 'P3.txt', 'P4.txt']:
            fname = join(calibration_dir, cam)
            assert isfile(fname)
            P = np.loadtxt(fname, delimiter=',')
            self.Calib.append(P)

        # GT binary file
        actorsGTmat = join(seq_root, 'actorsGT.mat')
        assert isfile(actorsGTmat)
        M = loadmat(actorsGTmat)
        Actor3d = M['actor3D'][0]
        persons = []
        for pid in range(4):
            pts = []
            Person = Actor3d[pid]
            n = len(Person)
            for frame in range(n):
                pose = Person[frame][0]
                if len(pose) == 1:
                    pts.append(None)
                elif len(pose) == 14:
                    pts.append(pose)
                else:
                    raise ValueError("Unexpected pose length: " + str(len(pose)))

            persons.append(pts)
        self.Y = persons
Example #8
    def __init__(self, root, verbose=True):
        """

        :param root: root location
        :param verbose: {boolean}
        """
        EXCLUDE_VID = {'1516'}  # these videos are 'broken'
        if verbose:
            print('\n**PennAction [cropped]**')

        data_root = join(root, 'pennaction_cropped')
        if not isdir(data_root):
            makedirs(data_root)

        url = 'http://188.138.127.15:81/Datasets/penn-crop.zip'

        data_folder = join(data_root, 'penn-crop')
        if not isdir(data_folder):
            zip_filename = join(data_root, 'penn-crop.zip')
            if not isfile(zip_filename):
                if verbose:
                    print('\tdownload ', url)
                download.download(url, zip_filename)

            if verbose:
                print('\tunzip ', zip_filename)
            unzip.unzip(zip_filename, data_root, verbose=verbose)

        self.data_folder = data_folder
        if verbose:
            print('')

        self.frames_folder = join(data_folder, 'frames')
        labels_folder = join(data_folder, 'labels')
        self.labels_folder = labels_folder
        assert isdir(labels_folder)

        ids = [name[0:4] for name in sorted(listdir(labels_folder))]
        self.ids = ids

        # split train/val
        validation_indices_file = join(data_folder, 'valid_ind.txt')
        assert isfile(validation_indices_file)
        validation_indices = np.loadtxt(validation_indices_file)
        validation_indices = ['%04d' % idx for idx in validation_indices]

        lookup = set(validation_indices)
        self.train_ids = []
        self.val_ids = []
        for vid in ids:
            if vid not in lookup and vid not in EXCLUDE_VID:
                self.train_ids.append(vid)
            elif vid not in EXCLUDE_VID:
                self.val_ids.append(vid)

        # find the meta-data for each video id

        self.meta = dict()
        for vid in ids:
            vid_labels_file = join(labels_folder, vid + '.mat')
            L = loadmat(vid_labels_file)
            n_frames = L['nframes']
            dimensions = L['dimensions']
            X = np.expand_dims(L['x'], axis=2)
            Y = np.expand_dims(L['y'], axis=2)
            V = np.expand_dims(L['visibility'], axis=2)
            gt = np.concatenate([X, Y, V], axis=2)

            self.meta[vid] = {
                'n_frames': n_frames[0][0],
                'dimensions': np.squeeze(dimensions),
                'gt': gt
            }
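Each meta entry stacks x, y and visibility into an array of shape (n_frames, n_joints, 3). A small usage sketch with a dummy stand-in for the 'gt' array (the 13-joint count and the instance name ds are assumptions):

import numpy as np

gt = np.zeros((151, 13, 3))       # stand-in for ds.meta[vid]['gt']: x, y, visibility
frame0 = gt[0]                    # all joints of the first frame
visible_xy = frame0[frame0[:, 2] > 0, :2]   # 2D locations of the visible joints

# with the loader above (hypothetical class name):
# ds = PennActionCropped('/tmp/data')
# gt = ds.meta[ds.train_ids[0]]['gt']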
Example #9
    def __init__(self, root='/tmp', url=None, name=None):
        """
            create a new instance of the ReId network
        :param root:
        """
        if url is None:
            url = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'
        if name is None:
            name = 'reid.h5'
        if not isdir(root):
            makedirs(root)

        filepath = join(root, name)
        if not isfile(filepath):
            print('could not find model.. downloading it')
            dl.download(url, filepath)

        if keras.__version__.startswith('2.2'):
            warnings.warn(
                "This model only works properly with keras 2.1.3. Weights for other versions might not work properly")

        # ------- build model -------
        seq = Sequential()
        xception = Xception(weights='imagenet', input_shape=(221, 221, 3),
                            include_top=False, pooling='avg')
        seq.add(xception)

        # freeze first layers in pre-trained model
        for layer in xception.layers[0:-20]:
            layer.trainable = False