    def test_split_ret_type(self):

        train_idx, val_idx = du.get_train_val_split_from_idx([0, 1, 2], [1])

        assert_is_not_none(train_idx)
        assert_is_instance(train_idx, list)

        assert_is_not_none(val_idx)
        assert_is_instance(val_idx, list)
    def test_split_empty(self):

        train_idx, val_idx = du.get_train_val_split_from_idx([], [])

        assert_is_not_none(train_idx)
        assert_is_instance(train_idx, list)
        assert_equals(len(train_idx), 0)

        assert_is_not_none(val_idx)
        assert_is_instance(val_idx, list)
        assert_equals(len(val_idx), 0)
    def test_split_empty_val(self):

        train_idx, val_idx = du.get_train_val_split_from_idx([0, 1, 2], [])

        assert_list_equal(train_idx, [0, 1, 2])
        assert_list_equal(val_idx, [])
    def test_split_not_iterable_outside_range(self):

        train_idx, val_idx = du.get_train_val_split_from_idx(3, [40])

        assert_list_equal(train_idx, [0, 1, 2])
        assert_list_equal(val_idx, [40])
    def test_split_not_iterable(self):

        train_idx, val_idx = du.get_train_val_split_from_idx(3, [1])

        assert_list_equal(train_idx, [0, 2])
        assert_list_equal(val_idx, [1])
    def test_split_outside_range(self):

        train_idx, val_idx = du.get_train_val_split_from_idx([0, 1, 2], [4])

        assert_list_equal(train_idx, [0, 1, 2])
        assert_list_equal(val_idx, [4])
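For reference, the split behaviour these tests pin down can be sketched as below; this is a minimal illustration under the assumption that du.get_train_val_split_from_idx accepts either an index list or a plain sample count, not the project's actual implementation.

def get_train_val_split_from_idx(idx, val_idx):
    # Sketch only: mirrors the behaviour asserted in the tests above.
    try:
        all_idx = list(idx)  # already an iterable of indices
    except TypeError:
        all_idx = list(range(idx))  # an int is treated as a sample count: 3 -> [0, 1, 2]
    val = list(val_idx)
    # Validation indices are dropped from the training split when present;
    # out-of-range entries (e.g. 40 for 3 samples) stay in val untouched.
    train = [i for i in all_idx if i not in val]
    return train, val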
def nyudv2_to_lmdb(path_mat, dst_prefix, dir_dst, val_list=None):

    val_list = val_list or []
    if not os.path.isfile(path_mat):
        raise IOError("Path is not a regular file (%s)" % path_mat)

    _, ext = os.path.splitext(path_mat)

    if ext != '.mat' and ext != '.h5' and ext != '.hdf5':
        raise IOError("Invalid file type, expecting mat/h5/hdf5 file (%s)" %
                      path_mat)

    try:
        data = io.loadmat(path_mat)
    except (ValueError, NotImplementedError):
        # MATLAB files saved with format version >= 7.3 are HDF5 containers
        data = h5py.File(path_mat, 'r')

    lmdb_info = []
    train_idx = None

    for typ in [
            NYUDV2DataType.IMAGES, NYUDV2DataType.LABELS, NYUDV2DataType.DEPTHS
    ]:

        if typ == NYUDV2DataType.IMAGES:

            dat = [mu.cwh_to_chw(x).astype(float) for x in data[typ]]

        elif typ == NYUDV2DataType.LABELS:

            dat = np.expand_dims(data[typ], axis=1).astype(int)
            dat = big_arr_to_arrs(dat)

        elif typ == NYUDV2DataType.DEPTHS:

            dat = np.expand_dims(data[typ], axis=1).astype(float)
            dat = big_arr_to_arrs(dat)

        else:
            raise ValueError("unknown NYUDV2DataType")

        if train_idx is None:
            train_idx, val_idx = get_train_val_split_from_idx(
                len(dat), val_list)
            shuffle(train_idx)
            print(train_idx)

        #     # len(ndarray) same as ndarray.shape[0]
        #     if  len(labels) != len(imgs):
        #         raise ValueError("No. of images != no. of labels. (%d) != (%d)",
        #                          len(imgs), len(labels))
        #
        #     if  len(labels) != len(depths):
        #         raise ValueError("No. of depths != no. of labels. (%d) != (%d)",
        #                          len(depths), len(labels))

        print(typ, len(dat), dat[0].shape)

        fpath_lmdb = os.path.join(dir_dst,
                                  '%s%s_train_lmdb' % (dst_prefix, typ))
        to_lmdb.arrays_to_lmdb([dat[i] for i in train_idx], fpath_lmdb)

        lmdb_info.append((len(train_idx), fpath_lmdb))

        fpath_lmdb = os.path.join(dir_dst, '%s%s_val_lmdb' % (dst_prefix, typ))
        to_lmdb.arrays_to_lmdb([dat[i] for i in val_idx], fpath_lmdb)

        lmdb_info.append((len(val_idx), fpath_lmdb))

    return lmdb_info
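A hedged usage sketch follows; the .mat path, prefix, and output directory are placeholders, and the scipy.io/h5py/project-local imports (io, h5py, mu, to_lmdb) are assumed to exist in the surrounding module.

# Hypothetical invocation of nyudv2_to_lmdb; the paths below are examples only.
if __name__ == '__main__':
    lmdb_info = nyudv2_to_lmdb('nyu_depth_v2_labeled.mat',  # NYU Depth v2 labeled dump
                               'nyudv2_',                    # prefix for the LMDB names
                               '/tmp/nyudv2_lmdb',           # destination directory
                               val_list=[0, 5, 10])          # indices held out for validation
    # One (sample count, lmdb path) pair per split and data type.
    for count, fpath in lmdb_info:
        print(count, fpath)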