Example no. 1
def convert_h5_to_projected(dataset_path, save_filename):

    data = read_h5(dataset_path)

    # Allocate the output arrays once, based on the number of frames in the file
    n_frames = data['pose/3d-univ'][()].shape[0]

    data_ = {
        'train': {
            '2d': np.zeros((n_frames, 32, 2), dtype=np.float32),
            '3d': np.zeros((n_frames, 32, 3), dtype=np.float32),
            'w': np.zeros(n_frames, dtype=np.float32),
            'h': np.zeros(n_frames, dtype=np.float32)
        }
    }

    for i in tqdm(range(data['pose/3d'][()].shape[0])):
        # Look up the camera that recorded this frame by its id
        camera_id = data['camera'][()][i]
        camera_index = next(k for k, cam in enumerate(jnt.CAMERAS)
                            if cam['id'] == str(camera_id))

        camera = copy.deepcopy(jnt.CAMERAS[camera_index])
        # Extrinsics: convert the translation from millimetres to metres
        camera['translation'] = np.array(
            jnt.EXTRINSIC_PARAMS['S1'][camera_index]['translation']) / 1000
        camera['orientation'] = np.array(
            jnt.EXTRINSIC_PARAMS['S1'][camera_index]['orientation'])
        # Intrinsics: map the principal point and focal length into
        # normalized screen coordinates
        camera['center'] = normalize_screen_coordinates(
            np.array(camera['center']), w=camera['res_w'], h=camera['res_h'])
        camera['focal_length'] = np.array(
            camera['focal_length']) / camera['res_w'] * 2.0
        # Pack focal length, center and distortion coefficients into a single
        # intrinsic parameter vector for project_to_2d
        camera['intrinsic'] = np.concatenate(
            (camera['focal_length'], camera['center'],
             camera['radial_distortion'], camera['tangential_distortion']))

        pos_3d_world = data['pose/3d'][()][i, :, :] / 1000
        # world_to_camera(pos_3d_world, R=camera['orientation'], t=camera['translation'])
        pos_3d_cam = data['pose/3d-univ'][()][i, :, :] / 1000
        pos_2d = wrap(project_to_2d, True, pos_3d_cam, camera['intrinsic'])
        pos_2d_pixel_space = image_coordinates(pos_2d,
                                               w=camera['res_w'],
                                               h=camera['res_h'])

        data_['train']['2d'][i, :, :] = pos_2d_pixel_space[:, :]
        data_['train']['3d'][i, :, :] = pos_3d_cam[:, :]
        data_['train']['w'][i] = camera['res_w']
        data_['train']['h'][i] = camera['res_h']

    np.savez_compressed(save_filename, data=data_)
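
For reference, a minimal usage sketch (the file names are illustrative, not from the original code); because savez_compressed stores the nested dict as a pickled object array, loading it back needs allow_pickle=True:

convert_h5_to_projected("annot.h5", "output/h36m_projected")
loaded = np.load("output/h36m_projected.npz", allow_pickle=True)["data"].item()
print(loaded['train']['2d'].shape, loaded['train']['3d'].shape)  # (N, 32, 2), (N, 32, 3)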
Example no. 2
def make_forward_scanner(dset_name, data_dir, input_spec, scan_spec,
                         scan_params, **params):
    """ Creates a DataProvider ForwardScanner from a dset name """

    # Reading EM image
    img = utils.read_h5(
        os.path.join(data_dir, dset_name + "_inputRawImages.h5"))
    img = (img / 2000.).astype("float32")

    # Creating DataProvider Dataset
    vd = dp.Dataset()

    vd.add_data(key="input", data=img)
    vd.set_spec(input_spec)

    # Returning DataProvider ForwardScanner
    return dp.ForwardScanner(vd, scan_spec, params=scan_params)
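
A hedged usage sketch; the dataset name, data directory and the spec/parameter values below are illustrative assumptions, not values taken from the original code:

input_spec = {"input": (18, 160, 160)}        # assumed (z, y, x) patch shape
scan_spec = {"affinity": (3, 18, 160, 160)}   # assumed output spec
scan_params = {"stride": (0.5, 0.5, 0.5)}     # assumed scan parameters

scanner = make_forward_scanner("val1", "./data",  # reads ./data/val1_inputRawImages.h5
                               input_spec, scan_spec, scan_params)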
Example no. 3
def make_forward_scanner(dset_name, data_dir, input_spec, scan_spec,
                         scan_params, **params):
    """ Creates a DataProvider ForwardScanner from a dset name """

    # Reading EM image
    #    img = utils.read_h5(dset_name)
    print("image path", os.path.join(data_dir, dset_name + "_resized4_img.h5"))
    img = utils.read_h5(os.path.join(data_dir, dset_name + "_resized4_img.h5"))
    img = (img / 255.).astype("float32")

    # Creating DataProvider Dataset
    vd = dp.VolumeDataset()

    vd.add_raw_data(key="input", data=img)
    vd.set_spec(input_spec)

    # Returning DataProvider ForwardScanner
    return dp.ForwardScanner(vd, scan_spec, params=scan_params)
Example no. 4
def load_data(fname):

    vnames = ["x", "y", "t", "dh_xcal", "fac_gemb_err8", "smb_gemb_err8"]

    (x, y, t, dh, err_fac, err_smb) = read_h5(fname, vnames)

    # Create xarray
    ds = xr.Dataset(
        {
            "dh": (("y", "x", "t"), dh),
            "err_fac": (("y", "x", "t"), err_fac),
            "err_smb": (("y", "x", "t"), err_smb),
        },
        coords={"y": y, "x": x, "t": t},
    )

    print(ds)

    return ds
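
A short usage sketch (the file name is hypothetical); since load_data returns an xarray.Dataset, slices and reductions can be taken directly on the named variables:

ds = load_data("FULL_CUBE_v4.h5")   # hypothetical file name
dh_first = ds["dh"].isel(t=0)       # first time slice, shape (y, x)
dh_mean = ds["dh"].mean(dim="t")    # time-averaged field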
Example no. 5
from __future__ import print_function

from joint_set import JointSet
import constants as jnt
from utils import parse_metadata, read_h5, read_npz
from conversion import convert_json_to_npz, convert_h5_directory_to_augmented, convert_h5_to_projected
from visualize import Visualize
from tqdm import tqdm
import numpy as np

if __name__ == "__main__":

    viz = Visualize(50)

    data = read_h5("annot.h5")

    viz.place_random_cameras(50, [3000, 3500],
                             data['pose/3d-univ'][()][0, 0, :])

    data_2d = np.zeros((0, 32, 2))
    data_3d = np.zeros((0, 32, 3))

    point_2d = viz.get_projection(data['pose/3d-univ'][()][0, :, :], 32,
                                  jnt.CAMERAS[0]['focal_length'],
                                  jnt.CAMERAS[0]['center'])

    #viz.plot_3d(data['pose/3d-univ'][()][0, :, :], True)

    #viz.plot_2d(point_2d)

    #convert_h5_directory_to_augmented("../H36M_H5_Annotations/**/**/*.h5", 15)
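
A minimal continuation sketch, assuming get_projection returns the 2D coordinates of all 32 joints (as Example no. 7 suggests), showing how the empty data_2d buffer above could be grown frame by frame:

data_2d = np.vstack((data_2d, np.asarray(point_2d).reshape(1, 32, 2)))
print(data_2d.shape)  # -> (1, 32, 2)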
Example no. 6
                      & (lon < blon + dlon)
                      & (lat > blat - dlat)
                      & (lat < blat + dlat))

    if return_index:
        return (ii, jj)
    else:
        melt[ii, jj] = np.nan

        return melt


# ------------------------------------------------

print("loading data ...")
x, y, t, H, adv, str, div, smb, mask = read_h5(
    FCUBE, [xvar, yvar, tvar, hvar, avar, svar, dvar, mvar, kvar])

# Output containers
dHdt = np.full_like(H, np.nan)
dt = t[1] - t[0]

count = 0
count_plot = 0
n_plots = 10  # number of plots before exiting

for i in range(H.shape[0]):
    for j in range(H.shape[1]):

        # Only for plotting purposes
        if PLOT:
            i, j = list(test_ij_3km.values())[count]
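
The body of the loop that fills dHdt is truncated in this excerpt. Purely as an illustrative sketch (not the author's code), the per-cell thickness rate over the whole cube could also be computed in a single vectorized call:

dHdt_vec = np.gradient(H, dt, axis=2)  # time derivative along the third axis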
Example no. 7
def convert_h5_directory_to_augmented(directory_path, cam_count):

    files = glob.glob(directory_path)

    data = {}

    viz = Visualize(cam_count)

    prev_subject = ""

    for file in files:

        meta = file.split("/")
        subject = meta[2]
        """
        if subject != prev_subject:
            if data is not None:
                np.savez_compressed("output/h36m_augment_" + subject + "_cam_count_" + str(cam_count), data=data, cameras=jnt.CAMERAS)
                print("Dumped data for", subject)
            data = {}
            prev_subject = subject
        """

        action_name = meta[3].split("-")[0]

        if subject not in data:
            data[subject] = {}

        if action_name not in data[subject]:
            data[subject][action_name] = {
                '2d': np.empty((0, 32, 2), dtype=np.float32),
                '3d': np.empty((0, 32, 3), dtype=np.float32)
            }

        h5_data = read_h5(file)

        print("Processing", subject, "for", action_name)

        if action_name != "SittingDown":
            continue

        for i in tqdm(range(h5_data['pose/3d'][()].shape[0])):
            viz.place_random_cameras(cam_count, [3000, 3500],
                                     h5_data['pose/3d'][i, 0, :])
            for j in range(4):
                for k in range(cam_count):
                    point_2d = viz.get_projection(
                        h5_data['pose/3d-univ'][()][i, :, :], k,
                        jnt.CAMERAS[j]['focal_length'],
                        jnt.CAMERAS[j]['center']).reshape(1, 32, 2)
                    point_3d = viz.get_camspace_coord(
                        h5_data['pose/3d-univ'][()][i, :, :],
                        k).reshape(1, 32, 3)
                    R, t = viz.get_rotation_and_translation(k)

                    data[subject][action_name]['2d'] = np.vstack(
                        (data[subject][action_name]['2d'], point_2d))
                    data[subject][action_name]['3d'] = np.vstack(
                        (data[subject][action_name]['3d'], point_3d))
                    # Note: cam_id, R and t are overwritten on every pass, so
                    # only the values from the last camera/frame are retained
                    data[subject][action_name]['cam_id'] = jnt.CAMERAS[j]['id']
                    data[subject][action_name]['R'] = R
                    data[subject][action_name]['t'] = t

    np.savez_compressed("output/h36m_augment_sitting_down_cam_count" +
                        str(cam_count),
                        data=data,
                        cameras=jnt.CAMERAS)
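
For reference, Example no. 5 contains (commented out) the call that drives this conversion; the glob pattern implies a ../H36M_H5_Annotations/&lt;subject&gt;/&lt;action&gt;-.../ directory layout, which is what meta[2] and meta[3] above rely on:

convert_h5_directory_to_augmented("../H36M_H5_Annotations/**/**/*.h5", 15)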
Example no. 8
buoyancy = rho_ocean / (rho_ocean - rho_ice)
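# With rho_ocean = 1028 and rho_ice = 917 (the density values used in
# Example no. 9), this freeboard-to-thickness factor is roughly 9.26.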

# Load Firn error

err_H = np.sqrt(err_dh[:, :, None] ** 2 + ds.err_fac ** 2) * buoyancy

# --- Error in thickness rate (dH/dt) --- #

dt = 1.0  # years

err_H2 = np.roll(err_H, 3, axis=2)
err_dHdt = np.sqrt(err_H ** 2 + err_H2 ** 2) / dt

# --- Error melt rate (b) --- #

H = read_h5(FILE_CUBE, ["H_filt10"])

# Assuming u and v are independent and have the same error

err_u = 5.0  # m/yr
dx = 3000.0  # m

err_div = 2 * np.abs(H) * err_u / dx

err_melt = np.sqrt(err_dHdt ** 2 + err_div ** 2 + ds.err_smb ** 2)

# Save

if 0:

    FILE_SAVE = "/Users/fspaolo/work/melt/data/FULL_CUBE_v4.h5"
Example no. 9
def main():
    # TODO: COMBINE CUBEDIV.PY AND CUBEDEM.PY?
    """
    1. Reference all time series
    2. Correct dh for dFAC
    3. Correct dh for SLT
    4. Compute h(t) time series: h_mean + dh(t)
    5. Compute freeboard: H_freeb = h(t) - MSL
    6. Compute thickness and draft

    """
    print("loading ...")

    x, y, t, dh, h_mean, fac, msl, slt = read_h5(
        FILE_CUBE,
        [x_var, y_var, t_var, dh_var, h_var, fac_var, msl_var, slt_var],
    )

    # TODO: Maybe do this from the beginning (in cubefilt2.py)?
    # Mask out constant values (pole hole)
    dhdt = np.apply_along_axis(np.gradient, 2, dh)

    dh[dhdt == 0] = np.nan

    # Generate time series of sea-level trend (2D -> 3D)
    slt = slt[:, :, None] * (t - REF_TIME)

    # --- Smooth and reference series --- #

    if SMOOTH_WINDOW != 0:
        print("smoothing ...")

        dh = np.apply_along_axis(smooth_series, 2, dh)
        fac = np.apply_along_axis(smooth_series, 2, fac)

    print("referencing ...")

    # Correct mean height CS2 for FAC (before referencing)
    k_ref, = find_nearest(t, REF_TIME)
    h_mean = h_mean - fac[:, :, k_ref]

    # Reference all time series to a common epoch
    dh = np.apply_along_axis(lambda y: y - y[k_ref], 2, dh)
    fac = np.apply_along_axis(lambda y: y - y[k_ref], 2, fac)

    if PLOT:

        i_, j_ = test_ij_3km["PEAK_2"]

        plt.figure()
        plt.plot(t, dh[i_, j_, :], label="dh")
        plt.plot(t, fac[i_, j_, :], label="fac")
        plt.plot(t, slt[i_, j_, :], label="slt")
        plt.legend()
        plt.show()

        plt.pcolormesh(fac[:, :, 10], cmap='RdBu', rasterized=True)
        plt.plot([j_], [i_], 'or')
        plt.show()

    # Correct dh(t)
    dh_cor = dh - fac - slt

    # Compute time-evolving DEM (and correct for FAC)
    h = h_mean[:, :, None] + dh_cor

    if PLOT:

        plt.figure()
        plt.plot(t, dh[i_, j_, :], label="dh")
        plt.plot(t, dh_cor[i_, j_, :], label="dh_cor")
        plt.legend()

        plt.figure()
        plt.plot(t, h[i_, j_, :], label="h_mean + dh_cor 1")
        plt.legend()
        plt.show()

    # h(t) -> Freeboard, Draft, Thickness
    rho_ocean = 1028.0
    rho_ice = 917.0

    H_freeb = h - msl[:, :, None]
    H_draft = H_freeb * ((rho_ocean / (rho_ocean - rho_ice)) - 1)
    H = H_freeb * rho_ocean / (rho_ocean - rho_ice)
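    # Consistency check: since rho_ocean / (rho_ocean - rho_ice) equals
    # 1 + (rho_ocean / (rho_ocean - rho_ice) - 1), it follows that
    # H = H_freeb + H_draft.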

    # if PLOT:
    #     plt.figure()
    #     plt.plot(t, H_freeb[i_, j_, :] / 1000.0)
    #     plt.plot(t, -H_draft[i_, j_, :] / 1000.0)

    if SAVE:
        data = {"h10": h, "H_freeb10": H_freeb, "H_draft10": H_draft, "H10": H}
        save_h5(FILE_OUT, data, 'a')
        print("saved.")