Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--save_dir',           type=str,   default='./models', help='directory to store checkpointed models')
    parser.add_argument('--val_frac',           type=float, default=0.1,        help='fraction of data to be withheld in validation set')
    parser.add_argument('--ckpt_name',          type=str,   default='',         help='name of checkpoint file to load (blank means none)')

    parser.add_argument('--batch_size',         type=int,   default=64,         help='minibatch size')
    parser.add_argument('--state_dim',          type=int,   default=51,         help='number of state variables')
    parser.add_argument('--num_classes',        type=int,   default=4,          help='number of driver classes')
    parser.add_argument('--action_dim',         type=int,   default=2,          help='number of action variables')

    parser.add_argument('--num_epochs',         type=int,   default=50,         help='number of epochs')
    parser.add_argument('--learning_rate',      type=float, default=0.004,      help='learning rate')
    parser.add_argument('--decay_rate',         type=float, default=0.5,        help='decay rate for learning rate')
    parser.add_argument('--grad_clip',          type=float, default=5.0,        help='clip gradients at this value')
    parser.add_argument('--save_h5',            type=bool,  default=False,      help='whether to save network params to h5 file')

    parser.add_argument('--seq_length',         type=int,   default=50,         help='Sequence length for training')
    parser.add_argument('--burn_in_length',     type=int,   default=10,         help='Amount of time steps for initializing LSTM internal state')

    ############################
    #       Policy Network     #
    ############################
    parser.add_argument('--policy_size',        type=int,   default=128,        help='number of neurons in each layer')
    parser.add_argument('--num_policy_layers',  type=int,   default=2,          help='number of layers in the policy network')
    parser.add_argument('--recurrent',          type=bool,  default=False,      help='whether to use recurrent policy')
    parser.add_argument('--oracle',             type=bool,  default=False,      help='whether to include class as input to policy')
    parser.add_argument('--dropout_level',      type=float, default=1.0,        help='dropout applied to fc policy')

    args = parser.parse_args()

    # Construct model
    net = bc_policy.BCPolicy(args)

    # Export model parameters or perform training
    if args.save_h5:
        data_loader = DataLoader(args.batch_size, args.val_frac, args.seq_length + args.burn_in_length, args.oracle)
        save_h5(args, net, data_loader)
    else:
        train(args, net)
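Note: argparse's type=bool does not behave like an on/off switch, because bool('False') is True, so flags such as --save_h5, --recurrent, and --oracle above evaluate to True for any non-empty value. Below is a minimal sketch of the usual store_true alternative; the flag name is taken from the example, the rest is illustrative.

import argparse

parser = argparse.ArgumentParser()
# store_true makes the flag a real switch: absent -> False, present -> True
parser.add_argument('--save_h5', action='store_true',
                    help='whether to save network params to h5 file')
args = parser.parse_args(['--save_h5'])
print(args.save_h5)  # True; parse_args([]) would give False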
Example #2
    env.close()
    exit(1)

### use the GT vision
rgb, depth, seg, obj_seg = cam.get_observation()
Image.fromarray(
    (rgb * 255).astype(np.uint8)).save(os.path.join(out_dir, 'rgb.png'))
pickle.dump(seg, open(os.path.join(out_dir, 'seg.pkl'), "wb"))
pickle.dump(obj_seg, open(os.path.join(out_dir, 'obj_seg.pkl'), "wb"))

cam_XYZA_id1, cam_XYZA_id2, cam_XYZA_pts = cam.compute_camera_XYZA(depth)
cam_XYZA = cam.compute_XYZA_matrix(cam_XYZA_id1, cam_XYZA_id2, cam_XYZA_pts,
                                   depth.shape[0], depth.shape[1])
save_h5(os.path.join(out_dir, 'cam_XYZA.h5'),
        [(cam_XYZA_id1.astype(np.uint64), 'id1', 'uint64'),
         (cam_XYZA_id2.astype(np.uint64), 'id2', 'uint64'),
         (cam_XYZA_pts.astype(np.float32), 'pc', 'float32'),
        ])

gt_nor = cam.get_normal_map()
Image.fromarray(((gt_nor + 1) / 2 * 255).astype(np.uint8)).save(
    os.path.join(out_dir, 'gt_nor.png'))

object_link_ids = env.movable_link_ids
gt_movable_link_mask = cam.get_movable_link_mask(object_link_ids)
Image.fromarray((gt_movable_link_mask > 0).astype(np.uint8) * 255).save(
    os.path.join(out_dir, 'interaction_mask.png'))

# sample a pixel to interact
xs, ys = np.where(gt_movable_link_mask > 0)
if len(xs) == 0:
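The save_h5 call in this snippet takes a target path and a list of (array, name, dtype) tuples; the helper itself is not part of the excerpt. Here is a minimal h5py-based sketch of a writer with that calling convention; the compression choice and internals are assumptions, not the repository's actual implementation.

import h5py

def save_h5(fn, data_list):
    # Sketch: one dataset per (array, name, dtype) tuple.
    with h5py.File(fn, 'w') as fout:
        for arr, name, dtype in data_list:
            fout.create_dataset(name, data=arr.astype(dtype), compression='gzip')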
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--save_dir',
                        type=str,
                        default='./models',
                        help='directory to store checkpointed models')
    parser.add_argument(
        '--val_frac',
        type=float,
        default=0.1,
        help='fraction of data to be withheld in validation set')
    parser.add_argument(
        '--ckpt_name',
        type=str,
        default='',
        help='name of checkpoint file to load (blank means none)')

    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='minibatch size')
    parser.add_argument('--state_dim',
                        type=int,
                        default=51,
                        help='number of state variables')
    parser.add_argument('--action_dim',
                        type=int,
                        default=2,
                        help='number of action variables')
    parser.add_argument('--z_dim',
                        type=int,
                        default=2,
                        help='dimensions of latent variable')
    parser.add_argument('--sample_size',
                        type=int,
                        default=10,
                        help='number of samples from z')

    parser.add_argument('--num_epochs',
                        type=int,
                        default=50,
                        help='number of epochs')
    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.004,
                        help='learning rate')
    parser.add_argument('--decay_rate',
                        type=float,
                        default=0.5,
                        help='decay rate for learning rate')
    parser.add_argument('--grad_clip',
                        type=float,
                        default=5.0,
                        help='clip gradients at this value')
    parser.add_argument('--save_h5',
                        type=bool,
                        default=False,
                        help='Whether to save network params to h5 file')

    ###############################
    #          Encoder            #
    ###############################
    parser.add_argument('--encoder_size',
                        type=int,
                        default=128,
                        help='number of neurons in each LSTM layer')
    parser.add_argument('--num_encoder_layers',
                        type=int,
                        default=2,
                        help='number of layers in the LSTM')
    parser.add_argument('--seq_length',
                        type=int,
                        default=50,
                        help='LSTM sequence length')

    ############################
    #       Policy Network     #
    ############################
    parser.add_argument('--policy_size',
                        type=int,
                        default=128,
                        help='number of neurons in each feedforward layer')
    parser.add_argument('--num_policy_layers',
                        type=int,
                        default=2,
                        help='number of layers in the policy network')
    parser.add_argument('--recurrent',
                        type=bool,
                        default=False,
                        help='whether to use recurrent policy')
    parser.add_argument('--dropout_level',
                        type=float,
                        default=1.0,
                        help='percent of state values to keep')

    ############################
    #       Reconstructor      #
    ############################
    parser.add_argument('--rec_size',
                        type=int,
                        default=64,
                        help='number of neurons in each feedforward layer')
    parser.add_argument('--num_rec_layers',
                        type=int,
                        default=2,
                        help='number of layers in the policy network')
    parser.add_argument('--rec_weight',
                        type=float,
                        default=0.03,
                        help='weight applied to reconstruction cost')

    args = parser.parse_args()

    # Construct model
    net = vae.VariationalAutoencoder(args)

    # Export model parameters or perform training
    if args.save_h5:
        data_loader = DataLoader(args.batch_size, args.val_frac,
                                 args.seq_length)
        save_h5(args, net, data_loader)
    else:
        train(args, net)
Example #4
                ]

                # Deal with batches of size 1
                if len(oris) == 1:
                    img_keypoints[0] = img_keypoints[0].unsqueeze(0)
                    img_keypoints[1] = img_keypoints[1].unsqueeze(0)
                    img_keypoints[2] = img_keypoints[2].unsqueeze(0)

                descriptors, patches = model({key: imgs}, img_keypoints,
                                             [key] * len(img_keypoints[0]))
                all_desc.append(descriptors.data.cpu().numpy())
            seq_descriptors[key] = np.vstack(all_desc).astype(np.float32)

        cur_path = os.path.join(args.save_path, args.method_name, seq_name)
        if not os.path.exists(cur_path):
            os.makedirs(cur_path)
        save_h5(seq_descriptors, os.path.join(cur_path, 'descriptors.h5'))
        sub_files_in = [
            'keypoints{}.h5'.format(suffix), 'scales{}.h5'.format(suffix),
            'angles{}.h5'.format(suffix), 'scores{}.h5'.format(suffix)
        ]
        sub_files_out = ['keypoints.h5', 'scales.h5', 'angles.h5', 'scores.h5']

        for sub_file_in, sub_file_out in zip(sub_files_in, sub_files_out):
            shutil.copyfile(
                os.path.join(args.dataset_path, seq_name, sub_file_in),
                os.path.join(cur_path, sub_file_out))

        print('Done sequence: {}'.format(seq_name))
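Here save_h5 is called as save_h5(dict, path), with one array per image key (descriptors in this case). A minimal sketch of a dictionary-of-arrays writer consistent with that signature, assuming a plain h5py backend:

import h5py

def save_h5(dict_to_save, filename):
    # Sketch: each dictionary key becomes one dataset in the file.
    with h5py.File(filename, 'w') as f:
        for key, value in dict_to_save.items():
            f.create_dataset(key, data=value)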
Example #5
adv[~mask3d] = np.nan
str[~mask3d] = np.nan
div[~mask3d] = np.nan
smb[~mask3d] = np.nan

melt = dHdt + div - smb
melt_steady = div - smb
mass = dHdt * RHO_ICE * 1e-12  # kg/yr -> Gt/yr
melt_mean = np.nanmean(melt, axis=2)

# NOTE: Change name of variables for new version

data = {
    "dHdt_melt10": melt,
    "dHdt_steady10": melt_steady,
    "dMdt_net10": mass,
    "dHdt_net10": dHdt,
    "dHdt_melt_mean10": melt_mean,
    "H_filt10": H,
    "dHdt_adv_filt10": adv,
    "dHdt_str_filt10": str,
    "dHdt_div_filt10": div,
    "smb_gemb_filt10": smb,
}
if SAVE:
    save_h5(FCUBE, data, "a")
    print("saved.")

plt.matshow(melt_mean, cmap="RdBu", vmin=-5, vmax=5)
plt.show()
Example #6
            num_patches.append(patches.shape[0])

            scene_patches[img_name] = patches
            scene_kp[img_name] = keypoints
            scene_scale[img_name] = scales
            sec_ori[img_name] = angles
            sec_resp[img_name] = responses

        print('Processed {} images: {} patches/image'.format(
            len(num_patches),
            np.array(num_patches).mean()))

        cur_path = os.path.join(args.folder_outp, scene)
        # if args.force_upright == 'no-dups':
        #     cur_path += '_upright_v1'
        # elif args.force_upright == 'no-dups-more-points':
        #     cur_path += '_upright_v2'
        if not os.path.isdir(cur_path):
            os.makedirs(cur_path)

        save_h5(scene_patches,
                os.path.join(cur_path, 'patches{}.h5'.format(suffix)))
        save_h5(scene_kp,
                os.path.join(cur_path, 'keypoints{}.h5'.format(suffix)))
        save_h5(scene_scale,
                os.path.join(cur_path, 'scales{}.h5'.format(suffix)))
        save_h5(sec_ori, os.path.join(cur_path, 'angles{}.h5'.format(suffix)))
        save_h5(sec_resp, os.path.join(cur_path, 'scores{}.h5'.format(suffix)))

    print('Done!')
Example #7
err_div = 2 * np.abs(H) * err_u / dx

err_melt = np.sqrt(err_dHdt ** 2 + err_div ** 2 + ds.err_smb ** 2)

# Save

if 0:

    FILE_SAVE = "/Users/fspaolo/work/melt/data/FULL_CUBE_v4.h5"

    save_h5(
        FILE_SAVE,
        {
            "dh_err10": err_dh,
            "H_err10": err_H,
            "dHdt_net_err10": err_dHdt,
            "dHdt_div_err10": err_div,
            "dHdt_melt_err10": err_melt,
        },
    )  # not corrected for firn

    print("saved ->", FILE_SAVE)


# Plot to double check


def plot_error(t, cube, error, indices, title="", detrend=False):

    for k, (i, j) in enumerate(indices, start=1):
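The error propagation near the top of this snippet combines the thinning, flux-divergence, and SMB uncertainties in quadrature, which assumes the error terms are independent. A self-contained numpy sketch of that combination (the function name is illustrative):

import numpy as np

def combine_in_quadrature(*errors):
    # Independent uncertainties add in quadrature: sqrt(sum of squares).
    return np.sqrt(sum(np.square(e) for e in errors))

# e.g. err_melt = combine_in_quadrature(err_dHdt, err_div, ds.err_smb)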
Example #8
    def make_emb_db(self,
                    args,
                    net,
                    data_loader,
                    eval_sampled,
                    eval_per_class,
                    newly_trained=True,
                    batch_size=None,
                    mode='val'):
        """

        :param batch_size:
        :param eval_sampled:
        :param eval_per_class:
        :param newly_trained:
        :param mode:
        :param args: utils args
        :param net: trained top_model network
        :param data_loader: DataLoader object
        :return: None
        """

        if newly_trained:
            net.eval()
            if batch_size is None:
                batch_size = args.batch_size

            steps = int(np.ceil(len(data_loader) / batch_size))

            test_classes = np.zeros(len(data_loader.dataset))
            test_seen = np.zeros(len(data_loader.dataset))
            test_paths = np.empty(dtype='S20',
                                  shape=(len(data_loader.dataset),))
            if args.feat_extractor == 'resnet50':
                test_feats = np.zeros((len(data_loader.dataset), 2048))
            elif args.feat_extractor == 'resnet18':
                test_feats = np.zeros((len(data_loader.dataset), 512))
            else:
                raise Exception('Not handled feature extractor')

            for idx, (img, lbl, seen, path) in enumerate(data_loader):

                if args.cuda:
                    img = img.cuda()
                img = Variable(img)

                output = net.forward(img, None, single=True)
                output = output.data.cpu().numpy()

                end = min((idx + 1) * batch_size, len(test_feats))

                test_feats[idx * batch_size:end, :] = output
                test_classes[idx * batch_size:end] = lbl
                test_paths[idx * batch_size:end] = path
                test_seen[idx * batch_size:end] = seen.to(int)

            utils.save_h5(f'{mode}_ids', test_paths, 'S20',
                          os.path.join(self.save_path, f'{mode}Ids.h5'))
            utils.save_h5(f'{mode}_classes', test_classes, 'i8',
                          os.path.join(self.save_path, f'{mode}Classes.h5'))
            utils.save_h5(f'{mode}_feats', test_feats, 'f',
                          os.path.join(self.save_path, f'{mode}Feats.h5'))
            utils.save_h5(f'{mode}_seen', test_seen, 'i2',
                          os.path.join(self.save_path, f'{mode}Seen.h5'))

        test_feats = utils.load_h5(
            f'{mode}_feats', os.path.join(self.save_path, f'{mode}Feats.h5'))
        test_classes = utils.load_h5(
            f'{mode}_classes', os.path.join(self.save_path,
                                            f'{mode}Classes.h5'))
        test_seen = utils.load_h5(
            f'{mode}_seen', os.path.join(self.save_path, f'{mode}Seen.h5'))

        utils.calculate_k_at_n(args,
                               test_feats,
                               test_classes,
                               test_seen,
                               logger=self.logger,
                               limit=args.limit_samples,
                               run_number=args.number_of_runs,
                               save_path=self.save_path,
                               sampled=eval_sampled,
                               per_class=eval_per_class,
                               mode=mode)

        self.logger.info('results at: ' + self.save_path)
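This example stores and reloads single named datasets via utils.save_h5(name, data, dtype, path) and utils.load_h5(name, path). The utils module is not shown; below is a minimal h5py sketch consistent with that calling convention, an assumption rather than the project's actual code.

import h5py

def save_h5(name, data, dtype, path):
    # Sketch: write one named dataset with an explicit dtype (e.g. 'S20', 'i8', 'f').
    with h5py.File(path, 'w') as f:
        f.create_dataset(name, data=data, dtype=dtype)

def load_h5(name, path):
    # Sketch: read the named dataset back as a numpy array.
    with h5py.File(path, 'r') as f:
        return f[name][...]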
Example #9
        # Sort the scores and subsample
        indices = np.argsort(scores)[::-1]
        if args.num_kp > 0:
            top_k = indices[:args.num_kp]
        else:
            top_k = indices

        # Flip coordinates: network provides [y, x]
        seq_keypoints[key] = np.concatenate(
            [keypoints[top_k, 1][..., None], keypoints[top_k, 0][..., None]],
            axis=1)
        seq_scales[key] = keypoints[top_k, 2]
        seq_scores[key] = scores[top_k]
        seq_descriptors[key] = descriptors[top_k, :]

        # print('Processed "{}" in {:.02f} sec. Found {} features'.format(
        #     key, t_end - t_start, keypoints.shape[0]))

    print('Average number of keypoints per image: {:.02f}'.format(
        np.mean([v.shape[0] for v in seq_keypoints.values()])))

    cur_path = os.path.join(args.save_path, args.method_name, seq)
    if not os.path.exists(cur_path):
        os.makedirs(cur_path)
    save_h5(seq_descriptors, os.path.join(cur_path, 'descriptors.h5'))
    save_h5(seq_keypoints, os.path.join(cur_path, 'keypoints.h5'))
    save_h5(seq_scores, os.path.join(cur_path, 'scores.h5'))
    save_h5(seq_scales, os.path.join(cur_path, 'scales.h5'))

print('Done')
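A quick way to sanity-check the per-image dictionaries written above is to reopen one of the files with h5py and print a few dataset names and shapes; the path below is illustrative, standing in for the descriptors.h5 written to cur_path.

import h5py

# Sketch: inspect a few of the saved per-image descriptor arrays.
with h5py.File('descriptors.h5', 'r') as f:  # path is illustrative
    for key in list(f.keys())[:5]:
        print(key, f[key].shape)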
Example #10
def main():
    # TODO: COMBINE CUBEDIV.PY AND CUBEDEM.PY?
    """
    1. Reference all time series
    2. Correct dh for dFAC
    3. Correct dh for SLT
    4. Compute h(t) time series: h_mean + dh(t)
    5. Compute freeboard: H_freeb = h(t) - MSL
    6. Compute thickness and draft

    """
    print("loading ...")

    x, y, t, dh, h_mean, fac, msl, slt = read_h5(
        FILE_CUBE,
        [x_var, y_var, t_var, dh_var, h_var, fac_var, msl_var, slt_var],
    )

    # TODO: Maybe do this from the beginning (in cubefilt2.py)?
    # Mask out constant values (pole hole)
    dhdt = np.apply_along_axis(np.gradient, 2, dh)

    dh[dhdt == 0] = np.nan

    # Generate time series of sea-level trend (2D -> 3D)
    slt = slt[:, :, None] * (t - REF_TIME)

    # --- Smooth and reference series --- #

    if SMOOTH_WINDOW != 0:
        print("smoothing ...")

        dh = np.apply_along_axis(smooth_series, 2, dh)
        fac = np.apply_along_axis(smooth_series, 2, fac)

    print("referencing ...")

    # Correct mean height CS2 for FAC (before referencing)
    k_ref, = find_nearest(t, REF_TIME)
    h_mean = h_mean - fac[:, :, k_ref]

    # Reference all time series to a common epoch
    dh = np.apply_along_axis(lambda y: y - y[k_ref], 2, dh)
    fac = np.apply_along_axis(lambda y: y - y[k_ref], 2, fac)

    if PLOT:

        i_, j_ = test_ij_3km["PEAK_2"]

        plt.figure()
        plt.plot(t, dh[i_, j_, :], label="dh")
        plt.plot(t, fac[i_, j_, :], label="fac")
        plt.plot(t, slt[i_, j_, :], label="slt")
        plt.legend()
        plt.show()

        plt.pcolormesh(fac[:, :, 10], cmap='RdBu', rasterized=True)
        plt.plot([j_], [i_], 'or')
        plt.show()

    # Correct dh(t)
    dh_cor = dh - fac - slt

    # Compute time-evolving DEM (and correct for FAC)
    h = h_mean[:, :, None] + dh_cor

    if PLOT:

        plt.figure()
        plt.plot(t, dh[i_, j_, :], label="dh")
        plt.plot(t, dh_cor[i_, j_, :], label="dh_cor")
        plt.legend()

        plt.figure()
        plt.plot(t, h[i_, j_, :], label="h_mean + dh_cor 1")
        plt.legend()
        plt.show()

    # h(t) -> Freeboard, Draft, Thickness
    rho_ocean = 1028.0
    rho_ice = 917.0

    H_freeb = h - msl[:, :, None]
    H_draft = H_freeb * ((rho_ocean / (rho_ocean - rho_ice)) - 1)
    H = H_freeb * rho_ocean / (rho_ocean - rho_ice)

    # if PLOT:
    #     plt.figure()
    #     plt.plot(t, H_freeb[i_, j_, :] / 1000.0)
    #     plt.plot(t, -H_draft[i_, j_, :] / 1000.0)

    if SAVE:
        data = {"h10": h, "H_freeb10": H_freeb, "H_draft10": H_draft, "H10": H}
        save_h5(FILE_OUT, data, 'a')
        print("saved.")