Example #1
    def __init__(self):
        self.eyes = {}
        self.mouths = {}
        self.hold_overrides = {"blink": 0, "idle_mouth": 0}
        self.hold_frames = {}
        self.eye_latches = {"blink"}
        self.mouth_latches = {"smile_closed"}
        self.special_attr_eyes = {"blink": {"hold_frame": 0}}
        self.special_attr_mouths = {"idle_mouth": {"hold_frame": 0}}

        # Load Eyes
        utils.load_files("faces/eyes", self.special_attr_eyes, self.eyes,
                         "faces/eyes/")

        # Load Mouths
        utils.load_files("faces/mouths", self.special_attr_mouths, self.mouths,
                         "faces/mouths/")

        # Note: an override of 0 is valid, so test membership rather than truthiness.
        for i, value in self.mouths.items():
            self.hold_frames[i] = (value[self.hold_overrides[i]]
                                   if i in self.hold_overrides else
                                   value[-1])
        for i, value in self.eyes.items():
            self.hold_frames[i] = (value[self.hold_overrides[i]]
                                   if i in self.hold_overrides else
                                   value[-1])
        # print(self.hold_frames)
        self.parsed_latches = {}
        for item in self.eye_latches:
            eye = self.eyes[item]
            self.parsed_latches[eye] = item

        for item in self.mouth_latches:
            mouth = self.mouths[item]
            self.parsed_latches[mouth] = item
Example #2
def gen_semantic_data(semantic_folder,
                      scan_folder,
                      dst_folder,
                      proj_H=64,
                      proj_W=900):
    """ Generate projected semantic data in the shape of (64, 900, 20).
      The input raw data are in the shape of (Num_points, 20).
  """
    # specify the goal folder
    dst_folder = os.path.join(dst_folder, 'semantic')
    if os.path.exists(dst_folder):
        print('generating semantic data in: ', dst_folder)
    else:
        print('creating new semantic folder: ', dst_folder)
        os.makedirs(dst_folder)

    # load raw semantic predictions; semantic_folder (e.g. "data/semantic_probs")
    # holds the semantic segmentation output labels directly
    prob_paths = load_files(semantic_folder)

    # load corresponding LiDAR scans
    scan_paths = load_files(scan_folder)
    semantics = []
    # iterate over all semantic files in the given folder
    for idx in range(len(prob_paths)):
        # read semantic probabilities from the raw file
        probs = np.fromfile(prob_paths[idx], dtype=np.float32).reshape(
            (-1, 20))

        # read the point cloud from the raw scan file
        current_vertex = np.fromfile(scan_paths[idx],
                                     dtype=np.float32).reshape((-1, 4))

        # get range projection correspondences
        _, _, _, proj_idx = range_projection(current_vertex, max_range=np.inf)

        # init a semantic image array
        proj_prob = np.full((proj_H, proj_W, 20), -1,
                            dtype=np.float32)  # [H,W]: probs

        # fill in a semantic image
        proj_prob[proj_idx >= 0] = probs[proj_idx[proj_idx >= 0]]
        # print(probs[proj_idx[proj_idx >= 0]])

        # generate the destination path
        base_name = os.path.basename(scan_paths[idx]).replace('.bin', '')
        dst_path = os.path.join(dst_folder, base_name)

        # save the semantic image as format of .npy
        np.save(dst_path, proj_prob)
        semantics.append(proj_prob)
        print('finished generating semantic data at: ', dst_path)

    return semantics
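
For reference, a hypothetical invocation of gen_semantic_data; the folder paths below are placeholders, not part of the original project:

semantics = gen_semantic_data(semantic_folder='data/semantic_probs',
                              scan_folder='data/velodyne',
                              dst_folder='data/processed')
# each saved array has shape (proj_H, proj_W, 20); pixels that received
# no LiDAR point keep the fill value -1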
Example #3
def process_images(input_dir, output_dir, resize=RESIZE):
    files = load_files(input_dir, format="jpg")
    for i, file in enumerate(files):
        img_PIL = Image.open(os.path.join(input_dir, file))
        if abs(1 - img_PIL.width / img_PIL.height) < 0.15:
            img_PIL = img_PIL.resize(resize)
            img_PIL.save(os.path.join(output_dir, file))
        if i % 100 == 0:
            print(i, end=',')
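
A hypothetical call of process_images; the directory names and target size are placeholders (the default RESIZE is defined outside this excerpt):

process_images('photos/raw', 'photos/resized', resize=(256, 256))
# only images whose width/height ratio is within 15% of square get resized and saved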
Example #4
def load_wallets(path: str) -> None:
    """
    Load all wallets into global WALLETS and return the lowest checked block height
    :param path:
    :return:
    """
    for data in load_files(path, ".json"):
        wallet: dict = json.loads(data)
        WALLETS[wallet["address"]] = wallet
        LOGGER.info(f"Loaded wallet: {wallet['address']}")
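
The snippet above relies on a load_files(path, ext) helper that yields the contents of matching files. A minimal sketch of such a helper, assuming it returns raw file text (only the call signature comes from the snippet; the body is an assumption):

import os

def load_files(path: str, ext: str):
    # hypothetical helper: yield the text of every file under path whose name ends in ext
    for name in sorted(os.listdir(path)):
        if name.endswith(ext):
            with open(os.path.join(path, name)) as fh:
                yield fh.read()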
Example #6
def encode(model, f, output):
    new_lines = []
    s = SentencePieceProcessor()
    s.load(model)
    lines = load_files(f)
    for line in lines:
        line = line.strip()
        tokens = s.encode_as_pieces(line)
        new_lines.append(tokens)
    output_lines(output, new_lines)
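
A hypothetical usage of encode; the file names are placeholders. encode_as_pieces returns the subword pieces of a line as a list of strings, which output_lines is assumed to write out:

encode('bpe.model', 'corpus.txt', 'corpus.sp.txt')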
Example #7
    def __getitem__(self, idx):

        image = load_files(self.image_list[idx])
        labels = load_files(self.labels_list[idx])
        cl = self.classes_list[idx]

        if labels.ndim < 3:
            labels = np.expand_dims(labels, axis=2)

        # US only: change image to us, dict
        sample = {'image': image, 'labels': labels}
        if self.transform:
            sample = self.transform(sample)

        # TODO: Need to break apart dictionary and squeeze data so that it fits into the framework. Modify framework to
        #  accept sample tuple
        sample['labels'] = sample['labels'].squeeze()
        if self.with_idx:
            return sample['image'].float(), sample['labels'].float(), cl, idx
        return sample['image'].float(), sample['labels'].float(), cl
Example #8
def encode_dict(model, model2, d, output, sep=" ||| "):
    s = SentencePieceProcessor()
    s.load(model)
    s2 = SentencePieceProcessor()
    s2.load(model2)
    f = load_files(d)
    with open(output, "w") as out:
        for line in f:
            k, v = line.split(sep)
            k = " ".join(s.encode_as_pieces(k))
            v = " ".join(s2.encode_as_pieces(v))
            # write with the same separator that was used for splitting
            out.write(k + sep + v + "\n")
    print("Encode dict to {}".format(output))
Example #9
def __init__(self, map_module, scan_folder, params):
  # load the map module.
  self.map_module = map_module

  # initialize the map renderer with the appropriate parameters.
  self.params = params
  self.max_instance = params['max_instance']
  if params['render_instanced']:
    self.renderer = MapRenderer_instanced(self.params)
  else:
    self.renderer = MapRenderer(self.params)
  self.renderer.set_mesh(self.map_module.mesh)

  # specify query scan paths
  self.scan_paths = load_files(scan_folder)

  self.is_converged = False
Example #10
def create_tfrecords(config=FLAGS):
    if not os.path.exists(config.tfrecord_dir):
        os.makedirs(config.tfrecord_dir)
    if not os.path.exists(
            os.path.join(config.tfrecord_dir, config.dataset, config.subset)):
        os.makedirs(
            os.path.join(config.tfrecord_dir, config.dataset, config.subset))

    save_config(config.tfrecord_dir, config)

    highres_files = load_files(
        os.path.join(config.data_dir, config.dataset, config.subset,
                     'Highres'), config.extension)
    print("\nThere are %d files in %s dataset, subset %s\n" %
          (len(highres_files), config.dataset, config.subset))
    for file in highres_files:
        print(file)
        name = ntpath.basename(file).split('.')[0]
        lowres_filename = os.path.join(config.data_dir, config.dataset,
                                       config.subset, 'Lowres',
                                       '%s.%s' % (name, config.extension))
        hr_image = get_image(file, config.image_size,
                             config.color_channels == 3)
        lr_image = get_image(lowres_filename, 256, config.color_channels == 3)

        # Create a feature and record
        feature = {
            HEIGHT: _int64_feature(config.image_size),
            WIDTH: _int64_feature(config.image_size),
            DEPTH: _int64_feature(config.color_channels),
            LR_IMAGE: _float_feature(lr_image),
            HR_IMAGE: _float_feature(hr_image),
            FILENAME: _bytes_feature(bytes(name, 'utf-8'))
        }
        record = tf.train.Example(features=tf.train.Features(feature=feature))

        tfrecord_filename = os.path.join(config.tfrecord_dir, config.dataset,
                                         config.subset,
                                         '%s.%s' % (name, TFRECORD))
        print(tfrecord_filename)
        with tf.python_io.TFRecordWriter(tfrecord_filename) as writer:
            writer.write(record.SerializeToString())
Example #11
def com_overlaps(virtual_scan_folder, poses, scan_paths, overlap_file_path,
                 range_image_params):
    """ Compute the ground truth overlap values for a sequence of LiDAR scans
    and generate a ground truth overlap file.
    Args:
      virtual_scan_folder: path of virtual scan folder
      poses: ground truth poses of the LiDAR scans
      scan_paths: paths of the LiDAR scans
      overlap_file_path: output file path of the ground truth overlaps
      range_image_params: parameters for generating a range image
  """
    # load virtual scans
    virtual_scan_paths = utils.load_files(virtual_scan_folder)

    grid_coords = []
    for virtual_scan_path in virtual_scan_paths:
        grid_coords.append(
            os.path.basename(virtual_scan_path).replace('.npz', '').split('_'))
    grid_coords = np.array(grid_coords, dtype=float)

    # ground truth format: each row contains [current_frame_idx, reference_frame_idx, overlap, yaw]
    print('generating raw overlap ground truth file...')
    if os.path.exists(overlap_file_path):
        print('the overlap mapping file already exists!')
    else:
        os.makedirs(os.path.dirname(overlap_file_path), exist_ok=True)
        ground_truth_overlap = []
        for idx in tqdm(range(len(poses))):
            overlaps = com_overlap(idx, grid_coords, virtual_scan_folder,
                                   poses[idx], scan_paths[idx],
                                   range_image_params)
            if len(overlaps) > 0:
                ground_truth_overlap.append(overlaps)

        ground_truth_overlap = np.concatenate(ground_truth_overlap).reshape(
            (-1, 4))
        np.savez_compressed(overlap_file_path, ground_truth_overlap)
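
As a sanity check, the saved archive can be loaded back; because the array is passed positionally to np.savez_compressed, it lands under the default key 'arr_0' (the file name below is a placeholder):

import numpy as np

data = np.load('ground_truth_overlap.npz')
rows = data['arr_0']  # each row: [current_frame_idx, reference_frame_idx, overlap, yaw]
print(rows.shape)     # (N, 4)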
Example #12
def main(config):
    """ This script can be used to create mesh maps using LiDAR scans with GT poses.
  It assumes you have the data in the kitti-like format like:

  data
  └── sequences
      └── 00
          ├── calib.txt
          ├── poses.txt
          └── velodyne
              ├── 000000.bin
              ├── 000001.bin
              └── ...

  How to run it and check a quick example:
  $ ./build_gt_map.py /path/to/config.yaml
  """
    # load scans and poses
    scan_folder = config['scan_folder']
    scan_paths = load_files(scan_folder)

    # load poses
    pose_file = config['pose_file']
    poses = load_poses(pose_file)
    inv_frame0 = np.linalg.inv(poses[0])

    # load calibrations
    # Note that if your poses are already in the LiDAR coordinate system, you
    # just need to set T_cam_velo as a 4x4 identity matrix
    calib_file = config['calib_file']
    T_cam_velo = load_calib(calib_file)
    T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4))
    T_velo_cam = np.linalg.inv(T_cam_velo)

    # convert poses into LiDAR coordinate system
    new_poses = []
    for pose in poses:
        new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo))
    new_poses = np.array(new_poses)
    gt_poses = new_poses

    # Use the whole sequence if -1 is specified
    n_scans = len(scan_paths) if config['n_scans'] == -1 else config['n_scans']

    # init mesh map
    mesh_file = config['mesh_file']
    if os.path.exists(mesh_file):
        exit(print('The mesh map already exists at:', mesh_file))
    global_mesh = o3d.geometry.TriangleMesh()
    cloud_map = o3d.geometry.PointCloud()

    # counter for local map
    count = 1
    local_map_size = config['local_map_size']

    # config for range images
    range_config = config['range_image']

    for idx in tqdm(range(n_scans)):
        # load the point cloud
        current_points = load_vertex(scan_paths[idx])

        # get rid of invalid points
        dist = np.linalg.norm(current_points[:, :3], 2, axis=1)
        current_points = current_points[(dist < range_config['max_range'])
                                        & (dist > range_config['min_range'])]

        # convert into open3d format and preprocess the point cloud
        local_cloud = o3d.geometry.PointCloud()
        local_cloud.points = o3d.utility.Vector3dVector(current_points[:, :3])

        # estimate normals
        local_cloud = compute_normals_range(local_cloud,
                                            range_config['fov_up'],
                                            range_config['fov_down'],
                                            range_config['height'],
                                            range_config['width'],
                                            range_config['max_range'])

        # preprocess point clouds
        local_cloud = preprocess_cloud(local_cloud,
                                       config['voxel_size'],
                                       config['crop_x'],
                                       config['crop_y'],
                                       config['crop_z'],
                                       downsample=True)

        # integrate the local point cloud
        local_cloud.transform(gt_poses[idx])
        cloud_map += local_cloud

        if idx > 0:
            # if the car stops, we don't count the frame
            relative_pose = np.linalg.inv(gt_poses[idx - 1]).dot(gt_poses[idx])
            traj_dist = np.linalg.norm(relative_pose[:3, 3])
            if traj_dist > 0.2:
                count += 1

            # build a local mesh map
            if count % local_map_size == 0:
                # segment the ground
                ground, rest = pcd_ground_seg_open3d(cloud_map, config)

                # build the local poisson mesh
                mesh = run_poisson(ground + rest,
                                   depth=config['depth'],
                                   min_density=config['min_density'])

                # simplify the mesh to save space
                mesh = mesh_simplify(mesh, config)
                mesh.compute_vertex_normals()
                mesh.compute_triangle_normals()

                # integrate the local mesh into global mesh
                global_mesh += mesh

                # re-init cloud map
                cloud_map = o3d.geometry.PointCloud()

    # save the mesh map
    print("Saving mesh to " + mesh_file)
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)
    o3d.io.write_triangle_mesh(mesh_file, global_mesh)

    # visualize the mesh map
    if config['visualize']:
        o3d.visualization.draw_geometries([global_mesh])
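
For orientation, a sketch of the configuration this script expects, written as a Python dict; every value is a placeholder guess and the authoritative schema is the project's own config.yaml:

config = {
    'scan_folder': 'data/sequences/00/velodyne',
    'pose_file': 'data/sequences/00/poses.txt',
    'calib_file': 'data/sequences/00/calib.txt',
    'mesh_file': 'data/sequences/00/mesh.ply',
    'n_scans': -1,              # -1 means use the whole sequence
    'local_map_size': 100,
    'voxel_size': 0.1,
    'crop_x': 50.0,
    'crop_y': 50.0,
    'crop_z': 10.0,
    'depth': 10,                # Poisson reconstruction depth
    'min_density': 0.1,
    'visualize': False,
    'range_image': {
        'fov_up': 3.0,
        'fov_down': -25.0,
        'height': 64,
        'width': 900,
        'max_range': 50.0,
        'min_range': 2.0,
    },
}
main(config)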
Example #13
    Silent = False

    ########################### Welcome #################################
    save_and_say("statics/data/audio/suhi_intro.mp3", "welcome to Suhi's English test", silent=Silent)
    #############################LOAD DATA###############################

    # eng_path = "english.txt"
    # vet_path = "vietnamese.txt"

    eng_path = "statics/data/Destination_B2_unit2e.txt"
    vet_path = "statics/data/Destination_B2_unit2v.txt"

    # eng_path = "statics/data/Destination_B2_unit4e.txt"
    # vet_path = "statics/data/Destination_B2_unit4v.txt"

    engs, viets = load_files(eng_path, vet_path)

    print("Saving audio files...")
    save_to_audio(engs, 'en')
    print("DONE!")

    dictionary_ev, dictionary_ve = create_dics(engs, viets)
    
    #########################################################################


    USE_AUDIO = 'suhi'
    type_of_test_text = "Please choose to use listen and guess test or not, type yes or no and enter"
    print(type_of_test_text)
    save_and_say("statics/data/audio/suhi_1.mp3", type_of_test_text, silent=Silent)
"""
Simple tester for the vgg19_trainable
"""

import tensorflow as tf

import vgg19_bin_trainable as vgg19
import utils
import gc
import time
import random

batch, label = utils.load_files("./test_data/chip-sample/train")
batch2, label2 = utils.load_files("./test_data/chip-sample/test")
sample_count = len(batch)
#img1_true_result = [1 if i == 292 else 0 for i in range(1000)]  # 1-hot result for tiger (left over from the VGG demo)
#img1_true_result = [[0] for i in range(sample_count)]
img1_true_result = label
slots = [i for i in range(0, sample_count)]


def print_pred(prob, gt, flag):
    pred_label = []
    for p in prob:
        if p > 0.5:
            pred_label.append(1)
        else:
            pred_label.append(0)
    flat_gt = sum(gt, [])
    res = list(zip(pred_label, flat_gt))
    corrects = 0
Example #15
    def __init__(self):
        self.files = utils.load_files()
        self.affixes = affixes.load_affix_dict(self.files.get('aff'))
        self.dictionary = dictionary.load_dictionary_dict(self.files.get('dic'))
        self.inflex = self.inflections()
Example #16
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in modern scikit-learn
from utils import load_files
import pickle  # cPickle is Python 2 only

import os
import sys

sys.path.insert(0, os.path.abspath('../'))
from config import *


if __name__ == "__main__":
    print('Loading files...')
    features, labels = load_files()
    labels = labels.reshape((len(labels),))
    print('Splitting into test and train datasets')
    train_features, test_features, train_labels, test_labels = train_test_split(features, labels,
                                                                                train_size=TRAIN_TEST_RATIO)
    print('Train features: ' + str(len(train_features)))
    print('Train labels: ' + str(len(train_labels)))
    print('Test features: ' + str(len(test_features)))
    print('Test labels: ' + str(len(test_labels)))
    print('')

    params = {'min_samples_split': 22, 'max_depth': 25, 'min_samples_leaf': 20}
    cls = DecisionTreeClassifier(**params)

    print('Training...')
    cls.fit(train_features, train_labels)
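
The pickle import above suggests the trained classifier is persisted afterwards; a plausible, purely hypothetical continuation (the file name is a placeholder):

    with open('dt_classifier.pkl', 'wb') as fh:
        pickle.dump(cls, fh)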
Example #17
import numpy as np
from sklearn.model_selection import GridSearchCV  # sklearn.grid_search was removed in modern scikit-learn
from sklearn.tree import DecisionTreeClassifier
from utils import load_files

import os
import sys

sys.path.insert(0, os.path.abspath('../'))
from config import *


if __name__ == '__main__':
    cross_res_f = open('./cross_validation1.res', 'w')
    features, labels = load_files([VOICED_FNAME, UNVOICED_FNAME], [VOICED_FEATURES_NUM, UNVOICED_FEATURES_NUM])
    labels = labels.reshape((len(labels),))
    print(len(features))
    print(len(labels))

    depths = np.arange(10, 41, 5)
    min_split = np.arange(7, 23, 5)
    min_leaves = np.arange(5, 21, 5)
    dt_parameters = {'max_depth': depths, 'min_samples_leaf': min_leaves, 'min_samples_split': min_split}

    print("Begin cross validation on DT")
    dt = GridSearchCV(DecisionTreeClassifier(), dt_parameters)
    dt.fit(features, labels)
    cross_res_f.write("Decision tree optimal : " + str(dt.best_params_) + " with score " + str(dt.best_score_) + "\r\n")

    cross_res_f.close()
 
Example #18
# load calibrations
calib_file = '../' + config['calib_file']
T_cam_velo = utils.load_calib(calib_file)
T_cam_velo = np.asarray(T_cam_velo).reshape((4, 4))
T_velo_cam = np.linalg.inv(T_cam_velo)

# convert kitti poses from camera coord to LiDAR coord
new_poses = []
for pose in poses:
  new_poses.append(T_velo_cam.dot(inv_frame0).dot(pose).dot(T_cam_velo))
poses = np.array(new_poses)

# load LiDAR scans
scan_folder = config['scan_folder']
scan_paths = utils.load_files(scan_folder)

# test for the first N scans
if num_frames >= len(poses) or num_frames <= 0:
  print('generate training data for all frames with number of: ', len(poses))
else:
  poses = poses[:num_frames]
  scan_paths = scan_paths[:num_frames]

range_image_params = config['range_image']

# step1: build the pcd map
if os.path.exists(map_file):
  pcd_map = o3d.io.read_point_cloud(map_file)
  num_points = len(pcd_map.points)
  if num_points > 0:
Example #19
        depth = depth_and_normal[:, :, 3] / np.max(depth_and_normal[:, :, 3])
        normal = depth_and_normal[:, :, :3]

        # save depth and normal data
        np.save(os.path.join(depth_folder, frame_name), depth)
        np.save(os.path.join(normal_folder, frame_name), normal)


if __name__ == '__main__':
    # load config file
    config_filename = '../../config/prepare_training.yml'
    if len(sys.argv) > 1:
        config_filename = sys.argv[1]

    # note: comparing version strings lexicographically is fragile, so parse them
    if tuple(int(x) for x in yaml.__version__.split('.')[:2]) >= (5, 1):
        config = yaml.load(open(config_filename), Loader=yaml.FullLoader)
    else:
        config = yaml.load(open(config_filename))

    depth_folder = '../' + config['query_depth_folder']
    normal_folder = '../' + config['query_normal_folder']

    # load virtual scans
    query_scan_folder = '../' + config['scan_folder']
    query_scan_paths = utils.load_files(query_scan_folder)

    range_image_params = config['range_image']

    gen_depth_and_normal_query(query_scan_paths[:100], depth_folder,
                               normal_folder, range_image_params)