Example #1
def read_surface_data(sname):
    from utils.io import read_json
    surf = read_surface(sname + ".xyz")
    ipcs = read_json(sname + ".json")
    surf.space_group = ipcs["space_group"]
    surf.space_group_ref = ipcs["space_group_ref"]
    surf.unit_cell = ipcs["unit_cell"]
    surf.fix = ipcs.get("fix", "")
    return surf
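All of the snippets on this page lean on a small `read_json` helper from each project's `utils.io` (or `io`) module. A minimal sketch of such a helper, assuming it is a plain wrapper around `json.load` (the real one may take extra options, such as the `cont=`/`iprint=` keywords used in Example #4):

import json

def read_json(path):
    # Hypothetical minimal helper: open a JSON file and return the parsed object.
    with open(path, 'r') as f:
        return json.load(f)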
Example #2
  def __init__(self, opts):
    super(MetaSim, self).__init__()
    self.opts = opts
    self.cfg = read_json(opts['config'])
    
    g = get_generator(opts['dataset'])
    self.generator = g(self.cfg)

    self.init_model()
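A hypothetical construction of this class, with the opts keys inferred from the two lookups in __init__ above; the values are illustrative:

# Hypothetical usage; 'config' and 'dataset' are the only keys read above.
sim = MetaSim({'config': 'configs/mnist.json', 'dataset': 'mnist'})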
Example #3
    def __init__(self, path):
        self._identifier = os.path.basename(path)
        self._path = path

        # load config json
        self._config = read_json(os.path.join(self._path, 'config.json'))

        # load csv
        self._log = read_keras_csv_logfile(os.path.join(self._path, 'log.csv'))
Example #4
    def read(self):
        zf = zipfile.ZipFile(self.input_file, 'r')
        print("## %s" % self.filename)
        # build a directory tree of the archive members
        tree = FileItem("/")
        for name in zf.namelist():
            item = FileItem(name)
            node = tree
            for part in item.parents:
                node = node[part]
            node[item.name] = item
        tomasters = tree["tomaster"].path_names()
        masters = tree["master"].path_names()
        for m in masters:
            mfiles = tree["master"][m].file_names()
            for f in mfiles:
                fn = tree["master"][m][f].path
                idx = f.split('.')[2]
                xname = m + '.' + idx
                if f.startswith("par_structs") and not self.read_traj: continue
                if f.startswith("par_log"):
                    self.xdata.log[m + '.'] = zf.open(fn, 'rU').readlines()
                for key in ["props", "runtime"]:
                    if f.startswith("par_" + key):
                        self.xdata[key][xname] = json.loads(zf.open(fn, 'rU').read())
                for key in ["structs", "sources", "local", "disp", "dupl", "max", "filter"]:
                    if f.startswith("par_" + key):
                        self.xdata[key][xname] = read_clusters(fn, zf=zf, iprint=True)
                # fil_structs ignored

        for m in tomasters:
            mfiles = tree["tomaster"][m].file_names()
            for f in mfiles:
                fn = tree["tomaster"][m][f].path
                if f.startswith("para.json"):
                    xname = m.split('-')[1] + '.'
                    self.xdata.input[xname] = read_json(zf.open(fn, 'r').read(),
                                                        cont=True, iprint=False)
                elif f.startswith("meta_info"):
                    self.metas.append(zf.open(fn, 'r').readlines())

        self.surf_locator = {}
        self.cmd_hist = []
        for k in tree.subs.keys():
            if k.endswith(".xyz"):
                fn = tree[k].path
                self.surf_locator[k] = read_surface(fn, zf=zf, iprint=False)
            if k == "CMD-HISTORY":
                fn = tree[k].path
                self.cmd_hist = [m.strip() for m in zf.open(fn, 'r').readlines()]
        print("data loading finished")
Example #5
    def __getitem__(self, index):

        # load image
        img_path = self.index['rgba'][index].decode('utf8')
        img = sio.imread(img_path).astype(np.float32)
        img /= 255.0

        # read joint positions
        json_path = self.index['json'][index].decode('utf8')
        data = io.read_json(json_path)
        p2d, p3d = self._process_points(data)

        # get action name
        action = data['action']

        if self.transform:
            img = self.transform({'image': img})['image']
            p3d = self.transform({'joints3D': p3d})['joints3D']
            p2d = self.transform({'joints2D': p2d})['joints2D']

        return img, p2d, p3d, action
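The transform above is invoked with single-key dicts ('image', 'joints3D', 'joints2D') and must hand back a dict under the same key. A minimal compatible transform, sketched under the assumption that the samples are NumPy arrays destined for PyTorch:

import torch

class ToTensorDict:
    # Hypothetical transform matching the call pattern above: it receives a
    # dict with one key and returns a dict with the same key.
    def __call__(self, sample):
        return {k: torch.from_numpy(v) for k, v in sample.items()}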
Example #6
    def pull_anno(self, index):
        data_id = self.files[index]
        target = io.read_json(data_id + '.json')
        return target[0]['obj_class']

def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument('--config', type=str, required=True)

    return parser.parse_args()


def generate_data(config):
    attr = config['attributes']
    generator_class = get_generator(attr['dataset'])
    generator = generator_class(config)

    # vars and housekeeping
    out_dir = attr['output_dir']
    n_samples = attr['num_samples']

    # out directory
    io.makedirs(out_dir)
    io.write_json(config, os.path.join(out_dir, 'config.json'))

    # generate
    io.generate_data(generator, out_dir, n_samples)


if __name__ == '__main__':
    args = get_args()
    config = io.read_json(args.config)
    generate_data(config)
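generate_data reads everything it needs from config['attributes']. A hypothetical minimal config, shown as the Python object io.read_json would return; the key names follow the accesses above, the values are made up:

config = {
    'attributes': {
        'dataset': 'mnist',          # looked up via get_generator
        'output_dir': 'out/run_01',  # created with io.makedirs
        'num_samples': 1000,         # forwarded to io.generate_data
    },
}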
Example #8
def main():
    # parse args --------------------------------------------------------------
    args = _parse_args()

    # set device and data format ----------------------------------------------
    if args.cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        args.devices = ''
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.devices
    if not args.devices or args.model == 'mobilenet_v2':
        # note: tensorflow supports b01c pooling on cpu only
        K.set_image_data_format('channels_last')
    else:
        K.set_image_data_format('channels_first')

    # set dtype ---------------------------------------------------------------
    K.set_floatx(args.dtype)

    # create random data ------------------------------------------------------
    if args.verbose:
        print("Generating random data ...")

    # helper function
    def generate_data(data_shape):
        return np.random.random(data_shape).astype(args.dtype)

    # data for timing
    data = []
    for _ in range(args.n_repetitions):
        single_input = []
        if args.input_type in [INPUT_DEPTH, INPUT_DEPTH_AND_RGB]:
            shape = _get_shape(args.batch_size, args.input_height,
                               args.input_width, 1)
            single_input.append(generate_data(shape))
        if args.input_type in [INPUT_RGB, INPUT_DEPTH_AND_RGB]:
            shape = _get_shape(args.batch_size, args.input_height,
                               args.input_width, 3)
            single_input.append(generate_data(shape))
        data.append(single_input)

    # data initial runs
    data_initial = []
    for _ in range(args.n_repetitions):
        single_input_initial = []
        if args.input_type in [INPUT_DEPTH, INPUT_DEPTH_AND_RGB]:
            shape = _get_shape(args.batch_size, args.input_height,
                               args.input_width, 1)
            single_input_initial.append(generate_data(shape))
        if args.input_type in [INPUT_RGB, INPUT_DEPTH_AND_RGB]:
            shape = _get_shape(args.batch_size, args.input_height,
                               args.input_width, 3)
            single_input_initial.append(generate_data(shape))
        data_initial.append(single_input_initial)

    # time prediction on random data ------------------------------------------
    fps = []
    if args.optimize is not None:
        # use TensorRT
        # convert to frozen graph
        if args.verbose:
            print("Creating frozen graph ...")
        model_filepath = './model.pb'
        subprocess.check_call(
            'python freeze_graph.py {} {}'
            ' --input_type {}'
            ' --input_height {}'
            ' --input_width {}'
            ' --output_type {}'
            ' --n_classes {}'
            ' --kappa {}'
            ' --mobilenet_v2_alpha {}'
            ' --devices {}'
            ' --dtype {}'.format(args.model, model_filepath, args.input_type,
                                 args.input_height, args.input_width,
                                 args.output_type, args.n_classes, args.kappa,
                                 args.mobilenet_v2_alpha, args.devices or '""',
                                 args.dtype),
            shell=True,
            cwd=os.path.dirname(__file__) or './')

        # load frozen graph
        if args.verbose:
            print("Loading frozen graph ...")

        graph_def = _load_frozen_graph_def(model_filepath)
        names = read_json(model_filepath + '.json')

        if args.optimize == 'trt':
            # optimize using TensorRT
            import tensorflow.contrib.tensorrt as trt

            graph_def = trt.create_inference_graph(
                input_graph_def=graph_def,
                outputs=names['output_names'],
                max_batch_size=args.batch_size,
                max_workspace_size_bytes=1 << 25,
                precision_mode='FP16' if args.dtype == 'float16' else 'FP32',
                minimum_segment_size=50)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)

        tf.import_graph_def(graph_def, name='')

        input_tensors = []
        for name in names['input_names']:
            input_tensors.append(sess.graph.get_tensor_by_name(name + ':0'))

        output_tensor = sess.graph.get_tensor_by_name(
            names['output_names'][0] + ':0')

        # some dry runs
        for d in data_initial:
            output = sess.run(
                output_tensor,
                feed_dict={t: v
                           for t, v in zip(input_tensors, d)})

        # time inference
        for d in data:
            t1 = time()
            output = sess.run(
                output_tensor,
                feed_dict={t: v
                           for t, v in zip(input_tensors, d)})
            fps.append(1 / (time() - t1))

    else:
        # use Keras and Tensorflow

        # load model
        if args.verbose:
            print("Loading model ...")
        model_module = globals()[args.model]
        model_kwargs = {}
        if args.model == 'mobilenet_v2':
            model_kwargs['alpha'] = args.mobilenet_v2_alpha
        model = model_module.get_model(input_type=args.input_type,
                                       input_shape=(args.input_height,
                                                    args.input_width),
                                       output_type=args.output_type,
                                       n_classes=args.n_classes,
                                       sampling=False,
                                       **model_kwargs)

        # some dry runs
        for d in data_initial:
            model.predict(d, batch_size=args.batch_size)

        # time inference
        for d in data:
            t1 = time()
            model.predict(d, batch_size=args.batch_size)
            fps.append(1 / (time() - t1))

    mean = np.mean(fps)
    std = np.std(fps)
    model_str = args.model
    size_str = '{}x{}x{}'.format(args.batch_size, args.input_height,
                                 args.input_width)
    if args.model == 'mobilenet_v2':
        model_str += "_{:0.2f}".format(args.mobilenet_v2_alpha).replace(
            '.', '_')
    print("FPS ({}, {}, {}, {}, {}, opt: {}, gpu id: {}, mean of {} runs): "
          "{:.1f}+-{:.1f}".format(model_str, args.output_type, args.input_type,
                                  size_str, args.dtype, args.optimize,
                                  args.devices or '-1', args.n_repetitions,
                                  mean, std))
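The TensorRT branch depends on a sidecar JSON apparently written next to the frozen graph by freeze_graph.py. Judging from the two lookups above, it maps the graph's endpoints to tensor names; a sketch of the assumed layout, with made-up names:

# Assumed structure of ./model.pb.json; only these two keys are read above,
# and the tensor names themselves are hypothetical.
names = {
    'input_names': ['input_1'],
    'output_names': ['output_1'],
}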
Example #9
    def __getattr__(self, attr):
        return getattr(self.stream, attr)


sys.stdout = Unbuffered(sys.stdout)

from utils.io import read_json, write_json
from utils.io import load_data, dump_data, new_file_name, load_name

if __name__ != "__main__":
    raise RuntimeError('Must run this file as the main program!')

if len(sys.argv) < 2:
    raise RuntimeError('Need input file!')

ip = read_json(sys.argv[1])

theano_flags = []
if _platform == 'darwin':
    theano_flags.append("cxx=/usr/local/bin/g++-5")
if 'OMP_NUM_THREADS' in os.environ:
    print('number of openmp threads: {}'.format(os.environ['OMP_NUM_THREADS']))
    theano_flags.append("openmp=True")
    theano_flags.append("openmp_elemwise_minsize=100000")
if 'gpu' in ip and ip['gpu'] is not False:
    theano_flags.append("device=" + ip['gpu'])
    theano_flags.append("lib.cnmem=1")
    theano_flags.append("floatX=float32")
if len(theano_flags) != 0:
    if 'THEANO_FLAGS' in os.environ:
        os.environ['THEANO_FLAGS'] += ',' + ','.join(theano_flags)
Example #10
    def __getitem__(self, index):

        # load image
        img_path = self.index['rgba'][index].decode('utf8')
        #################### The DATASET PATH needs adjusting: the H5 index appears to store a fixed directory, which is not usable on this machine
        # img_path = "/SSD/xR-EgoPose/" + img_path
        ####################
        img = sio.imread(img_path).astype(np.float32)
        img /= 255.0
        #################### Fisheye camera: crop to the lens region in the center
        img = img[0 + 32:800 - 32, 250 + 32:1050 - 32, :]  # indices are (y, x)
        ####################
        #################### Downsample by 1/2 with local-mean pooling to match the input size
        img = stransform.downscale_local_mean(img, (2, 2, 1))
        ####################

        # read joint positions
        json_path = self.index['json'][index].decode('utf8')
        #################### The DATASET PATH needs adjusting: the H5 index appears to store a fixed directory, which is not usable on this machine
        # json_path = "/SSD/xR-EgoPose/" + json_path
        ####################
        data = io.read_json(json_path)
        p2d, p3d = self._process_points(data)

        #################### Test code for drawing the labeled keypoints onto the 2D image
        # img *= 255.0
        # for (x, y) in p2d:
        #     for i in range(int(x-10), int(x+10)):
        #         for j in range(int(y-10), int(y+10)):
        #             img[j, i, :] = 0
        # sio.imsave("./test2DPose.png", img)
        ####################
        #################### Test code for plotting the labeled keypoints in 3D space
        # p3d *= 100
        # # This import registers the 3D projection, but is otherwise unused.
        # from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
        # import matplotlib.pyplot as plt
        # # for scattering point
        # fig = plt.figure()
        # ax = fig.add_subplot(111, projection='3d')
        # for idx, point in enumerate(p3d):
        #     if idx < 14:
        #         ax.scatter(point[1], point[0], point[2], c='#FF0000')
        #     else:
        #         ax.scatter(point[1], point[0], point[2], c='#0000FF')
        # # for connecting point by line
        # ax.plot([p3d[5][1], p3d[4][1]], [p3d[5][0], p3d[4][0]], [p3d[5][2], p3d[4][2]], c='#FF0000')  # Head -> Neck
        # ax.plot([p3d[4][1], p3d[7][1]], [p3d[4][0], p3d[7][0]], [p3d[4][2], p3d[7][2]], c='#FF0000')  # Neck -> Left Arm
        # ax.plot([p3d[7][1], p3d[8][1]], [p3d[7][0], p3d[8][0]], [p3d[7][2], p3d[8][2]], c='#FF0000')  # Left Arm -> Left Elbow
        # ax.plot([p3d[8][1], p3d[9][1]], [p3d[8][0], p3d[9][0]], [p3d[8][2], p3d[9][2]], c='#FF0000')  # Left Elbow -> Left Hand
        # ax.plot([p3d[4][1], p3d[11][1]], [p3d[4][0], p3d[11][0]], [p3d[4][2], p3d[11][2]], c='#FF0000')  # Neck -> Right Arm
        # ax.plot([p3d[11][1], p3d[12][1]], [p3d[11][0], p3d[12][0]], [p3d[11][2], p3d[12][2]], c='#FF0000')  # Right Arm -> Right Elbow
        # ax.plot([p3d[12][1], p3d[13][1]], [p3d[12][0], p3d[13][0]], [p3d[12][2], p3d[13][2]], c='#FF0000')  # Right Elbow -> Right Hand
        # ax.plot([p3d[4][1], p3d[0][1]], [p3d[4][0], p3d[0][0]], [p3d[4][2], p3d[0][2]], c='#FF0000')  # Neck -> Hip
        # ax.plot([p3d[0][1], p3d[14][1]], [p3d[0][0], p3d[14][0]], [p3d[0][2], p3d[14][2]], c='#0000FF')  # Hip -> Left Leg
        # ax.plot([p3d[14][1], p3d[15][1]], [p3d[14][0], p3d[15][0]], [p3d[14][2], p3d[15][2]], c='#0000FF')  # Left Leg -> Left Knee
        # ax.plot([p3d[15][1], p3d[16][1]], [p3d[15][0], p3d[16][0]], [p3d[15][2], p3d[16][2]], c='#0000FF')  # Left Knee -> Left Foot
        # ax.plot([p3d[16][1], p3d[17][1]], [p3d[16][0], p3d[17][0]], [p3d[16][2], p3d[17][2]], c='#0000FF')  # Left Foot -> Left Toe
        # ax.plot([p3d[0][1], p3d[18][1]], [p3d[0][0], p3d[18][0]], [p3d[0][2], p3d[18][2]], c='#0000FF')  # Hip -> Right Leg
        # ax.plot([p3d[18][1], p3d[19][1]], [p3d[18][0], p3d[19][0]], [p3d[18][2], p3d[19][2]], c='#0000FF')  # Right Leg -> Right Knee
        # ax.plot([p3d[19][1], p3d[20][1]], [p3d[19][0], p3d[20][0]], [p3d[19][2], p3d[20][2]], c='#0000FF')  # Right Knee -> Right Foot
        # ax.plot([p3d[20][1], p3d[21][1]], [p3d[20][0], p3d[21][0]], [p3d[20][2], p3d[21][2]], c='#0000FF')  # Right Foot -> Right Toe
        # # set legend & save
        # ax.set_xlabel('X Label')
        # ax.set_ylabel('Y Label')
        # ax.set_zlabel('Z Label')
        # plt.savefig("./test3DPose.png")
        ####################

        # get action name
        action = data['action']

        if self.transform:
            img = self.transform({'image': img})['image']
            p3d = self.transform({'joints3D': p3d})['joints3D']
            p2d = self.transform({'joints2D': p2d})['joints2D']

        #################### Reduce to the 15 keypoints defined in the paper, fill them in manually from p2d, then generate the heatmap
        keypoints = np.zeros((15, 2))
        keypoints[0] = p2d[4]  # Neck
        keypoints[1] = p2d[7]  # Left Arm
        keypoints[2] = p2d[8]  # Left Elbow
        keypoints[3] = p2d[9]  # Left Hand
        keypoints[4] = p2d[11]  # Right Arm
        keypoints[5] = p2d[12]  # Right Elbow
        keypoints[6] = p2d[13]  # Right Hand
        keypoints[7] = p2d[14]  # Left Leg
        keypoints[8] = p2d[15]  # Left Knee
        keypoints[9] = p2d[16]  # Left Foot
        keypoints[10] = p2d[17]  # Left Toe
        keypoints[11] = p2d[18]  # Right Leg
        keypoints[12] = p2d[19]  # Right Knee
        keypoints[13] = p2d[20]  # Right Foot
        keypoints[14] = p2d[21]  # Right Toe
        heatmap = self.generateHeatmap(keypoints)
        ####################
        ################### Code for visualizing the generated heatmap
        # heatmap = np.sum(heatmap, axis=0)
        # from PIL import Image
        # img = Image.fromarray(np.uint8(heatmap*255.0), 'L')
        # img.save("./test2DHeatmap.png")
        ###################

        return img, p2d, p3d, action, heatmap.astype(np.float32)
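self.generateHeatmap is not shown in the excerpt. A common construction, and a plausible stand-in here, is one 2D Gaussian channel per (x, y) keypoint; the output resolution and sigma below are assumptions:

import numpy as np

def generate_heatmap(keypoints, size=(368, 368), sigma=3.0):
    # Hypothetical sketch of self.generateHeatmap: one Gaussian per keypoint.
    h, w = size
    ys, xs = np.mgrid[0:h, 0:w]
    maps = np.zeros((len(keypoints), h, w), dtype=np.float32)
    for i, (x, y) in enumerate(keypoints):
        maps[i] = np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2.0 * sigma ** 2))
    return maps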