Code Example #1
def rec_human(pipe_img_2, pipe_center, pipe_scale):
    config = flags.FLAGS
    config(sys.argv)
    config.load_path = src.config.PRETRAINED_MODEL
    config.batch_size = 1
    sess = tf.Session()
    model = RunModel(config, sess=sess)
    print(config.smpl_face_path)
    rec_human_count = 0
    rec_human_time = time.time()
    while True:

        img = pipe_img_2.recv()
        center = pipe_center.recv()
        scale = pipe_scale.recv()
        input_img, proc_param = img_util.scale_and_crop(
            img, scale, center, config.img_size)
        input_img = 2 * ((input_img / 255.) - 0.5)
        input_img = np.expand_dims(input_img, 0)
        joints, verts, cams, joints3d, theta = model.predict(input_img,
                                                             get_theta=True)
        cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
            proc_param, verts[0], cams[0], joints[0], img_size=img.shape[:2])
        #print(cam_for_render.shape)
        rec_human_count = rec_human_count + 1
        if rec_human_count == 100:
            print('rec FPS:', 1.0 / ((time.time() - rec_human_time) / 100.0))
            rec_human_count = 0
            rec_human_time = time.time()
Code Example #2
def load_tradelogs(
    start: Optional[datetime] = None,
    end: Optional[datetime] = None,
    symbols: Optional[Container[str]] = None,
) -> dict[str, list[Trade]]:

    all_trades: defaultdict[str, set[Trade]] = defaultdict(set)

    trade_dir = config()["log"]["ibkr"]

    for fn in trade_dir.glob("*.csv"):
        fn_log = parse_ibkr_report_tradelog(fn)
        for sym, trades in fn_log.items():
            all_trades[sym] |= trades

    tws_log_today = (Path(config()["log"]["tws"]).joinpath(
        f'trades.{date.today().strftime("%Y%m%d")}.csv').expanduser())

    if tws_log_today.exists():
        for k, trades in parse_tws_exported_tradelog(tws_log_today).items():
            all_trades[k] |= trades

    # noinspection PyTypeChecker
    return {
        sym: keep_trades
        for sym, trades in all_trades.items()
        if (keep_trades := sorted(tr for tr in trades
                                  if (end is None or tr.time <= end) and (
                                      start is None or tr.time >= start)))
        if symbols is None or sym in symbols
    }
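A hedged usage sketch of load_tradelogs (the symbol set and the 30-day window are illustrative, not from the project; if Trade timestamps are timezone-aware, the start value should carry a matching tzinfo such as the TZ_EASTERN used in the tax.py example below):

from datetime import datetime, timedelta

recent = load_tradelogs(start=datetime.now() - timedelta(days=30), symbols={"XLI"})
for sym, trade_list in recent.items():
    print(sym, len(trade_list), "trades")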
Code Example #3
def visualize(img, proc_param, joints, verts, weights, cam, img_path):
    """
    Renders the result in original image coordinate frame.
    """
    cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
        proc_param, verts, cam, joints, img_size=img.shape[:2])

    folder = '/'.join(img_path.split('/')[0:-1])
    print("FOLDER!!!!!!!!!!!")
    print(folder)

    # Render results
    config = flags.FLAGS
    config(sys.argv)
    # Using pre-trained model, change this to use your own.
    config.load_path = src.config.PRETRAINED_MODEL

    config.batch_size = 1
    renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)


    rend_img = renderer(
        vert_shifted, weights, False, cam=cam_for_render, img_size=img.shape[:2])
    rend_img_back = renderer.rotated(
        vert_shifted, weights, False, 180, cam=cam_for_render, img_size=img.shape[:2])

    smplPath = folder + '/weights.png'
    print("Saving Weights picture to:")
    print(smplPath)

    cv2.imwrite(smplPath, rend_img)
    # cv2.imshow('Weights Front',rend_img)        
    # cv2.waitKey(0)

    smplPath = folder + '/weightsBack.png'
    print("Saving Weights Map to:")
    print(smplPath)    

    cv2.imwrite(smplPath, rend_img_back)
    # cv2.imshow('Weights Back',rend_img_back)        
    # cv2.waitKey(0)

    rend_img2 = renderer(
        vert_shifted, weights, True, cam=cam_for_render, img_size=img.shape[:2])
    rend_img_back2 = renderer.rotated(
        vert_shifted, weights, True, 180, cam=cam_for_render, img_size=img.shape[:2])

    smplPath = folder + '/weights2.png'
    print("Saving Skinning indexs picture to:")
    print(smplPath)

    cv2.imwrite(smplPath, rend_img2)
    # cv2.imshow('Skinning indexs  Front',rend_img2)        
    # cv2.waitKey(0)

    smplPath = folder + '/weightsBack2.png'
    print("Saving Skinning indexs  Map to:")
    print(smplPath)    

    cv2.imwrite(smplPath, rend_img_back2)
Code Example #4
def rerenders(img_path, proc_param, joints, verts, cam, folder_name):
    try:
        import hmr.src.config
        from hmr.src.util import renderer as vis_util

    except ImportError:
        # fall back to the modules already imported at module level
        pass

    config = flags.FLAGS
    config(sys.argv)
    # Using pre-trained model, change this to use your own.
    config.load_path = hmr.src.config.PRETRAINED_MODEL

    config.batch_size = 1

    renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)

    """
    Renders the result in original image coordinate frame.
    """
    img = io.imread(img_path)
    cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
        proc_param, verts, cam, joints, img_size=img.shape[:2])

    # Render results
    rend_img_overlay = renderer(
        vert_shifted, cam=cam_for_render, img=img, do_alpha=True)
    rend_img_overlay = cv2.cvtColor(rend_img_overlay, cv2.COLOR_BGR2RGB)
    # print("hhhhhhhhhhhhhhhhh/home/ankur/GUI_project/frames/FRAMES_HMR/" + folder_name + img_path.split('/')[-1][:-4]+".png")
    cv2.imwrite("/home/ankur/GUI_project/frames/FRAMES_HMR/" + folder_name + "/" + img_path.split('/')[-1][:-4]+".png",rend_img_overlay)
Code Example #5
def setup():
    config = flags.FLAGS
    config(sys.argv)
    # Using pre-trained model, change this to use your own.
    config.load_path = src.config.PRETRAINED_MODEL
    config.batch_size = 1

    sess = tf.Session()
    model = RunModel(config, sess=sess)

    return sess, model, config
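A minimal sketch of how the objects returned by setup() might be combined for a single prediction, following the pattern of the other examples here ('person.jpg' is a hypothetical input, and preprocess_image is borrowed from Example #10 rather than part of setup() itself):

sess, model, config = setup()

input_img, proc_param, img = preprocess_image('person.jpg', config.img_size, None)
input_img = np.expand_dims(input_img, 0)  # add batch dimension: 1 x D x D x 3

joints, verts, cams, joints3d, theta = model.predict(input_img, get_theta=True)
sess.close()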
Code Example #6
File: hmr11.py  Project: gaowq2017/hmr_openpose_zhj
def rec_human(pipe_img_2, pipe_center, pipe_scale, pipe_shape, pipe_kp):
    global last_person
    config = flags.FLAGS
    config(sys.argv)
    config.load_path = src.config.PRETRAINED_MODEL
    config.batch_size = 1
    sess = tf.Session()
    model = RunModel(config, sess=sess)
    rec_human_count = 0
    rec_human_time = time.time()
    #num_render = 1

    while True:

        img = pipe_img_2.recv()
        center = pipe_center.recv()
        scale = pipe_scale.recv()
        person_shape = pipe_shape.recv()
        kp = pipe_kp.recv()

        input_img, proc_param, last_person = img_util.scale_and_crop(
            img, scale, center, person_shape, 0.25, config.img_size,
            last_person)

        cv2.imwrite('/media/ramdisk/input.jpg', input_img)
        print(np.mean(input_img))

        # NOTE: this variant normalizes to [0, 1] instead of [-1, 1]
        input_img = input_img / 255.

        # input_img = 2 * ((input_img / 255.) - 0.5)

        input_img = np.expand_dims(input_img, 0)
        joints, verts, cams, joints3d, theta = model.predict(input_img,
                                                             get_theta=True)
        #cam_for_render, vert_shifted, joints_orig = vis_util.get_original(proc_param, verts[0], cams[0], joints[0], img_size=img.shape[:2])
        write_obj(smpl_model_used, theta, outmesh_path)
        with open(outmesh_path, 'rb') as f:
            str_1 = f.read()
        message_id = queue2.sendMessage(delay=0).message(str_1).execute()
        msg2.append(message_id)
        if len(msg2) > 1:
            rt = queue2.deleteMessage(id=msg2[0]).execute()
            del msg2[0]

        rec_human_count = rec_human_count + 1
        if rec_human_count == 100:
            print('rec FPS:', 1.0 / ((time.time() - rec_human_time) / 100.0))
            rec_human_count = 0
            rec_human_time = time.time()
Code Example #7
File: flex_query.py  Project: qdbp/tws_autorebalance
def load_raw_query(query: Query) -> StringIO:

    cfg = config()["flex_query"]
    params = {"t": cfg["token"], "q": cfg[query], "v": "3"}

    query_url = f"{QUERY_URL}?{urlencode(params)}"
    raw = urlopen(query_url).read().decode("utf-8")
    tree = fromstring(raw)

    status = tree.find(".//Status")
    if status is None or status.text != "Success":
        raise IOError(f"Flex service returned error: "  # type: ignore
                      f"{tree.find('.//ErrorMessage').text}")

    flex_base: str = tree.find(".//Url").text  # type: ignore
    code = tree.find(".//ReferenceCode").text  # type: ignore
    payload_url = flex_base + "?" + urlencode(dict(t=cfg["token"], q=code))

    while True:
        raw_payload = urlopen(payload_url).read().decode("utf-8")
        try:
            tree = fromstring(raw_payload)
            payload_status: str = tree.find("code").text  # type: ignore
            if "generation in progress" in payload_status:
                sleep(1)
                continue
            # any other parseable status is an error, not the report
            raise IOError(f"Flex payload error: {payload_status}")
        except ParseError:
            # the payload no longer parses as a status message, so it is the report
            break

    return StringIO(raw_payload)
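A hedged sketch of consuming the StringIO returned above (it assumes the configured report is CSV and that query names such as "trades" are plain strings, as the project's other examples suggest):

import pandas as pd

raw = load_raw_query("trades")
df = pd.read_csv(raw)
print(df.head())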
Code Example #8
def visualize(img, proc_param, joints, verts, cam, img_path):
    """
    Renders the result in original image coordinate frame.
    """
    cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
        proc_param, verts, cam, joints, img_size=img.shape[:2])

    # folder = '/'.join(img_path.split('/')[0:-1])
    folder = './results'
    print("FOLDER!!!!!!!!!!!")
    print(folder)

    # Render results
    config = flags.FLAGS
    config(sys.argv)
    # Using pre-trained model, change this to use your own.
    config.load_path = src.config.PRETRAINED_MODEL

    config.batch_size = 1
    renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)

    # skel_img = vis_util.draw_skeleton(img, joints_orig)
    # rend_img_overlay = renderer(
    # vert_shifted, cam=cam_for_render, img=img, do_alpha=True)
    rend_img = renderer(vert_shifted,
                        cam=cam_for_render,
                        img_size=img.shape[:2])
    # rend_img_vp1 = renderer.rotated(
    #     vert_shifted, 60, cam=cam_for_render, img_size=img.shape[:2])
    rend_img_vp2 = renderer.rotated(vert_shifted,
                                    180,
                                    cam=cam_for_render,
                                    img_size=img.shape[:2])

    smplPath = folder + '/normals.png'
    print("Saving Normals picture to:")
    print(smplPath)

    cv2.imwrite(smplPath, rend_img)
    # cv2.imshow('Normals Front',rend_img)
    # cv2.waitKey(0)

    smplPath = folder + '/normalsBack.png'
    print("Saving Normals Map to:")
    print(smplPath)

    cv2.imwrite(smplPath, rend_img_vp2)
Code Example #9
def make_mesh(img_path):
    # this was an old version for Pierlorenzo (~Feb 7-10, 2019)
    if os.path.isfile(outmesh_path):
        os.remove(outmesh_path)  # clear the stale mesh from a previous run

    config(sys.argv)
    # Using pre-trained model, change this to use your own.
    config.load_path = src.config.PRETRAINED_MODEL

    config.batch_size = 1

    renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)

    main(img_path,
         config.json_path)  # here, img_path is a parameter, not from config.
    # NOTE: the .obj file is currently written "backward", so it has to be
    # rewritten afterwards; that happens in the fix() function.
    with open(outmesh_path, 'r') as fp:
        # NOTE: Pier said the actual file is preferable to just the string
        # contents, but the distinction is unclear; return the string for now.
        return fp.read()
Code Example #10
def main(img_path, model_type='ResNet50-HMR', json_path=None):
    config = flags.FLAGS
    config(sys.argv)

    config.load_path = src.config.FULL_PRETRAINED_MODEL
    if model_type == 'ResNet50-HMR':
        second_load_path = None
    elif model_type == 'ResNet50-ImageNet':
        second_load_path = src.config.RESNET_IMAGENET_PRETRAINED_MODEL
    else:
        raise ValueError(
            'Model type {} is currently not implemented'.format(model_type))

    config.batch_size = 1

    tf.reset_default_graph()
    sess = tf.Session()
    model = RunModel(config, second_load_path, sess=sess)

    img_name = os.path.basename(img_path).split('.')[0]
    input_img, proc_param, img = preprocess_image(img_path, config.img_size,
                                                  json_path)
    # Add batch dimension: 1 x D x D x 3
    input_img = np.expand_dims(input_img, 0)

    # Theta is the 85D vector holding [camera, pose, shape]
    # where camera is 3D [s, tx, ty]
    # pose is 72D vector holding the rotation of 24 joints of SMPL in axis angle format
    # shape is 10D shape coefficients of SMPL
    joints, verts, cams, joints3d, theta, layer_activations = model.predict(
        input_img, get_theta=True)

    renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)
    # pdb.set_trace()  # debugging breakpoint, left disabled
    fig = visualize(img, proc_param, renderer, joints[0], verts[0], cams[0])
    save_dir = 'reconstructions-' + model_type
    make_path(save_dir)
    fig.savefig(os.path.join(save_dir, img_name + '.png'))

    return layer_activations
Code Example #11
    def load(cls) -> AutoRebalanceConfig:

        raw_config = config()
        strategy = raw_config["strategy"]

        acct_configs = {
            acct: AutoRebalanceAcctConfig.from_dict(acct, acct_dict)
            for acct, acct_dict in strategy["accounts"].items()
        }

        return cls(
            # raw_config=raw_config,
            accounts=acct_configs,
            **raw_config["settings"],
        )
Code Example #12
def main(acct: Acct) -> None:
    args = get_args()

    symbols = {
        sc.symbol
        for sc in Composition.parse_config_section(
            config()["strategy"][acct]["composition"])[0].contracts
    }

    mode: PAttrMode
    for mode in ["shortest", "min_variation"]:  # type: ignore
        attr_set = analyze_trades(args.start, args.end, symbols, mode=mode)
        for symbol in symbols:
            fig, ax = plt.subplots(1, 1)
            attr_set.plot_arrows(symbol,
                                 ax,
                                 start=args.start,
                                 end=args.end + ONE_DAY)
            fig.tight_layout()
            fig.savefig(data_fn(f"{symbol}_trade_plot_{mode}.png"))
            plt.close(fig)

    summarize_closed_positions()
Code Example #13
File: hmr12.py  Project: gaowq2017/hmr_openpose_zhj
def rec_human(pipe_img_2, pipe_center, pipe_scale, pipe_kp):
    config = flags.FLAGS
    config(sys.argv)
    config.load_path = src.config.PRETRAINED_MODEL
    config.batch_size = 1
    sess = tf.Session()
    model = RunModel(config, sess=sess)
    rec_human_count = 0
    rec_human_time = time.time()
    num_render = 1

    while True:

        img = pipe_img_2.recv()
        center = pipe_center.recv()
        scale = pipe_scale.recv()
        kp = pipe_kp.recv()
        input_img, proc_param = img_util.scale_and_crop(
            img, scale, center, config.img_size)
        input_img = 2 * ((input_img / 255.) - 0.5)
        input_img = np.expand_dims(input_img, 0)
        joints, verts, cams, joints3d, theta = model.predict(input_img,
                                                             get_theta=True)
        cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
            proc_param, verts[0], cams[0], joints[0], img_size=img.shape[:2])

        print(111111)
        # save render inputs round-robin into directories 1 -> 2 -> 3 -> 4
        if num_render == 1:
            np.save('/media/ramdisk/render_data/1/cam_for_render.npy',
                    cam_for_render)
            np.save('/media/ramdisk/render_data/1/vert_shifted.npy',
                    vert_shifted)
            np.save('/media/ramdisk/render_data/1/kp.npy', kp)
            #cv2.imwrite('/media/ramdisk/render_data/1/kp.jpg',kp)
            #print(kp.shape)
            num_render = 2
        elif num_render == 2:
            np.save('/media/ramdisk/render_data/2/cam_for_render.npy',
                    cam_for_render)
            np.save('/media/ramdisk/render_data/2/vert_shifted.npy',
                    vert_shifted)
            np.save('/media/ramdisk/render_data/2/kp.npy', kp)
            #cv2.imwrite('/media/ramdisk/render_data/2/kp.jpg',kp)
            num_render = 3
        elif num_render == 3:
            np.save('/media/ramdisk/render_data/3/cam_for_render.npy',
                    cam_for_render)
            np.save('/media/ramdisk/render_data/3/vert_shifted.npy',
                    vert_shifted)
            np.save('/media/ramdisk/render_data/3/kp.npy', kp)
            #cv2.imwrite('/media/ramdisk/render_data/3/kp.jpg',kp)
            num_render = 4
        elif num_render == 4:
            np.save('/media/ramdisk/render_data/4/cam_for_render.npy',
                    cam_for_render)
            np.save('/media/ramdisk/render_data/4/vert_shifted.npy',
                    vert_shifted)
            np.save('/media/ramdisk/render_data/4/kp.npy', kp)
            #cv2.imwrite('/media/ramdisk/render_data/4/kp.jpg',kp)
            num_render = 1

        rec_human_count = rec_human_count + 1
        if rec_human_count == 100:
            print('rec FPS:', 1.0 / ((time.time() - rec_human_time) / 100.0))
            rec_human_count = 0
            rec_human_time = time.time()
Code Example #14
File: demo.py  Project: siripoojitha/project
    visualize(img_path, img, proc_param, joints[0], verts[0], cams[0])


def join_csv():
    path = 'hmr/output/csv/'
    all_files = glob.glob(os.path.join(path, "*.csv"))

    df_from_each_file = (pd.read_csv(f) for f in sorted(all_files))
    concatenated_df = pd.concat(df_from_each_file, ignore_index=True)

    concatenated_df['frame'] = concatenated_df.index + 1
    concatenated_df.to_csv("hmr/output/csv_joined/csv_joined.csv", index=False)


config = flags.FLAGS
config(['hmr/demo.py'])


def get_model(path=None):
    # Using pre-trained model, change this to use your own.
    print([src.config.PRETRAINED_MODEL, path])
    if path is None:
        config.load_path = src.config.PRETRAINED_MODEL
    else:
        config.load_path = path + 'model.ckpt-667589'
    config.batch_size = 1
    sess = tf.Session()
    model = RunModel(config, sess=sess)
    return model

Code Example #15
File: tax.py  Project: qdbp/tws_autorebalance
                (lot for lot in self.lots[acct] if lot.symbol == trade.symbol),
                key=sort_key_asc,
            )[::-1],
        )


if __name__ == "__main__":
    state_margin = 0.055
    fed_margin = 0.22
    fed_lt = 0.15

    lt = fed_lt + state_margin
    st = fed_margin + state_margin

    taxer = StLtTaxProvider(st_rate=st, lt_rate=lt)
    selector = LotSelector.parse_flex_df(
        load_query("lots", reload=False), taxer
    )

    print(taxer)

    trade = Trade(
        symbol="XLI", qty=-10, dt=datetime.now(tz=TZ_EASTERN), price=93.49
    )
    acct = min(config()["strategy"]["accounts"].keys())
    print(acct)

    pprint(list(selector.find_best_lot_heuristic(acct, trade)))

    print("foo")
Code Example #16
File: generate_3DMSMT_aligned.py  Project: layumi/hmr
                vy /= camera[0,0] 
                vy -= camera[:, 1]
                vx /= camera[0,0] 
                vx -= camera[:, 2]
                vz = np.mean(verts[:,2])
                c = img[i,j,:]/255.0
                fp.write( 'v %f %f %f %f %f %f\n' % ( vy, vx, vz, c[0], c[1], c[2]) )
                background[i,j] = index
                index +=1

        for f in faces: # Faces are 1-based, not 0-based in obj files
            fp.write( 'f %d %d %d\n' %  (f[0] + 1, f[1] + 1, f[2] + 1) )
            break  # skip for small file
        #count = 0
        #for i in range(1,w):
        #    for j in range(1,h): 
        #        fp.write( 'f %d %d %d %d\n' % (background[i,j], background[i-1,j] ,background[i,j-1] , background[i-1, j-1]))


if __name__ == '__main__':
    config = flags.FLAGS
    config(sys.argv)
    # Using pre-trained model, change this to use your own.
    config.load_path = src.config.PRETRAINED_MODEL

    config.batch_size = 1

    renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)

    main(config.market_path, config.json_path)
Code Example #17
def predict(image, weight, height):
    global config, renderer

    config = flags.FLAGS
    config(sys.argv)
    # Using pre-trained model, change this to use your own.
    config.load_path = src.config.PRETRAINED_MODEL
    config.batch_size = 1
    renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)

    tf.reset_default_graph()
    sess = tf.Session()
    model = RunModel(config, sess=sess)

    input_img, proc_param, img = preprocess_image_V2(image)
    # Add batch dimension: 1 x D x D x 3
    input_img = np.expand_dims(input_img, 0)

    joints, verts, cams, joints3d, theta = model.predict(input_img,
                                                         get_theta=True)
    sess.close()

    cams = theta[:, :model.num_cam]
    poses = theta[:, model.num_cam:(model.num_cam + model.num_theta)]
    shapes = theta[:, (model.num_cam + model.num_theta):]

    viz_result = visualize(img, proc_param, joints[0], verts[0], cams[0])
    # Start adjusting the shape.
    shape_adjuster = torch.load("./trained/model_release_1.5961363467")
    smpl = SMPL("./models/neutral_smpl_with_cocoplus_reg.pkl")

    beta = torch.from_numpy(shapes).float().cuda()
    theta = torch.zeros((1, 72)).float().cuda()
    heights = torch.from_numpy(np.asarray([height]))
    volume = torch.from_numpy(np.asarray([weight]))

    verts, joints3d, Rs = smpl.forward(beta, theta, True)
    flatten_joints3d = joints3d.view(1, -1)
    heights = torch.unsqueeze(heights, -1).float().cuda()
    volumes = torch.unsqueeze(volume, -1).float().cuda()
    input_to_net = torch.cat((flatten_joints3d, heights, volumes), 1)

    adjusted_betas = shape_adjuster.forward(input_to_net)

    adjusted_verts, adjusted_joints3d, Rs = smpl.forward(
        adjusted_betas, theta, True)
    adjusted_heights = measure.compute_height(adjusted_verts)
    adjusted_volumes = measure.compute_volume(adjusted_verts, smpl.f)

    print(adjusted_heights, adjusted_volumes)

    #  debug_display_cloud(verts[0], joints3d[0], adjusted_verts[0], adjusted_joints3d[0])

    # Change the posture for measurement
    from measurement import POSE1
    theta = torch.from_numpy(np.expand_dims(POSE1, 0)).float().cuda()
    m_adjusted_verts, adjusted_joints3d, Rs = smpl.forward(
        adjusted_betas, theta, True)
    return (viz_result,
            torch.squeeze(verts).detach().cpu().numpy(),
            torch.squeeze(adjusted_verts).detach().cpu().numpy(),
            torch.squeeze(m_adjusted_verts).detach().cpu().numpy(),
            torch.squeeze(adjusted_volumes).detach().cpu().numpy(),
            torch.squeeze(adjusted_heights).detach().cpu().numpy())
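A hedged usage sketch for predict() (the file name and the units assumed for weight and height are illustrative; the original call sites are not shown in these examples):

import cv2

image = cv2.imread('person.jpg')
viz, verts, adjusted_verts, measured_verts, adjusted_volume, adjusted_height = predict(
    image, weight=70.0, height=1.75)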