Example #1
0
    def act(self,
            inputs,
            rnn_hxs,
            masks,
            mean_entropy,
            min_beta,
            max_beta,
            current_beta_range,
            device,
            flatness,
            beta,
            deterministic=False):
        """Run the policy on a batch of observations and pick an action.

        Computes an adaptive temperature ``beta`` from the entropy of the
        unscaled action distribution, rebuilds the distribution with
        beta-scaled features, then samples (or takes the mode of) an action.

        Returns:
            (value, action, action_log_probs, rnn_hxs, dist_entropy, beta)
        """
        value, actor_features, rnn_hxs, x = self.base(inputs, rnn_hxs, masks)

        # Entropy of the raw (unscaled) distribution drives the beta update.
        raw_dist = self.dist(actor_features)
        dist_entropy = raw_dist.entropy()
        beta = get_beta(device, dist_entropy, mean_entropy, min_beta, max_beta,
                        current_beta_range, flatness, beta)

        # Rebuild the distribution with beta-scaled features and act on it.
        scaled_dist = self.dist(actor_features * beta)
        action = scaled_dist.mode() if deterministic else scaled_dist.sample()
        action_log_probs = scaled_dist.log_probs(action)

        return value, action, action_log_probs, rnn_hxs, dist_entropy, beta
Example #2
0
def test_gpu(gpu_id=(0,)):
    """Run the SMPL model for frame '00001' and dump verts/faces to disk.

    Picks the first requested GPU when CUDA is available, otherwise CPU.

    Args:
        gpu_id: sequence of GPU ids; only the first entry is used.
            Default changed from the mutable ``[0]`` to the immutable
            ``(0,)`` — same behavior, avoids the shared-mutable-default
            pitfall.
    """
    if len(gpu_id) > 0 and torch.cuda.is_available():
        # Pin the process to the first requested GPU.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id[0])
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print(device)

    np.random.seed(9608)
    # Pose, shape and translation parameters for frame '00001',
    # promoted to float64 on the chosen device.
    pose = torch.from_numpy(get_theta('00001'))\
            .type(torch.float64).to(device)
    betas = torch.from_numpy(get_beta('00001')) \
            .type(torch.float64).to(device)
    trans = torch.from_numpy(get_trans('00001')).type(torch.float64).to(device)

    model = SMPLModel(device=device)
    result = model(betas, pose, trans)

    verts_numpy = result.cpu().numpy()
    faces_numpy = model.faces

    # NOTE(review): 'resault' looks like a typo for 'result', but the
    # directory name is shared with other scripts in this project — left
    # unchanged to stay consistent with them.
    np.save('../../resault/verts.npy', verts_numpy)
    np.save('../../resault/faces.npy', faces_numpy)
Example #3
0
def smplTest():
    """Forward one frame through SMPL and print the regressed joints."""
    model = smpl_torch.SMPLModel(device)

    # Parameters for frame '00000', promoted to float64 on the target device.
    theta = torch.from_numpy(get_theta('00000')).type(torch.float64).to(device)
    shape = torch.from_numpy(get_beta('00000')).type(torch.float64).to(device)
    offset = torch.from_numpy(get_trans('00000')).type(torch.float64).to(device)

    verts = model(shape, theta, offset)

    # Regress joint positions from the posed mesh vertices.
    joints = model.J_regressor.mm(verts)
    print(joints)
def beta(ticker):
    """Return beta/correlation/volatility of a B3 ticker vs the Bovespa index.

    Args:
        ticker: bare ticker symbol; ".SA" (B3 suffix) is appended for Yahoo
            Finance.

    Query params:
        interval: lookback window in days (default 365).

    Returns:
        JSON with keys "beta", "corr", "std_asset", "std_bench".
    """
    yf_ticker = escape(ticker) + ".SA"
    # Read the query parameter once instead of hitting request.args twice.
    raw_interval = request.args.get("interval")
    interval = 365 if raw_interval is None else int(raw_interval)
    start, end = get_interval(interval)
    benchmark = "^BVSP"
    tickers = [yf_ticker, benchmark]
    df = get_data(tickers=tickers, columns=["Adj Close"], start=start,
                  end=end)["Adj Close"]
    # Drop rows where either series is missing so both align date-wise.
    df.dropna(inplace=True)
    beta, corr, std_asset, std_bench = get_beta(df[yf_ticker], df[benchmark])
    return jsonify({
        "beta": round(beta, 2),
        "corr": round(corr, 2),
        "std_asset": round(std_asset, 4),
        "std_bench": round(std_bench, 4),
    })
Example #5
0
def smpl_plot():
    """Render the SMPL mesh for frame '00000' with labeled joint markers."""
    model = smpl_np.SMPLModel('../smpl/model.pkl')
    np.random.seed(9608)
    model.set_params(beta=get_beta('00000'),
                     pose=get_theta('00000'),
                     trans=get_trans('00000'))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # Semi-transparent grey surface built from per-face vertex triples.
    surface = Poly3DCollection(model.verts[model.faces], alpha=0.05)
    surface.set_edgecolor((0.3, 0.3, 0.3))
    surface.set_facecolor((0.7, 0.7, 0.7))
    ax.add_collection3d(surface)

    # Joint positions regressed from the mesh vertices.
    joints = model.J_regressor.dot(model.verts)
    print(joints)
    print(joints.shape, type(joints))
    print(model.J_regressor.shape)
    for idx, pt in enumerate(joints):
        ax.scatter3D([pt[0]], [pt[1]], [pt[2]], label=f"{idx}")

    plt.legend()
    plt.show()
Example #6
0
def main(img_path, json_path=None):
    """Run SMPL on precomputed params for frame '00001' and visualize.

    Args:
        img_path: path of the input image to preprocess and draw on.
        json_path: optional keypoint JSON passed to preprocess_image.
    """
    input_img, proc_param, img = preprocess_image(img_path, json_path)
    # Add batch dimension: 1 x D x D x 3
    input_img = np.expand_dims(input_img, 0)

    # Theta is the 85D vector holding [camera, pose, shape]
    # where camera is 3D [s, tx, ty]
    # pose is 72D vector holding the rotation of 24 joints of SMPL in axis angle format
    # shape is 10D shape coefficients of SMPL
    device = torch.device('cuda', 0)
    smpl = SMPL('../../smpl/model_cocoplus.pkl', obj_saveable=True).to(device)
    pose = get_theta('00001')
    beta = get_beta('00001')

    vbeta = torch.tensor(np.array([beta])).float().to(device)
    vpose = torch.tensor(np.array([pose])).float().to(device)
    vcam = torch.tensor([0.9, 0, 0]).expand((1, 3)).float().to(device)

    verts, joints, _ = smpl(vbeta, vpose, get_skin=True)

    # Project the 3D joints into image space with the weak-perspective camera.
    pred_kp = batch_orth_proj_idrot(joints.cpu(), vcam.cpu())

    # Fix: removed dead `r = torch.ones((1, 3))` — it was immediately
    # overwritten by the assignment below.
    verts, joints, r = verts.cpu().numpy(), pred_kp.cpu().numpy(), vcam.cpu(
    ).numpy()

    print(img.shape)
    print(type(joints), type(verts), type(r))
    print(joints[0].shape, verts[0].shape, r[0].shape)

    visualize(img, proc_param, joints[0], verts[0], r[0])
Example #7
0
# Read in input data: financial statements plus market/price time series
# (all price frames are coerced to float64).
statement_dic = load_statement(statement_dir)
debt_equity_market = pd.read_pickle(debt_equity_market_path).astype(np.float64)
risk_free_rate = pd.read_pickle(risk_free_rate_path).astype(np.float64)
price_market = pd.read_pickle(price_market_path).astype(np.float64)
# RDS-B prices are truncated to start at 2007-06-18.
price_rdsb = pd.read_pickle(price_rdsb_path).astype(
    np.float64).loc["2007-06-18":, ]
price_oil = pd.read_pickle(price_oil_path).astype(np.float64)

# Market-wide series, not related to an individual company.
return_market = price_market.pct_change()
# plot_time_series(price_oil, 'Brent oil price', img_save=os.path.join('image', 'oil_price'))
# plot_time_series(return_market, 'Market Return', img_save=os.path.join('image', 'market_return'))
# plot_time_series(price_market, 'Dow Jone Index price', marker=None, img_save=os.path.join('image', 'dow_jone_index'))
# plot_time_series(price_rdsb, 'RDS-B', marker=None, img_save=os.path.join('image', 'RDS-B'))
# Beta of RDS-B against the market index (called for its side effects here;
# the return value is discarded).
get_beta(benchmark=price_market, stock=price_rdsb)

# Per-ticker analysis: derive balance-sheet metrics and save plots per ticker.
for ticker, statement in statement_dic.items():
    image_dir = os.path.join('image', ticker)
    os.makedirs(image_dir, exist_ok=True)

    # Show accounts in statement
    # print_stat_account(statement['income_statement'])
    print_stat_account(statement['financial_position'])
    # print_stat_account(statement['cashflow_statement'])

    # B/S (balance sheet) derived figures.
    capital_employed = cal_financial_data(statement['financial_position'],
                                          'capital_employed')
    long_term_assets = cal_financial_data(statement['financial_position'],
                                          'long_term_assets')
Example #8
0
        # Stack per-axis joint coordinates along a new axis at dim=2.
        joints = torch.stack([joint_x, joint_y, joint_z], dim=2)

        # With get_skin, also return the mesh vertices and the rotation
        # matrices Rs alongside the joints.
        if get_skin:
            return verts, joints, Rs
        else:
            return joints


if __name__ == '__main__':
    # Drive SMPL on GPU 0 for frame '00002', export an OBJ mesh and the raw
    # vertex/face arrays.
    device = torch.device('cuda', 0)

    model = SMPL('../../smpl/model_cocoplus.pkl', obj_saveable=True).to(device)
    theta = get_theta('00002')
    theta[:3] = 0  # zero out the global-rotation component of the pose
    shape = get_beta('00002')
    cam = np.array([0.9, 0, 0])

    vbeta = torch.tensor(np.array([shape])).float().to(device)
    vpose = torch.tensor(np.array([theta])).float().to(device)
    vcam = torch.tensor(np.array([cam])).float().to(device)

    verts, j, r = model(vbeta, vpose, get_skin=True)
    model.save_obj(verts[0].cpu().numpy(), './mesh.obj')

    # Persist vertices and faces for downstream visualization.
    np.save('../../resault/verts.npy', verts.cpu().numpy())
    np.save('../../resault/faces.npy', model.faces)

    # rpose = reflect_pose(pose)
    # vpose = torch.tensor(np.array([rpose])).float().to(device)
Example #9
0
def main():
    """Train an AdaBoost-style ensemble of binary projections on SSM targets.

    Loads precomputed audio features (STFT / log-Mel / MFCC), then for each
    boosting round fits one linear projection ``p_m`` by minimizing a
    weighted squared error between a binarized self-similarity matrix (BSSM)
    and the data SSM, updates the AdaBoost observation weights, and
    periodically saves model arrays under Ada_Results/.
    """
    args = parse_arguments()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)

    # Pick the feature file by flag.
    # NOTE(review): if none of use_stft/use_log_mel/use_mfcc is set,
    # Xtr_load_nm is never bound and np.load below raises NameError —
    # consider a final `else: raise ValueError(...)`.
    if args.use_stft:
        Xtr_load_nm = "Xtr_STFT.npy"
        Xva_load_nm = "Xva_STFT.npy"
    elif args.use_log_mel:
        Xtr_load_nm = "Xtr_log_Mel.npy"
        Xva_load_nm = "Xva_log_Mel.npy"
    elif args.use_mfcc:
        Xtr_load_nm = "Xtr_MFCC.npy"
        Xva_load_nm = "Xva_MFCC.npy"

    # Load training dictionary
    Xtr = np.load(Xtr_load_nm)

    # Training datset: shuffle once (fixed seed) and trim so the length is a
    # multiple of segment_len.
    # NOTE(review): when len(Xtr) is an exact multiple of segment_len,
    # truncate_len == 0 and `[:-0]` yields an EMPTY array — likely a bug.
    truncate_len = len(Xtr) % args.segment_len
    np.random.seed(42)
    Xtr_shuffled = Xtr[np.random.permutation(len(Xtr))][:-truncate_len]
    # TODO: Continue shuffling per epoch, or remove it
    Ntr, n_features = Xtr_shuffled.shape

    # Validation dataset, trimmed the same way (same `[:-0]` hazard).
    # NOTE(review): Xva and tol are prepared but not used later in this
    # function as written.
    Xva = np.load(Xva_load_nm)
    truncate_len = len(Xva) % args.segment_len
    Xva = Xva[:-truncate_len]

    tol = 1 / (args.segment_len**2)

    # Load previous model if given; otherwise start from uniform observation
    # weights (each of the segment_len^2 SSM cells weighted equally).
    if args.load_model:
        projections = list(np.load("{}_projs.npy".format(args.load_model)))
        proj_losses = load_pkl("{}_projlosses.pkl".format(args.load_model))
        wip1 = np.load("{}_wip1.npy".format(args.load_model))
        betas = list(np.load("{}_betas.npy".format(args.load_model)))
        m_start = len(projections)
        model_nm = args.load_model
    else:
        m_start = 0
        wip1 = np.ones((Ntr, args.segment_len), dtype=np.float32)
        wip1_init_val = np.float32(1 / (args.segment_len * args.segment_len))
        wip1 *= wip1_init_val
        print("Observation weights\n\tShape: {}\n\tValues: {}".format(
            wip1.shape, wip1[0, 0]))
        projections = []
        proj_losses = []
        betas = []
        model_nm = "proj[n={}]_feat[{}]_kern[{}_{}]_lr[{:.0e}]_bias[{}]".format(
            len(projections),
            Xtr_load_nm.split('.')[0], args.kernel, args.sigma2, args.lr,
            args.use_bias)
        if args.debug_option > 0:
            model_nm += "_debug[{}|{}]".format(args.debug_option,
                                               args.tanhscale)
    print("Starting {}...".format(model_nm))

    # One boosting round per projection.
    for m in range(m_start, args.num_proj + m_start):
        # Init Training: current weights and a fresh Xavier-initialized
        # projection vector (with optional bias row).
        wi = wip1
        if args.use_bias:
            p_m = torch.Tensor(n_features + 1, 1)
        else:
            p_m = torch.Tensor(n_features, 1)
        p_m = Variable(torch.nn.init.xavier_normal_(p_m).cuda(),
                       requires_grad=True)
        optimizer = torch.optim.Adam([p_m], betas=[0.95, 0.98], lr=args.lr)

        toc = time.time()
        epoch = 0
        lsi = 0  # Losses start index
        tr_losses = []
        for epoch in range(args.max_iter):
            ep_losses = []
            # Training: iterate the data in segment_len-sized chunks.
            # NOTE(review): this loop reads Xtr (unshuffled) while Ntr comes
            # from Xtr_shuffled — presumably intentional, but worth confirming.
            for i in range(0, Ntr, args.segment_len):
                Xtr_seg = torch.cuda.FloatTensor(Xtr[i:i + args.segment_len])
                wi_seg = torch.cuda.FloatTensor(wi[i:i + args.segment_len])

                # Create SSM
                ssm_tr = torch.mm(Xtr_seg, Xtr_seg.t()) * 2 - 1

                # Train the weak learner
                if args.use_bias:
                    Xtr_seg_bias = torch.cat(
                        (Xtr_seg, torch.ones((len(Xtr_seg), 1)).cuda()), 1)
                else:
                    Xtr_seg_bias = Xtr_seg

                # tanh relaxation of the sign BSSM when debugging (> 1).
                if args.debug_option > 1:
                    bssm = bssm_tanh(Xtr_seg_bias, p_m, args.tanhscale)
                else:
                    bssm = bssm_sign(Xtr_seg_bias, p_m)
                # Backprop with weighted sum of errors
                sqerr = (bssm - ssm_tr)**2
                e_t = (sqerr * wi_seg).sum()
                optimizer.zero_grad()
                e_t.backward()
                optimizer.step()
                e_t = float(e_t)
                ep_losses.append(e_t)
            if args.debug:
                if (epoch + 1) % 20 == 0:
                    print("DEBUG: ", m, epoch, np.mean(ep_losses))
            tr_losses.append(np.mean(ep_losses))

        # Update Adaboost parameters at end of training:
        # beta for this round, then re-weight observations and renormalize
        # per segment.
        beta = get_beta(Xtr_shuffled, wi, p_m, args)
        wip1 = np.zeros(wi.shape, dtype=np.float32)
        for i in range(0, Ntr, args.segment_len):
            Xtr_seg = torch.cuda.FloatTensor(Xtr[i:i + args.segment_len])
            wi_seg = torch.cuda.FloatTensor(wi[i:i + args.segment_len])

            # Create SSM with a kernel
            ssm_tr = torch.mm(Xtr_seg, Xtr_seg.t()) * 2 - 1

            if args.use_bias:
                Xtr_seg_bias = torch.cat((Xtr_seg, torch.ones(
                    (len(Xtr_seg), 1)).cuda()), 1)
            else:
                Xtr_seg_bias = Xtr_seg

            # Hard sign BSSM (no gradient) for the weight update.
            bssm = bssm_sign_nograd(Xtr_seg_bias, p_m)
            # Backprop with weighted sum of errors
            sqerr = (bssm - ssm_tr)**2 / 2 - 1

            # Exponential AdaBoost re-weighting, normalized per segment.
            wip1[i:i + args.segment_len] = pt_to_np(wi_seg) * np.exp(
                -beta * pt_to_np(sqerr))
            wip1[i:i + args.segment_len] /= wip1[i:i + args.segment_len].sum()

        tic = time.time()
        print("Time: Learning projection #{}: {:.2f} for {} iterations".format(
            m + 1, tic - toc, epoch))
        print("\tbeta: {:.3f}".format(beta))
        if args.debug:
            print("DEBUG. tr_losses[::20] = ", tr_losses[::20])
        projections.append(p_m.detach().cpu().numpy())
        proj_losses.append(tr_losses)
        betas.append(beta)

        # Saving results every save_every rounds (and for the first 5 rounds).
        if (m + 1) % args.save_every == 0 or m < 5:
            model_nm = "proj[n={}]_feat[{}]_kern[{}_{}]_lr[{:.0e}]_bias[{}]".format(
                len(projections),
                Xtr_load_nm.split('.')[0], args.kernel, args.sigma2, args.lr,
                args.use_bias)
            if args.debug_option > 0:
                model_nm += "_debug[{}]".format(args.debug_option)
            np.save("Ada_Results/{}_projs".format(model_nm),
                    np.array(projections))
            save_pkl(proj_losses,
                     "Ada_Results/{}_projlosses.pkl".format(model_nm))
            np.save("Ada_Results/{}_betas".format(model_nm), np.array(betas))
            np.save("Ada_Results/{}_wip1".format(model_nm), wip1)

    # Final snapshot of the observation weights.
    np.save("Ada_Results/{}_wip1".format(model_nm), wip1)