コード例 #1
0
ファイル: dhmlpe_utils.py プロジェクト: chen1474147/convnet
def collect_feature_meta(folder, re_exp='batch_feature_\w+$'):
    """Merge all per-batch feature files in `folder` into one meta dict.

    Files whose names match `re_exp` are read in batch-number order.
    Returns an empty dict when no batch file is found; otherwise returns
    {'feature_list': [each feature concatenated along the last axis across
    batches], 'feature_dim': ..., 'info': {'indexes': concatenated batch
    indexes, 'feature_names': ...}}.

    Raises Exception if two batches disagree on 'feature_dim'.
    """
    allfile = sorted(iu.getfilelist(folder, re_exp), key=lambda x:extract_batch_num(x))
    feature_list_lst = []
    feature_dim = None
    indexes_lst = []
    feature_names = None  # NOTE(review): assigned but never used afterwards
    if len(allfile) == 0:
        return dict()
    for f in allfile:
        print f
        p =  iu.fullfile(folder, f)
        d = mio.unpickle(p)
        feature_list_lst += [d['feature_list']]
        if feature_dim:
            # All batches must report the same per-feature dimensions.
            if feature_dim!= d['feature_dim']:
                raise Exception('feature dim inconsistent')
        else:
            feature_dim = d['feature_dim']
        indexes_lst += [d['info']['indexes']]
    indexes = np.concatenate(indexes_lst)
    n_feature, n_batch = len(feature_dim), len(allfile)
    # Concatenate feature k of every batch along the last (sample) axis.
    feature_list = [np.concatenate([feature_list_lst[i][k] for i in range(n_batch)],
                                   axis=-1)
                    for k in range(n_feature)]
    # NOTE(review): `d` is the dict of the LAST batch read in the loop; its
    # 'feature_names' is assumed identical across batches -- confirm.
    return {'feature_list':feature_list, 'feature_dim':feature_dim, 'info':{'indexes':indexes,
                                                                        'feature_names':d['info']['feature_names']}}
コード例 #2
0
ファイル: initfunc.py プロジェクト: tenstep/itheano
def gbns(name,sp, params):
    """
    get bias for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    model  = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    W= layers[scale_name][2]['weights'][0]
    b= layers[scale_name][2]['biases'][0]
    print 'W-------------'
    iu.print_common_statistics(W)
    print 'b'
    iu.print_common_statistics(b)
    if 'epsilon' in layers[norm_name][2]: 
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [b - W * u / (np.sqrt(var + epsilon))]
コード例 #3
0
ファイル: ibasic_convdata.py プロジェクト: tenstep/itheano
 def __init__(self, data_dir, feature_range, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
     """Feature data provider over a fixed set of sample indexes.

     Optionally merges an external meta file (dp_params['external_meta_path'])
     into self.batch_meta, then derives batching state (batch count, current
     batch number) and a shuffled (or identity, for unshuffled test) feature
     ordering.

     Raises BasicDataProviderError when dp_params['batch_size'] is not in
     (0, num_feature].

     NOTE(review): the mutable default dp_params={} is shared across calls.
     """
     DataProvider.__init__(self, data_dir, feature_range, init_epoch, init_batchnum, dp_params, test)
     self.shuffle_data = dp_params['shuffle_data'] # determine whether to shuffle test data
     if 'external_meta_path' in dp_params and dp_params['external_meta_path']:
         import iread.myio as mio
         ext_meta = mio.unpickle(dp_params['external_meta_path'])
         print 'Print load external_meta for %s succussfully' % dp_params['external_meta_path']
         # External meta entries override/extend the loaded batch meta.
         for item in ext_meta:
             self.batch_meta[item] = ext_meta[item]
             print '----Load %s from ext_meta succussfully' % item
         del ext_meta
     self.test = test
     self.feature_range = np.asarray(feature_range)
     self.num_feature = len(feature_range)
     self.batch_size = dp_params['batch_size']
     self.keep_data_dic = False
     if self.batch_size > self.num_feature or self.batch_size <= 0:
         raise BasicDataProviderError('Invaid batch_size %d (num_image=%d)' % (self.batch_size, self.num_feature))
     # ceil(num_feature / batch_size) via Python 2 integer division.
     self.num_batch = (self.num_feature - 1)/ self.batch_size + 1
     self.batch_range = range(self.num_feature)
     if self.curr_batchnum not in self.batch_range:
         self.curr_batchnum = 0
     # Clamp into [0, num_feature - 1] regardless of the branch above.
     self.curr_batchnum = min(max(self.curr_batchnum, 0), self.num_feature - 1)
     self.batch_idx = self.curr_batchnum
     if test and self.shuffle_data == 0:
         # There is no need to shuffle testing data
         self.shuffled_feature_range = self.feature_range
     else:
         self.shuffled_feature_range = self.feature_range[rd.permutation(self.num_feature)]
     self.num_feature_type = len(self.batch_meta['feature_dim'])
     self.feature_dim = self.batch_meta['feature_dim']
コード例 #4
0
ファイル: train_mmls_backup.py プロジェクト: tenstep/itheano
def create_dp(op, saved_model = None):
    """Build train/test data providers from command-line options.

    Option values take precedence; missing ones fall back to the
    'dp_params' stored in `saved_model`. Any failure while resolving the
    required fields (including dp_params being None when a fallback is
    needed) is printed and terminates the process.

    Returns (resolved_field_dict, train_dp, test_dp).
    """
    try:
        if saved_model and 'dp_params' in saved_model['solver_params']:
            dp_params = saved_model['solver_params']['dp_params']
        else:
            dp_params = None
        required_fields = ['data_path', 'data_provider', 'train_range', 'test_range',
                           'batch_size']
        d = dict()
        for e in required_fields:
            if op.get_value(e):
                d[e] = op.get_value(e)
            else:
                # dp_params may be None here; the resulting TypeError is
                # caught below and treated as a fatal configuration error.
                d[e] = dp_params[e]
    except Exception as e:
        print e
        sys.exit(1)
    meta= mio.unpickle(iu.fullfile(d['data_path'], 'batches.meta'))
    param1, param2 = dict(), dict()
    if saved_model:
        # Resume epoch/batch counters from the saved model state.
        copy_dic_by_key(param1, saved_model['model_state']['train'], ['epoch', 'batchnum'])
        copy_dic_by_key(param2, saved_model['model_state']['test'], ['epoch', 'batchnum'])
    copy_dic_by_key(param1, d, ['batch_size'])
    copy_dic_by_key(param2, d, ['batch_size'])
    dp_type = d['data_provider']
    pre_process_data(meta) # < ---- This should be used with care
    train_dp = dp_dic[dp_type](data_dic=meta, train=True, data_range=d['train_range'], params=param1)
    test_dp = dp_dic[dp_type](data_dic=meta, train=False, data_range=d['test_range'], params=param2)
    return d, train_dp, test_dp
コード例 #5
0
def get_wb_from_convnet2_checkpoint(name, sp, params, item_type):
    """
    Load weights or biases for one layer from the newest cuda-convnet2
    checkpoint in a folder.

    params = [check_point_path, layer_name, ]
    params[2] (optional, item_type == 'weights' only): comma-separated
    weight indexes to extract; defaults to range(len(sp)).
    """
    sys.path.append('/home/grads/sijinli2/Projects/cuda-convnet2/')
    checkpath = params[0]
    # Layer name defaults to this parameter's own name.
    layer_name = name if len(params) == 1 else params[1]
    # Pick the newest checkpoint file by natural (alphanumeric) order.
    filepath = os.path.join(
        checkpath,
        sorted(os.listdir(checkpath), key=alphanum_key)[-1])
    saved = mio.unpickle(filepath)
    model_state = saved['model_state']
    layer = model_state['layers'][layer_name]
    if item_type == 'weights':
        # weights
        n_w = len(sp)
        print '    init from convnet {}--------'.format(checkpath)
        idx_list = range(n_w) if len(params) < 3 else iu.get_int_list_from_str(
            params[2])
        print ','.join(
            ['{}'.format(layer['weights'][k].shape) for k in idx_list])
        print '--------------\n---------\n'
        return [layer['weights'][k] for k in idx_list]
    else:
        return [layer['biases']]
コード例 #6
0
def gbns(name, sp, params):
    """
    get bias for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]

    Returns [b - W * u / sqrt(var + epsilon)], the bias obtained when the
    normalization layer's running statistics are folded into the scale
    layer's affine transform.
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    model = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    W = layers[scale_name][2]['weights'][0]
    b = layers[scale_name][2]['biases'][0]
    print 'W-------------'
    iu.print_common_statistics(W)
    print 'b'
    iu.print_common_statistics(b)
    # Fall back to a small constant when the norm layer stores no epsilon.
    if 'epsilon' in layers[norm_name][2]:
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [b - W * u / (np.sqrt(var + epsilon))]
コード例 #7
0
ファイル: test_analyze_stat.py プロジェクト: tenstep/itheano
def test_tmp():
    meta = mio.unpickle('/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_Raw_SP_t004_act_14/batches.meta')
    f0, f2 = meta['feature_list'][0], meta['feature_list'][2]
    print f0[..., 0]
    diff = f0 - f2
    print 'diff = {}'.format(diff.flatten().sum())
    print '''
コード例 #8
0
ファイル: idata.py プロジェクト: tenstep/itheano
 def parse_params(self, params):
     """Ensure self.data_dic is populated.

     Delegates to DataProvider.parse_params first; when no data_dic has
     been set yet, loads 'batches.meta' from params['data_path'].

     Raises Exception when data_dic is unset and 'data_path' is missing.
     """
     DataProvider.parse_params(self, params)
     if self.data_dic is not None:
         return
     if 'data_path' not in params:
         raise Exception('data-path is missing')
     self.data_dic = mio.unpickle(iu.fullfile(params['data_path'],
                                              'batches.meta'))
コード例 #9
0
ファイル: idata.py プロジェクト: xingyizhou/itheano
 def parse_params(self, params):
     """Ensure self.data_dic is populated.

     Delegates to DataProvider.parse_params, then, if no data dictionary
     has been set, loads 'batches.meta' from params['data_path'].

     Raises Exception when data_dic is unset and 'data_path' is missing.
     """
     DataProvider.parse_params(self, params)
     if self.data_dic is None:
         if 'data_path' in params:
             self.data_dic = mio.unpickle(
                 iu.fullfile(params['data_path'], 'batches.meta'))
         else:
             raise Exception('data-path is missing')
コード例 #10
0
ファイル: dhmlpe_utils.py プロジェクト: chen1474147/convnet
def collect_feature(folder, item, re_exp='batch_feature_\w+$'):
    """Concatenate field `item` from every batch file in `folder`.

    Batch files matching `re_exp` are read in batch-number order and the
    requested field is concatenated along axis 1 (the sample axis).
    """
    batch_files = sorted(iu.getfilelist(folder, re_exp),
                         key=lambda name: extract_batch_num(name))
    parts = [mio.unpickle(iu.fullfile(folder, name))[item]
             for name in batch_files]
    return np.concatenate(parts, axis=1)
コード例 #11
0
def test_tmp():
    meta = mio.unpickle(
        '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_Raw_SP_t004_act_14/batches.meta'
    )
    f0, f2 = meta['feature_list'][0], meta['feature_list'][2]
    print f0[..., 0]
    diff = f0 - f2
    print 'diff = {}'.format(diff.flatten().sum())
    print '''
コード例 #12
0
ファイル: test_analyze_stat.py プロジェクト: tenstep/itheano
def verify_layer_outputs():
    """Run one (test) batch through the net and evaluate layer 'net1_fc0'.

    Builds a theano function over the net inputs, feeds it the
    most-violated data found for the current batch, and loads a reference
    meta file (presumably for comparison; the comparison itself is not
    implemented here).
    """
    solver_loader = MMSolverLoader()
    solver = solver_loader.parse()
    net = solver.train_net
    output_layers = net.get_layer_by_names(['net1_fc0'])
    outputs = [lay.outputs for lay in output_layers]
    f = theano.function(inputs=net.inputs,
                        outputs=outputs, on_unused_input='ignore')
    cur_data = solver.get_next_batch(train=False)
    mvd = solver.find_most_violated(cur_data, train=False)
    # Bug fix: the original referenced the undefined names `self` and
    # `most_violated_data` inside this module-level function; use the
    # solver instance and the `mvd` result computed above instead.
    alldata = [solver.gpu_require(e.T) for e in mvd[2][1:]]
    res = f(alldata)
    ref_meta = mio.unpickle('/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_SP_t004_act_14/batches.meta')
コード例 #13
0
def show_bm_cmp_from_saved():
    """Visualize best-match vs. most-violated pose statistics from a saved
    batch-data pickle.

    Plots (1) the score difference against the best-match MPJPE margin and
    (2) a grid of RBF-score scatter plots for a range of sigma values.
    """
    save_path = "/public/sijinli2/ibuffer/2015-01-22/saved_batch_data_test_net4_K_rbf_test"
    # The second assignment intentionally overrides the first path.
    save_path = "/public/sijinli2/ibuffer/2015-01-22/saved_batch_data_test_net4_K_rbf_correct_test"
    d = mio.unpickle(save_path)
    print d.keys()
    bm_target, gt_score, mv_score, mv_target, gt_target, bm_score = (
        d["bm_target"],
        d["gt_score"],
        d["mv_score"],
        d["mv_target"],
        d["gt_target"],
        d["bm_score"],
    )
    ndata = bm_target.shape[-1]
    # Margins scaled by 1200 (presumably converting to millimeters -- confirm).
    bm_margin = MMLSSolver.calc_margin(gt_target - bm_target).flatten() * 1200
    mv_margin = MMLSSolver.calc_margin(gt_target - mv_target).flatten() * 1200

    # bm_rbf_margin = 1 - dutils.calc_RBF_score(gt_target - bm_target, 50/1200.0,3).flatten()
    # mv_rbf_margin = 1 - dutils.calc_RBF_score(gt_target - mv_target, 50/1200.0,3).flatten()
    # pl.ylabel('bm rbf margin')
    # pl.xlabel('highest score rbf margin')
    # pl.scatter(mv_rbf_margin, bm_rbf_margin, s=15, c='b')
    # pl.show()

    score_diff = mv_score - bm_score
    pl.ylabel("score_diff")
    pl.xlabel("mpjpe (bestmatch_pose, max_score_pose)")
    pl.scatter(bm_margin, score_diff, s=15, c="b")
    pl.show()

    #    show the scatter plot for mpjpe vs RBF score
    residuals = (gt_target - bm_target) * 1200
    bm_margin = bm_margin / 1200.0
    t = 0
    ncol = 4
    sigma_list = [1, 5, 10, 20, 50, 100, 200, 500, 1000, 1500, 2000, 5000]
    # Enough rows to fit all sigmas, ncol plots per row.
    nrow = (len(sigma_list) - 1) // ncol + 1
    # mm = np.max(bm_margin)
    for r in range(nrow):
        for c in range(ncol):
            sigma = float(sigma_list[t])
            t = t + 1
            pl.subplot(nrow, ncol, t)
            bm_rbf_score = 1 - dutils.calc_RBF_score(residuals, sigma)
            pl.xlabel("mpjpe (bestmatch_pose, max_score_pose) ")
            pl.ylabel("RBF sigma = {:.1f}".format(sigma))
            pl.scatter(bm_margin, bm_rbf_score)
            if t == len(sigma_list):
                break
    pl.show()
コード例 #14
0
def create_dp2(train_ext_params=None, test_ext_params=None):
    """Create train/test MemoryDataProviders over a hard-coded H36M meta.

    `train_ext_params` / `test_ext_params` are merged on top of the base
    params ({'batch_size': 1024, 'data_path': meta_path}).

    Returns (train_dp, test_dp).
    """
    meta_path = "/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_SP_t004_act_14/batches.meta"
    d = mio.unpickle(meta_path)
    print "data format"
    print_dims(d["feature_list"])
    # Hard-coded H36M split: first 132744 samples train, rest test.
    train_range = range(0, 132744)
    test_range = range(132744, 162008)
    params = {"batch_size": 1024, "data_path": meta_path}
    train_params = merge_dic(params, train_ext_params)
    test_params = merge_dic(params, test_ext_params)
    pre_process_data(d)
    train_dp = MemoryDataProvider(data_dic=d, train=True, data_range=train_range, params=train_params)
    test_dp = MemoryDataProvider(d, train=False, data_range=test_range, params=test_params)
    print "Create Data Provider Successfully"
    return train_dp, test_dp
コード例 #15
0
def show_bm_cmp_from_saved():
    """Visualize best-match vs. most-violated pose statistics from a saved
    batch-data pickle.

    Plots (1) the score difference against the best-match MPJPE margin and
    (2) a grid of RBF-score scatter plots for a range of sigma values.
    """
    save_path = '/public/sijinli2/ibuffer/2015-01-22/saved_batch_data_test_net4_K_rbf_test'
    # The second assignment intentionally overrides the first path.
    save_path = '/public/sijinli2/ibuffer/2015-01-22/saved_batch_data_test_net4_K_rbf_correct_test'
    d = mio.unpickle(save_path)
    print d.keys()
    bm_target, gt_score, mv_score, mv_target, gt_target, bm_score = d['bm_target'], \
                                                                    d['gt_score'], \
                                                                    d['mv_score'], \
                                                                    d['mv_target'],\
                                                                    d['gt_target'],\
                                                                    d['bm_score']
    ndata = bm_target.shape[-1]
    # Margins scaled by 1200 (presumably converting to millimeters -- confirm).
    bm_margin = MMLSSolver.calc_margin(gt_target - bm_target).flatten() * 1200
    mv_margin = MMLSSolver.calc_margin(gt_target - mv_target).flatten() * 1200

    # bm_rbf_margin = 1 - dutils.calc_RBF_score(gt_target - bm_target, 50/1200.0,3).flatten()
    # mv_rbf_margin = 1 - dutils.calc_RBF_score(gt_target - mv_target, 50/1200.0,3).flatten()
    # pl.ylabel('bm rbf margin')
    # pl.xlabel('highest score rbf margin')
    # pl.scatter(mv_rbf_margin, bm_rbf_margin, s=15, c='b')
    # pl.show()

    score_diff = mv_score - bm_score
    pl.ylabel('score_diff')
    pl.xlabel('mpjpe (bestmatch_pose, max_score_pose)')
    pl.scatter(bm_margin, score_diff, s=15, c='b')
    pl.show()

    #    show the scatter plot for mpjpe vs RBF score
    residuals = (gt_target - bm_target) * 1200
    bm_margin = bm_margin / 1200.0
    t = 0
    ncol = 4
    sigma_list = [1, 5, 10, 20, 50, 100, 200, 500, 1000, 1500, 2000, 5000]
    # Enough rows to fit all sigmas, ncol plots per row.
    nrow = (len(sigma_list) - 1) // ncol + 1
    # mm = np.max(bm_margin)
    for r in range(nrow):
        for c in range(ncol):
            sigma = float(sigma_list[t])
            t = t + 1
            pl.subplot(nrow, ncol, t)
            bm_rbf_score = 1 - dutils.calc_RBF_score(residuals, sigma)
            pl.xlabel('mpjpe (bestmatch_pose, max_score_pose) ')
            pl.ylabel('RBF sigma = {:.1f}'.format(sigma))
            pl.scatter(bm_margin, bm_rbf_score)
            if t == len(sigma_list):
                break
    pl.show()
コード例 #16
0
def verify_layer_outputs():
    """Run one (test) batch through the net and evaluate layer 'net1_fc0'.

    Builds a theano function over the net inputs, feeds it the
    most-violated data found for the current batch, and loads a reference
    meta file (presumably for comparison; the comparison itself is not
    implemented here).
    """
    solver_loader = MMSolverLoader()
    solver = solver_loader.parse()
    net = solver.train_net
    output_layers = net.get_layer_by_names(['net1_fc0'])
    outputs = [lay.outputs for lay in output_layers]
    f = theano.function(inputs=net.inputs,
                        outputs=outputs,
                        on_unused_input='ignore')
    cur_data = solver.get_next_batch(train=False)
    mvd = solver.find_most_violated(cur_data, train=False)
    # Bug fix: the original referenced the undefined names `self` and
    # `most_violated_data` inside this module-level function; use the
    # solver instance and the `mvd` result computed above instead.
    alldata = [solver.gpu_require(e.T) for e in mvd[2][1:]]
    res = f(alldata)
    ref_meta = mio.unpickle(
        '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_SP_t004_act_14/batches.meta'
    )
コード例 #17
0
 def __init__(self,
              data_dir,
              feature_range,
              init_epoch=1,
              init_batchnum=None,
              dp_params={},
              test=False):
     """Feature data provider over a fixed set of sample indexes.

     Optionally merges an external meta file (dp_params['external_meta_path'])
     into self.batch_meta, then derives batching state (batch count, current
     batch number) and a shuffled (or identity, for unshuffled test) feature
     ordering.

     Raises BasicDataProviderError when dp_params['batch_size'] is not in
     (0, num_feature].

     NOTE(review): this variant passes range(1) as the batch range to
     DataProvider.__init__ while a sibling version passes feature_range --
     confirm which is intended.
     NOTE(review): the mutable default dp_params={} is shared across calls.
     """
     DataProvider.__init__(self, data_dir, range(1), init_epoch,
                           init_batchnum, dp_params, test)
     self.shuffle_data = dp_params[
         'shuffle_data']  # determine whether to shuffle test data
     if 'external_meta_path' in dp_params and dp_params[
             'external_meta_path']:
         import iread.myio as mio
         ext_meta = mio.unpickle(dp_params['external_meta_path'])
         print 'Print load external_meta for %s succussfully' % dp_params[
             'external_meta_path']
         # External meta entries override/extend the loaded batch meta.
         for item in ext_meta:
             self.batch_meta[item] = ext_meta[item]
             print '----Load %s from ext_meta succussfully' % item
         del ext_meta
     self.test = test
     self.feature_range = np.asarray(feature_range)
     self.num_feature = len(feature_range)
     self.batch_size = dp_params['batch_size']
     self.keep_data_dic = False
     if self.batch_size > self.num_feature or self.batch_size <= 0:
         raise BasicDataProviderError(
             'Invaid batch_size %d (num_image=%d)' %
             (self.batch_size, self.num_feature))
     # ceil(num_feature / batch_size) via Python 2 integer division.
     self.num_batch = (self.num_feature - 1) / self.batch_size + 1
     self.batch_range = range(self.num_feature)
     if self.curr_batchnum not in self.batch_range:
         self.curr_batchnum = 0
     # Clamp into [0, num_feature - 1] regardless of the branch above.
     self.curr_batchnum = min(max(self.curr_batchnum, 0),
                              self.num_feature - 1)
     self.batch_idx = self.curr_batchnum
     if test and self.shuffle_data == 0:
         # There is no need to shuffle testing data
         self.shuffled_feature_range = self.feature_range
     else:
         self.shuffled_feature_range = self.feature_range[rd.permutation(
             self.num_feature)]
     self.num_feature_type = len(self.batch_meta['feature_dim'])
     self.feature_dim = self.batch_meta['feature_dim']
コード例 #18
0
ファイル: test_exp_generate.py プロジェクト: tenstep/itheano
def cvt1(source_exp_name, target_exp_name):
    """Derive a target batches.meta from a source one by duplicating
    feature 0 in place of feature 2 (keeping features [0, 1, 0]).

    Reads folder_<source_exp_name>/batches.meta under the H36M base path
    and writes the converted dict to folder_<target_exp_name>/batches.meta,
    creating the target folder if needed.
    """
    print '''
    SP_t004_act_14:
    source meta [rel_gt,  img_feature_accv_fc_j0,  relskel_feature_t004]
    Raw_SP_t004_act_14:
    target meta [rel_gt,  img_feature_accv_fc_j0,  rel_gt]
    '''
    base_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/'
    source_meta = mio.unpickle(iu.fullfile(base_path, 'folder_%s' % source_exp_name,
                                           'batches.meta'))
    target_meta_folder = iu.fullfile(base_path, 'folder_%s' % target_exp_name) 
    target_meta_path =  iu.fullfile(target_meta_folder, 'batches.meta') 
    # Shallow copy, then replace the feature slots with [0, 1, 0].
    d = source_meta.copy()
    print d.keys()
    d['feature_list'] = [source_meta['feature_list'][k] for k in [0, 1, 0]]
    d['feature_dim'] = [source_meta['feature_dim'][k] for k in [0, 1, 0]]
    # print d['info']
    print 'folder :{}\n path {}'.format(target_meta_folder, target_meta_path)
    iu.ensure_dir(target_meta_folder)
    mio.pickle(target_meta_path, d)
コード例 #19
0
def cvt1(source_exp_name, target_exp_name):
    """Derive a target batches.meta from a source one by duplicating
    feature 0 in place of feature 2 (keeping features [0, 1, 0]).

    Reads folder_<source_exp_name>/batches.meta under the H36M base path
    and writes the converted dict to folder_<target_exp_name>/batches.meta,
    creating the target folder if needed.
    """
    print '''
    SP_t004_act_14:
    source meta [rel_gt,  img_feature_accv_fc_j0,  relskel_feature_t004]
    Raw_SP_t004_act_14:
    target meta [rel_gt,  img_feature_accv_fc_j0,  rel_gt]
    '''
    base_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/'
    source_meta = mio.unpickle(
        iu.fullfile(base_path, 'folder_%s' % source_exp_name, 'batches.meta'))
    target_meta_folder = iu.fullfile(base_path, 'folder_%s' % target_exp_name)
    target_meta_path = iu.fullfile(target_meta_folder, 'batches.meta')
    # Shallow copy, then replace the feature slots with [0, 1, 0].
    d = source_meta.copy()
    print d.keys()
    d['feature_list'] = [source_meta['feature_list'][k] for k in [0, 1, 0]]
    d['feature_dim'] = [source_meta['feature_dim'][k] for k in [0, 1, 0]]
    # print d['info']
    print 'folder :{}\n path {}'.format(target_meta_folder, target_meta_path)
    iu.ensure_dir(target_meta_folder)
    mio.pickle(target_meta_path, d)
コード例 #20
0
def create_dp2(train_ext_params=None, test_ext_params=None):
    """Create train/test MemoryDataProviders over a hard-coded H36M meta.

    `train_ext_params` / `test_ext_params` are merged on top of the base
    params ({'batch_size': 1024, 'data_path': meta_path}).

    Returns (train_dp, test_dp).
    """
    meta_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_SP_t004_act_14/batches.meta'
    d = mio.unpickle(meta_path)
    print 'data format'
    print_dims(d['feature_list'])
    # Hard-coded H36M split: first 132744 samples train, rest test.
    train_range = range(0, 132744)
    test_range = range(132744, 162008)
    params = {'batch_size': 1024, 'data_path': meta_path}
    train_params = merge_dic(params, train_ext_params)
    test_params = merge_dic(params, test_ext_params)
    pre_process_data(d)
    train_dp = MemoryDataProvider(data_dic=d,
                                  train=True,
                                  data_range=train_range,
                                  params=train_params)
    test_dp = MemoryDataProvider(d,
                                 train=False,
                                 data_range=test_range,
                                 params=test_params)
    print 'Create Data Provider Successfully'
    return train_dp, test_dp
コード例 #21
0
ファイル: initfunc.py プロジェクト: tenstep/itheano
def get_wb_from_convnet2_checkpoint(name, sp, params, item_type):
    """
    Load weights or biases for one layer from the newest cuda-convnet2
    checkpoint in a folder.

    params = [check_point_path, layer_name, ]
    params[2] (optional, item_type == 'weights' only): comma-separated
    weight indexes to extract; defaults to range(len(sp)).
    """
    sys.path.append('/home/grads/sijinli2/Projects/cuda-convnet2/')
    checkpath= params[0]
    # Layer name defaults to this parameter's own name.
    layer_name= name if len(params) == 1 else params[1]
    # Pick the newest checkpoint file by natural (alphanumeric) order.
    filepath = os.path.join(checkpath, sorted(os.listdir(checkpath), key=alphanum_key)[-1])
    saved = mio.unpickle(filepath)
    model_state = saved['model_state']
    layer = model_state['layers'][layer_name]
    if item_type == 'weights':
        # weights
        n_w = len(sp)
        print '    init from convnet {}--------'.format(checkpath)
        idx_list = range(n_w) if len(params) < 3 else iu.get_int_list_from_str(params[2])
        print ','.join(['{}'.format(layer['weights'][k].shape) for k in idx_list])
        print '--------------\n---------\n'
        return [layer['weights'][k] for k in idx_list]  
    else:
        return [layer['biases']]
コード例 #22
0
ファイル: initfunc.py プロジェクト: tenstep/itheano
def gwns(name, sp_list, params):
    """
    get weights for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    print 'stat keys = {}'.format(stat['layers'].keys())
    model  = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    W= layers[scale_name][2]['weights']
    if 'epsilon' in layers[norm_name][2]: 
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    # u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [W[0] / np.sqrt(var + epsilon)]
コード例 #23
0
def create_dp(op, saved_model=None):
    """Build train/test data providers from command-line options.

    Option values take precedence; missing ones fall back to the
    'dp_params' stored in `saved_model`. Any failure while resolving the
    required fields (including dp_params being None when a fallback is
    needed) is printed and terminates the process.

    Returns (resolved_field_dict, train_dp, test_dp).
    """
    try:
        if saved_model and 'dp_params' in saved_model['solver_params']:
            dp_params = saved_model['solver_params']['dp_params']
        else:
            dp_params = None
        required_fields = [
            'data_path', 'data_provider', 'train_range', 'test_range',
            'batch_size'
        ]
        d = dict()
        for e in required_fields:
            if op.get_value(e):
                d[e] = op.get_value(e)
            else:
                # dp_params may be None here; the resulting TypeError is
                # caught below and treated as a fatal configuration error.
                d[e] = dp_params[e]
    except Exception as e:
        print e
        sys.exit(1)
    meta = mio.unpickle(iu.fullfile(d['data_path'], 'batches.meta'))
    param1, param2 = dict(), dict()
    if saved_model:
        # Resume epoch/batch counters from the saved model state.
        copy_dic_by_key(param1, saved_model['model_state']['train'],
                        ['epoch', 'batchnum'])
        copy_dic_by_key(param2, saved_model['model_state']['test'],
                        ['epoch', 'batchnum'])
    copy_dic_by_key(param1, d, ['batch_size'])
    copy_dic_by_key(param2, d, ['batch_size'])
    dp_type = d['data_provider']
    pre_process_data(meta)  # < ---- This should be used with care
    train_dp = dp_dic[dp_type](data_dic=meta,
                               train=True,
                               data_range=d['train_range'],
                               params=param1)
    test_dp = dp_dic[dp_type](data_dic=meta,
                              train=False,
                              data_range=d['test_range'],
                              params=param2)
    return d, train_dp, test_dp
コード例 #24
0
def gwns(name, sp_list, params):
    """
    get weights for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]

    Returns [W / sqrt(var + epsilon)], the weight obtained when the
    normalization layer's running variance is folded into the scale layer.
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    print 'stat keys = {}'.format(stat['layers'].keys())
    model = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    W = layers[scale_name][2]['weights']
    # Fall back to a small constant when the norm layer stores no epsilon.
    if 'epsilon' in layers[norm_name][2]:
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    # u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [W[0] / np.sqrt(var + epsilon)]
コード例 #25
0
def show_the_most_violated_poses():
    """Plot the 3D skeletons of the training poses most often selected as
    'most violated' by a saved solver.

    Sorts training samples by their most-violated count, takes the top 144,
    and renders each as a 3D skeleton subplot titled with its count.
    """
    from mpl_toolkits.mplot3d import Axes3D
    import imgproc
    saved_model_path = '/public/sijinli2/ibuffer/2015-01-16/net2_test_for_stat'
    # The second assignment intentionally overrides the first path.
    saved_model_path = '/opt/visal/tmp/for_sijin/Data/saved/theano_models/FCJ0_act_14_graph_0029_test_norm_cumulate_update'
    data_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_SP_t004_act_14/batches.meta'
    meta = mio.unpickle(data_path)
    all_pose = meta['feature_list'][0]
    ssolver = Solver.get_saved_model(saved_model_path)
    stat = ssolver['model_state']['stat']
    cnt_sample = stat['sample_candidate_counts']
    mvc = stat['most_violated_counts']
    ntrain = cnt_sample.size

    # Training indexes sorted by descending most-violated count.
    sorted_indexes = sorted(range(ntrain), key=lambda k: mvc[k], reverse=True)
    show_num = int(144)
    selected_indexes = sorted_indexes[:show_num]
    max_show_row = int(16)
    n_row = (show_num - 1) // max_show_row + 1
    nc = 0
    selected_pose = all_pose[:, selected_indexes]
    limbs = h36m.part_idx
    fig = pl.figure()
    params = {'elev': -89, 'azim': -107, 'linewidth': 3}
    selected_cnt = mvc[selected_indexes]
    print n_row, max_show_row
    for r in range(n_row):
        for c in range(max_show_row):
            if nc == show_num:
                break
            # Flat pose vector -> (17, 3) array of joint coordinates.
            p = selected_pose[..., nc].reshape((3, 17), order='F').T
            nc = nc + 1
            # pl.subplot(n_row, max_show_row, c)
            ax = fig.add_subplot(n_row, max_show_row, nc, projection='3d')
            imgproc.turn_off_axis()
            dutils.show_3d_skeleton(p, limbs, params)
            pl.title('mvc={}'.format(selected_cnt[nc - 1]))
    pl.show()
コード例 #26
0
ファイル: test_analyze_stat.py プロジェクト: tenstep/itheano
def show_the_most_violated_poses():
    """Plot the 3D skeletons of the training poses most often selected as
    'most violated' by a saved solver.

    Sorts training samples by their most-violated count, takes the top 144,
    and renders each as a 3D skeleton subplot titled with its count.
    """
    from mpl_toolkits.mplot3d import Axes3D
    import imgproc
    saved_model_path = '/public/sijinli2/ibuffer/2015-01-16/net2_test_for_stat'
    # The second assignment intentionally overrides the first path.
    saved_model_path = '/opt/visal/tmp/for_sijin/Data/saved/theano_models/FCJ0_act_14_graph_0029_test_norm_cumulate_update'
    data_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_SP_t004_act_14/batches.meta'
    meta = mio.unpickle(data_path)
    all_pose = meta['feature_list'][0]
    ssolver = Solver.get_saved_model(saved_model_path)
    stat = ssolver['model_state']['stat']
    cnt_sample = stat['sample_candidate_counts']
    mvc = stat['most_violated_counts']
    ntrain = cnt_sample.size

    # Training indexes sorted by descending most-violated count.
    sorted_indexes = sorted(range(ntrain), key=lambda k: mvc[k], reverse=True)
    show_num = int(144)
    selected_indexes = sorted_indexes[:show_num]
    max_show_row = int(16)
    n_row = (show_num - 1)// max_show_row + 1
    nc = 0
    selected_pose = all_pose[:,selected_indexes]
    limbs = h36m.part_idx
    fig = pl.figure()
    params = {'elev':-89, 'azim':-107, 'linewidth':3}
    selected_cnt = mvc[selected_indexes]
    print n_row, max_show_row
    for r in range(n_row):
        for c in range(max_show_row):
            if nc == show_num:
                break
            # Flat pose vector -> (17, 3) array of joint coordinates.
            p = selected_pose[...,nc].reshape((3,17),order='F').T
            nc = nc + 1
            # pl.subplot(n_row, max_show_row, c)
            ax = fig.add_subplot(n_row, max_show_row, nc, projection='3d')
            imgproc.turn_off_axis()
            dutils.show_3d_skeleton(p, limbs, params)
            pl.title('mvc={}'.format(selected_cnt[nc-1]))
    pl.show()
コード例 #27
0
def read_inputs():
    """Fit a ridge-style linear regression from image features (feature 1)
    to pose targets (feature 0) on a hard-coded H36M split and print the
    squared residual and mean MPJPE on the test range.
    """
    d = mio.unpickle(
        '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_12/batches.meta'
    )
    info = d['info']
    print info.keys()
    indexes = info['indexes']
    # Feature 0 = regression targets, feature 1 = inputs.
    Y = d['feature_list'][0]
    X = d['feature_list'][1]
    train_range = range(0, 76048)
    test_range = range(76048, 105368)

    print min(indexes[train_range]), max(indexes[train_range])
    print min(indexes[test_range]), max(indexes[test_range])

    print 'X '
    iu.print_common_statistics(X)

    # Samples are stored along the last axis.
    X_train = X[..., train_range]
    Y_train = Y[..., train_range]

    feature_dim = X_train.shape[0]

    X_test = X[..., test_range]
    Y_test = Y[..., test_range]

    # Regularizer over feature_dim weights plus a bias term.
    params = {'Sigma': np.ones(feature_dim + 1) * 0.0001}
    r = LinearRegression(params)
    r.fit(simpleDP(X_train, Y_train))
    Y_pred = r.apply(X_test)
    print Y_pred.shape
    print Y_test[:5, :5]
    print Y_pred[:5, :5]
    diff = Y_test - Y_pred
    print 'abs diff = {}'.format(np.sum(diff.flatten()**2))
    # 17 joints per pose.
    mpjpe = dutils.calc_mpjpe_from_residual(diff, 17)

    print 'average mpjpe  {}'.format(np.mean(mpjpe.flatten()))
コード例 #28
0
def process(op):
    data_folder = op.get_value('load_file')
    save_path = op.get_value('save_path')
    # data_folder = '/public/sijinli2/ibuffer/2015-01-16/net2_test_for_stat_2000'
    all_files = iu.getfilelist(data_folder, '\d+@\d+$')
    print all_files
    d = mio.unpickle(iu.fullfile(data_folder, all_files[0]))
    ms = d['model_state']
    if op.get_value('cost_name') is not None:
        cost_names = op.get_value('cost_name').split(',')
        n_cost = len(cost_name)
    else:
        n_cost = len(d['solver_params']['train_error'][0])
        cost_names = d['solver_params']['train_error'][0].keys()
    print 'Start to plot'
    start_time = time()
    for i in range(n_cost):
        pl.subplot(n_cost, 1, i + 1)
        plot_cost(op, d, cost_names[i])
    print 'Cost {} seconds '.format(time() - start_time)
    if save_path:
        imgproc.imsave_tight(save_path)
    pl.show()
コード例 #29
0
def process(op):
    data_folder = op.get_value('load_file')
    save_path = op.get_value('save_path')
    # data_folder = '/public/sijinli2/ibuffer/2015-01-16/net2_test_for_stat_2000'
    all_files = iu.getfilelist(data_folder, '\d+@\d+$')
    print all_files
    d = mio.unpickle(iu.fullfile(data_folder, all_files[0]))
    ms = d['model_state']
    if op.get_value('cost_name') is not None:
        cost_names = op.get_value('cost_name').split(',')
        n_cost = len(cost_name)
    else:
        n_cost = len(d['solver_params']['train_error'][0])
        cost_names = d['solver_params']['train_error'][0].keys()
    print 'Start to plot'
    start_time = time()
    for i in range(n_cost):
        pl.subplot(n_cost, 1, i + 1)
        plot_cost(op, d, cost_names[i])
    print 'Cost {} seconds '.format(time()- start_time)
    if save_path:
        imgproc.imsave_tight(save_path)
    pl.show()
コード例 #30
0
def read_inputs():
    d = mio.unpickle('/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_12/batches.meta')
    info = d['info']
    print info.keys()
    indexes = info['indexes']
    Y = d['feature_list'][0]
    X = d['feature_list'][1]
    train_range = range(0,76048)
    test_range = range(76048,105368)

    print min(indexes[train_range]), max(indexes[train_range])
    print min(indexes[test_range]), max(indexes[test_range])

    print 'X '
    iu.print_common_statistics(X)
    
    X_train = X[..., train_range]
    Y_train = Y[..., train_range]

    
    feature_dim = X_train.shape[0]
    
    X_test = X[..., test_range]
    Y_test = Y[..., test_range]

    params = {'Sigma':np.ones(feature_dim + 1) * 0.0001}
    r = LinearRegression(params)
    r.fit(simpleDP(X_train,Y_train))
    Y_pred = r.apply(X_test)
    print Y_pred.shape
    print Y_test[:5,:5]
    print Y_pred[:5,:5]
    diff = Y_test - Y_pred
    print 'abs diff = {}'.format(np.sum(diff.flatten()**2))
    mpjpe = dutils.calc_mpjpe_from_residual(diff,17)
    
    print 'average mpjpe  {}'.format(np.mean(mpjpe.flatten()))
コード例 #31
0
ファイル: ibasic_convdata.py プロジェクト: tenstep/itheano
    def __init__(self, data_dir, image_range, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
        """Image data provider driven by an explicit list of image indices.

        Initializes the base DataProvider with one pseudo-batch per image in
        ``image_range``, derives the network input size from crop settings,
        optionally overrides meta entries from an external meta file, and
        sets up batch/shuffle bookkeeping over the image list.

        NOTE(review): the mutable default ``dp_params={}`` is shared across
        calls; harmless only if callers always pass their own dict -- verify.
        Raises BasicDataProviderError when ``batch_size`` is non-positive or
        exceeds the number of images.
        """
        DataProvider.__init__(self, data_dir, range(len(image_range)), init_epoch, init_batchnum, dp_params, test)

        #crop_boarder will crop 2 * crop_boarder in each dimension
        #padding is similar. It will pad both size, i.e., adding 2x pixels
        self.image_dim = np.asarray(self.batch_meta['image_adjust_dim']).flatten()
        if dp_params['crop_border'] > 0:
            # NOTE(review): this aliases batch_meta['image_adjust_dim'] and
            # the in-place -= below mutates the meta entry if it is a
            # list/array -- confirm that is intended.
            self.input_image_dim = self.batch_meta['image_adjust_dim']
            self.input_image_dim[0] -= dp_params['crop_border'] * 2
            self.input_image_dim[1] -= dp_params['crop_border'] * 2
        elif dp_params['crop_one_border'] > 0:
            # Crop from one side only (subtract the border once per dim).
            self.input_image_dim = self.batch_meta['image_adjust_dim']
            self.input_image_dim[0] -= dp_params['crop_one_border']
            self.input_image_dim[1] -= dp_params['crop_one_border']
        else:
            self.input_image_dim = self.batch_meta['image_sample_dim']
        if 'fix_num_batch' in dp_params:
            # It can be used for forcing the number of batch to be fixed
            # The last batch might be smaller than previous batch
            self.fix_num_batch = dp_params['fix_num_batch']
        else:
            self.fix_num_batch = False
        self.shuffle_data = dp_params['shuffle_data'] # determine whether to shuffle test data
        if 'external_meta_path' in dp_params and dp_params['external_meta_path']:
            # Selected meta entries can be overridden from an external
            # pickle (e.g. precomputed mean image / eigen statistics).
            import iread.myio as mio
            ext_meta = mio.unpickle(dp_params['external_meta_path'])
            print 'Print load external_meta for %s succussfully' % dp_params['external_meta_path']
            override_dic = ['mean_image', 'cropped_mean_image', 'rgb_eigenvalue', \
                            'rgb_eigenvector', 'RelativeSkel_Y3d_mono_body', \
                            'Relative_Y3d_mono_body']
            for item in override_dic:
                if item in ext_meta:
                    self.batch_meta[item] = ext_meta[item]
                    print '----Load %s from ext_meta succussfully' % item
            # Free the external meta once the entries are copied over.
            del ext_meta
        self.mean_image = self.batch_meta['mean_image']
        self.cropped_mean_image = self.get_cropped_mean()

        self.rgb_eigenvalue = self.batch_meta['rgb_eigenvalue']
        self.rgb_eigenvector = self.batch_meta['rgb_eigenvector']
        self.test = test
        self.image_range = np.asarray(image_range)
        self.num_image = len(image_range)
        self.batch_size = dp_params['batch_size']
        self.keep_data_dic = False
        if self.batch_size > self.num_image or self.batch_size <= 0:
            raise BasicDataProviderError('Invaid batch_size %d (num_image=%d)' % (self.batch_size, self.num_image))
        # Ceiling division: the last batch may be smaller than batch_size.
        self.num_batch = (self.num_image - 1)/ self.batch_size + 1
        # override batch_range, this is not actually the batch_range
        # just keep consistent
        self.batch_range = range(self.num_image)
        # recheck curr_batchnum
        # (Remembering last times' batch_num will not help training), just keep batch consistant
        if self.curr_batchnum not in self.batch_range:
            self.curr_batchnum = 0
        # Clamp into [0, num_image - 1] in case the saved value is stale.
        self.curr_batchnum = min(max(self.curr_batchnum, 0), self.num_image - 1)
        # print 'Curr_batchnum = %d Test  = %s' % (self.curr_batchnum, 'True' if self.test else 'False')
        # override batch_Idx
        self.batch_idx = self.curr_batchnum
        if test and (not self.shuffle_data):
            # There is no need to shuffle testing data
            self.shuffled_image_range = self.image_range
        else:
            # Random permutation of the image indices for this epoch.
            self.shuffled_image_range = self.image_range[rd.permutation(self.num_image)]
        if 'images_path' in self.batch_meta:
            self.images_path = self.batch_meta['images_path']
        else:
            self.images_path = None
コード例 #32
0
# Smoke-test script for Cutils.convert_angle2: load the H36M skeleton
# definition, ground-truth relative 3D poses, and sampled joint angles
# (action 14), then reconstruct poses/rotations from the first angle sample.
from init_test import *
import dhmlpe_utils as dutils
import iutils as iu
import scipy.io as sio
import iread.myio as mio
cutils = dutils.Cutils()
# Skeleton structure used for angle -> pose conversion.
dskels = sio.loadmat(
    '/opt/visal/data/H36/H36MData/SLP/data/AngleSamples/skel2.mat')
skel = dskels['skel']
print dskels.keys()
# Batches meta holding the ground-truth relative 3D poses.
dpose = mio.unpickle(
    '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_ASM_act_14_exp_2/batches.meta'
)
gt_pose = dpose['Relative_Y3d_mono_body']
print gt_pose.shape
# Sampled joint angles and their valid range for action 14.
dangle = sio.loadmat(
    '/opt/visal/data/H36/H36MData/SLP/data/AngleSamples/ASM_act_14_angles.mat')
print dangle.keys()
print dangle['angles_range'].shape
t = dangle['angles_range'].flatten()
print max(t), min(t)
print dangle['angles'].shape
# First angle sample; convert_angle2 returns the reconstructed pose and
# rotation matrices for it.
gt_angle = dangle['angles'][0, :, :]
[g_pose, g_rot] = cutils.convert_angle2(skel, gt_angle)
コード例 #33
0
ファイル: test_cutils.py プロジェクト: chen1474147/convnet
# Smoke-test script for Cutils.convert_angle2: load the H36M skeleton
# definition, ground-truth relative 3D poses, and sampled joint angles
# (action 14), then reconstruct poses/rotations from the first angle sample.
from init_test import *
import dhmlpe_utils as dutils
import iutils as iu
import scipy.io as sio
import iread.myio as mio
cutils = dutils.Cutils()
# Skeleton structure used for angle -> pose conversion.
dskels = sio.loadmat('/opt/visal/data/H36/H36MData/SLP/data/AngleSamples/skel2.mat')
skel = dskels['skel']
print dskels.keys()
# Batches meta holding the ground-truth relative 3D poses.
dpose = mio.unpickle('/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_ASM_act_14_exp_2/batches.meta')
gt_pose = dpose['Relative_Y3d_mono_body']
print gt_pose.shape
# Sampled joint angles and their valid range for action 14.
dangle = sio.loadmat('/opt/visal/data/H36/H36MData/SLP/data/AngleSamples/ASM_act_14_angles.mat')
print dangle.keys()
print dangle['angles_range'].shape
t = dangle['angles_range'].flatten()
print max(t), min(t)
print dangle['angles'].shape
# First angle sample; convert_angle2 returns the reconstructed pose and
# rotation matrices for it.
gt_angle = dangle['angles'][0,:,:]
[g_pose,g_rot] = cutils.convert_angle2(skel, gt_angle)

コード例 #34
0
    def __init__(self,
                 data_dir,
                 image_range,
                 init_epoch=1,
                 init_batchnum=None,
                 dp_params={},
                 test=False):
        """Image data provider driven by an explicit list of image indices.

        Initializes the base DataProvider with a single pseudo-batch
        (``range(1)``), derives the network input size from crop settings,
        optionally overrides meta entries from an external meta file, and
        sets up batch/shuffle bookkeeping over the image list.

        NOTE(review): the mutable default ``dp_params={}`` is shared across
        calls; harmless only if callers always pass their own dict -- verify.
        Raises BasicDataProviderError when ``batch_size`` is non-positive or
        exceeds the number of images.
        """
        DataProvider.__init__(self, data_dir, range(1), init_epoch,
                              init_batchnum, dp_params, test)
        #crop_boarder will crop 2 * crop_boarder in each dimension
        #padding is similar. It will pad both size, i.e., adding 2x pixels
        self.image_dim = np.asarray(
            self.batch_meta['image_adjust_dim']).flatten()
        if dp_params['crop_border'] > 0:
            # NOTE(review): this aliases batch_meta['image_adjust_dim'] and
            # the in-place -= below mutates the meta entry if it is a
            # list/array -- confirm that is intended.
            self.input_image_dim = self.batch_meta['image_adjust_dim']
            self.input_image_dim[0] -= dp_params['crop_border'] * 2
            self.input_image_dim[1] -= dp_params['crop_border'] * 2
        elif dp_params['crop_one_border'] > 0:
            # Crop from one side only (subtract the border once per dim).
            self.input_image_dim = self.batch_meta['image_adjust_dim']
            self.input_image_dim[0] -= dp_params['crop_one_border']
            self.input_image_dim[1] -= dp_params['crop_one_border']
        else:
            self.input_image_dim = self.batch_meta['image_sample_dim']
        self.shuffle_data = dp_params[
            'shuffle_data']  # determine whether to shuffle test data
        if 'external_meta_path' in dp_params and dp_params[
                'external_meta_path']:
            # Selected meta entries can be overridden from an external
            # pickle (e.g. precomputed mean image / eigen statistics).
            import iread.myio as mio
            ext_meta = mio.unpickle(dp_params['external_meta_path'])
            print 'Print load external_meta for %s succussfully' % dp_params[
                'external_meta_path']
            override_dic = ['mean_image', 'cropped_mean_image', 'rgb_eigenvalue', \
                            'rgb_eigenvector', 'RelativeSkel_Y3d_mono_body', \
                            'Relative_Y3d_mono_body']
            for item in override_dic:
                if item in ext_meta:
                    self.batch_meta[item] = ext_meta[item]
                    print '----Load %s from ext_meta succussfully' % item
            # Free the external meta once the entries are copied over.
            del ext_meta
        self.mean_image = self.batch_meta['mean_image']
        self.cropped_mean_image = self.get_cropped_mean()

        self.rgb_eigenvalue = self.batch_meta['rgb_eigenvalue']
        self.rgb_eigenvector = self.batch_meta['rgb_eigenvector']
        self.test = test
        self.image_range = np.asarray(image_range)
        self.num_image = len(image_range)
        self.batch_size = dp_params['batch_size']
        self.keep_data_dic = False
        if self.batch_size > self.num_image or self.batch_size <= 0:
            raise BasicDataProviderError(
                'Invaid batch_size %d (num_image=%d)' %
                (self.batch_size, self.num_image))
        # Ceiling division: the last batch may be smaller than batch_size.
        self.num_batch = (self.num_image - 1) / self.batch_size + 1
        # override batch_range, this is not actually the batch_range
        # just keep consistent
        self.batch_range = range(self.num_image)
        # recheck curr_batchnum
        # (Remembering last times' batch_num will not help training), just keep batch consistant
        if self.curr_batchnum not in self.batch_range:
            self.curr_batchnum = 0
        # Clamp into [0, num_image - 1] in case the saved value is stale.
        self.curr_batchnum = min(max(self.curr_batchnum, 0),
                                 self.num_image - 1)
        # print 'Curr_batchnum = %d Test  = %s' % (self.curr_batchnum, 'True' if self.test else 'False')
        # override batch_Idx
        self.batch_idx = self.curr_batchnum
        if test and self.shuffle_data == 0:
            # There is no need to shuffle testing data
            self.shuffled_image_range = self.image_range
        else:
            # Random permutation of the image indices for this epoch.
            self.shuffled_image_range = self.image_range[rd.permutation(
                self.num_image)]
        if 'images_path' in self.batch_meta:
            self.images_path = self.batch_meta['images_path']
        else:
            self.images_path = None