# Example #1
# 0
def test():
    d = mio.unpickle(
        '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_14/batches.meta'
    )
    a = d['feature_list'][1]
    print a[..., 0].flatten()
    iu.print_common_statistics(a)
# Example #2
# 0
def gbns(name,sp, params):
    """
    get bias for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    model  = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    W= layers[scale_name][2]['weights'][0]
    b= layers[scale_name][2]['biases'][0]
    print 'W-------------'
    iu.print_common_statistics(W)
    print 'b'
    iu.print_common_statistics(b)
    if 'epsilon' in layers[norm_name][2]: 
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [b - W * u / (np.sqrt(var + epsilon))]
# Example #3
# 0
def gbns(name, sp, params):
    """
    get bias for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]

    Returns a one-element list holding the folded bias
    b - W * u / sqrt(var + epsilon).
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    # Saved statistics are stored in a 'stat' sub-folder of the model folder.
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    model = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    # Scale layer parameters: element-wise weight and bias.
    W = layers[scale_name][2]['weights'][0]
    b = layers[scale_name][2]['biases'][0]
    print 'W-------------'
    iu.print_common_statistics(W)
    print 'b'
    iu.print_common_statistics(b)
    # Use the norm layer's epsilon when present, else a small default.
    if 'epsilon' in layers[norm_name][2]:
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    # 'u' and 'var' are presumably the norm layer's running mean and
    # variance — TODO confirm against the stat producer.
    u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [b - W * u / (np.sqrt(var + epsilon))]
# Example #4
# 0
def show_cnn_filters(lay, op):
    """Display every filter of a conv layer in an 8-column subplot grid.

    3-channel filters are shown as RGB images (channels moved last);
    otherwise the mean over input channels is shown. Also prints the
    common statistics of each displayed filter image.
    """
    # The filter shape is numfilter  x input_dimension  x sy  x sx
    weights = lay['weights']
    idx = op.get_value('weight_idx')
    W = weights[idx]
    nfilter, ndim, SY, SX = W.shape
    n_col = 8
    n_row = (nfilter - 1) // n_col + 1
    fig = pl.figure()
    # Iterate over filters directly: the original r/c double loop ran
    # n_row * n_col times and indexed W[nc] past the end whenever
    # nfilter was not a multiple of n_col (IndexError).
    for nc in range(nfilter):
        ax = fig.add_subplot(n_row, n_col, nc + 1)
        curF = W[nc, ...]
        img = curF.transpose([1, 2, 0]) if curF.shape[0] == 3 else curF.mean(axis=0)
        # Negate and map into [0, 1] for display.
        ax.imshow(imgproc.maptorange(-img, [0, 1]))
        iu.print_common_statistics(img)
        pl.title('filter idx {:02d}'.format(nc))
    pl.show()
# Example #5
# 0
def show_cnn_filters(lay, op):
    """Display every filter of a conv layer in an 8-column subplot grid.

    3-channel filters are shown as RGB images (channels moved last);
    otherwise the mean over input channels is shown.
    """
    # The filter shape is numfilter  x input_dimension  x sy  x sx
    weights = lay['weights']
    # 'weight_idx' selects which weight array of the layer to visualize.
    idx = op.get_value('weight_idx')
    W = weights[idx]
    nfilter, ndim, SY, SX = W.shape
    n_col = 8
    n_row = (nfilter - 1) // n_col + 1
    nc = 0
    fig = pl.figure()
    # NOTE(review): this double loop runs n_row * n_col times, so when
    # nfilter is not a multiple of n_col, W[nc, ...] below indexes past
    # the last filter and raises IndexError — confirm intended usage.
    for r in range(n_row):
        for c in range(n_col):
            ax = fig.add_subplot(n_row, n_col, nc + 1)
            curF = W[nc, ...]
            img = curF.transpose(
                [1, 2, 0]) if curF.shape[0] == 3 else curF.mean(axis=0)
            # Negated image mapped into [0, 1] for display.
            ax.imshow(imgproc.maptorange(-img, [0, 1]))
            iu.print_common_statistics(img)
            pl.title('filter idx {:02d}'.format(nc))
            nc = nc + 1
    pl.show()
# Example #6
# 0
def analyze_basicbp_outputs(solver, output_layer_names):
    cur_data = solver.get_next_batch(train=True)
    input_data = solver.prepare_data(cur_data[2])

    output_layers = solver.net.get_layer_by_names(output_layer_names)
    outputs= sum([e.outputs for e in output_layers],[])
    f = theano.function(inputs=solver.net.inputs,
                        outputs=outputs, on_unused_input='ignore')
    res = f(*input_data)
    max_col = 3
    nbin=100
    n_row = (len(res) - 1)//max_col + 1
    idx = 0
    for e,name in zip(res, output_layer_names):
        idx = idx + 1
        ndata = e.shape[0]
        print 'Layer {} output {} nodes'.format(name, ndata)
        iu.print_common_statistics(e)
        pl.subplot(n_row, max_col, idx)
        pl.hist(e.flatten(), bins=nbin)
        pl.title('Layer({})'.format(name))
    pl.show()
def read_inputs():
    d = mio.unpickle(
        '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_12/batches.meta'
    )
    info = d['info']
    print info.keys()
    indexes = info['indexes']
    Y = d['feature_list'][0]
    X = d['feature_list'][1]
    train_range = range(0, 76048)
    test_range = range(76048, 105368)

    print min(indexes[train_range]), max(indexes[train_range])
    print min(indexes[test_range]), max(indexes[test_range])

    print 'X '
    iu.print_common_statistics(X)

    X_train = X[..., train_range]
    Y_train = Y[..., train_range]

    feature_dim = X_train.shape[0]

    X_test = X[..., test_range]
    Y_test = Y[..., test_range]

    params = {'Sigma': np.ones(feature_dim + 1) * 0.0001}
    r = LinearRegression(params)
    r.fit(simpleDP(X_train, Y_train))
    Y_pred = r.apply(X_test)
    print Y_pred.shape
    print Y_test[:5, :5]
    print Y_pred[:5, :5]
    diff = Y_test - Y_pred
    print 'abs diff = {}'.format(np.sum(diff.flatten()**2))
    mpjpe = dutils.calc_mpjpe_from_residual(diff, 17)

    print 'average mpjpe  {}'.format(np.mean(mpjpe.flatten()))
# Example #8
# 0
def analyze_basicbp_outputs(solver, output_layer_names):
    """Evaluate the named layers' outputs on one training batch, print
    per-layer statistics, and draw one histogram subplot per output
    (3 columns)."""
    cur_data = solver.get_next_batch(train=True)
    input_data = solver.prepare_data(cur_data[2])

    output_layers = solver.net.get_layer_by_names(output_layer_names)
    # Flatten the per-layer output lists into one list of theano variables.
    # NOTE(review): if any layer exposes more than one output, the zip
    # with output_layer_names below mislabels the plots — confirm layers
    # are single-output.
    outputs = sum([e.outputs for e in output_layers], [])
    f = theano.function(inputs=solver.net.inputs,
                        outputs=outputs,
                        on_unused_input='ignore')
    res = f(*input_data)
    max_col = 3
    nbin = 100
    n_row = (len(res) - 1) // max_col + 1
    idx = 0
    for e, name in zip(res, output_layer_names):
        idx = idx + 1
        ndata = e.shape[0]
        print 'Layer {} output {} nodes'.format(name, ndata)
        iu.print_common_statistics(e)
        pl.subplot(n_row, max_col, idx)
        pl.hist(e.flatten(), bins=nbin)
        pl.title('Layer({})'.format(name))
    pl.show()
# Example #9
# 0
def analyze_mmls_outputs(solver, output_layer_names):
    cur_data = solver.get_next_batch(train=True)
    most_violated_data = solver.find_most_violated(cur_data, train=True)
    alldata = [solver.gpu_require(e.T) for e in most_violated_data[2][1:]]
    output_layers = solver.train_net.get_layer_by_names(output_layer_names)
    outputs= sum([e.outputs for e in output_layers],[])
    f = theano.function(inputs=solver.train_net.inputs,
                        outputs=outputs, on_unused_input='ignore')
    res = f(*alldata)
    max_col = 3
    nbin=100
    n_row = (len(res) - 1)//max_col + 1
    idx = 0

    for e,name in zip(res, output_layer_names):
        idx = idx + 1
        ndata = e.shape[0]
        print 'Layer {} output {} nodes'.format(name, ndata)
        iu.print_common_statistics(e)
        pl.subplot(n_row, max_col, idx)
        pl.hist(e.flatten(), bins=nbin)
        pl.title('Layer({})'.format(name))
    pl.show()
# Example #10
# 0
def read_inputs():
    """Fit a LinearRegression on the H36M act_12 train split and report
    the squared-residual sum and average MPJPE on the test split."""
    d = mio.unpickle('/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_12/batches.meta')
    info = d['info']
    print info.keys()
    indexes = info['indexes']
    # feature_list[0] are the regression targets, feature_list[1] the inputs.
    Y = d['feature_list'][0]
    X = d['feature_list'][1]
    # Hard-coded split boundaries for this dataset.
    train_range = range(0,76048)
    test_range = range(76048,105368)

    print min(indexes[train_range]), max(indexes[train_range])
    print min(indexes[test_range]), max(indexes[test_range])

    print 'X '
    iu.print_common_statistics(X)
    
    X_train = X[..., train_range]
    Y_train = Y[..., train_range]

    
    feature_dim = X_train.shape[0]
    
    X_test = X[..., test_range]
    Y_test = Y[..., test_range]

    # Small isotropic regularizer on the (feature_dim + 1)-dim weights
    # (presumably including a bias term — TODO confirm).
    params = {'Sigma':np.ones(feature_dim + 1) * 0.0001}
    r = LinearRegression(params)
    r.fit(simpleDP(X_train,Y_train))
    Y_pred = r.apply(X_test)
    print Y_pred.shape
    print Y_test[:5,:5]
    print Y_pred[:5,:5]
    diff = Y_test - Y_pred
    print 'abs diff = {}'.format(np.sum(diff.flatten()**2))
    # 17 joints per pose.
    mpjpe = dutils.calc_mpjpe_from_residual(diff,17)
    
    print 'average mpjpe  {}'.format(np.mean(mpjpe.flatten()))
# Example #11
# 0
def analyze_mmls_outputs(solver, output_layer_names):
    """Evaluate the named layers on the most-violated samples of one
    training batch; print statistics and plot a histogram per output."""
    cur_data = solver.get_next_batch(train=True)
    most_violated_data = solver.find_most_violated(cur_data, train=True)
    # Transpose each array and move it to the GPU; the first element of
    # most_violated_data[2] is skipped (presumably the target — TODO confirm).
    alldata = [solver.gpu_require(e.T) for e in most_violated_data[2][1:]]
    output_layers = solver.train_net.get_layer_by_names(output_layer_names)
    # Flatten the per-layer output lists into one list of theano variables.
    outputs = sum([e.outputs for e in output_layers], [])
    f = theano.function(inputs=solver.train_net.inputs,
                        outputs=outputs,
                        on_unused_input='ignore')
    res = f(*alldata)
    max_col = 3
    nbin = 100
    n_row = (len(res) - 1) // max_col + 1
    idx = 0

    for e, name in zip(res, output_layer_names):
        idx = idx + 1
        ndata = e.shape[0]
        print 'Layer {} output {} nodes'.format(name, ndata)
        iu.print_common_statistics(e)
        pl.subplot(n_row, max_col, idx)
        pl.hist(e.flatten(), bins=nbin)
        pl.title('Layer({})'.format(name))
    pl.show()
# Example #12
# 0
def show_highest_score(train):
    """Score most-violated, ground-truth and best-match poses on one batch.

    Loads one batch from the train or test data provider, finds the most
    violated candidates, evaluates the eval net's score for the
    most-violated, ground-truth and batch-best-match poses, prints margin
    statistics and shows several comparison plots.

    train: when truthy, use the training data provider, else the test one.
    """
    solver = resume_solver()

    # stat = solver.stat
    # print stat.keys()
    # mvc = stat['most_violated_counts']
    # scc = stat['sample_candidate_counts']
    # print 'mvc sum = {}, scc = {}'.format(mvc.sum(), scc.sum())

    # test_shared_weights_online(solver.train_net.layers)
    # print '<<<<<<<<<<<<<<<<{}'.format(solver.train_net.layers is solver.eval_net.layers)
    # print 'train net inputs {}'.format(solver.train_net.inputs)
    # print 'eval net inputs {}'.format(solver.eval_net.inputs)
    # print 'eval net outputs {}'.format(solver.eval_net.outputs)

    # GraphParser.print_graph_connections(solver.train_net.layers)
    # return
    dp = solver.train_dp if train else solver.test_dp

    data = dp.get_next_batch(train)
    ep, bn, alldata, ext_data = solver.find_most_violated_ext(
        data, use_zero_margin=True, train=train)
    print ep, bn, len(alldata)
    # alldata layout inferred from the indexing below:
    # [gt_target, img_features, gt_features, mv_features, gt_margin, mv_margin]
    # — TODO confirm against find_most_violated_ext.
    gt_target = alldata[0]
    gt_margin = alldata[4]
    img_features = alldata[1]
    mv_target = ext_data[0]
    batch_candidate_indexes = ext_data[1]
    print 'batch candidate indexes shape is {}'.format(
        batch_candidate_indexes.shape)
    mv_features = alldata[3]
    gt_features = alldata[2]
    # mv_margin = solver.calc_margin(gt_target - mv_target)
    mv_margin = alldata[5]
    print "mv shape {}, gt shape {}".format(mv_target.shape, gt_target.shape)
    fl = solver.train_dp.data_dic['feature_list']
    batch_candidate_targets = fl[0][..., batch_candidate_indexes]
    ndata = gt_target.shape[-1]
    # Eval-net input triple: (image features, pose features, margin),
    # each transposed and moved to the GPU.
    data_to_eval = [
        solver.gpu_require(img_features.T),
        solver.gpu_require(mv_features.T),
        solver.gpu_require(mv_margin.T)
    ]
    print 'Eval inpus are {}'.format([l.name for l in solver.eval_net.inputs])
    mv_score = solver.eval_net.outputs[0].eval(
        dict(zip(solver.eval_net.inputs, data_to_eval)))
    data_to_eval = [
        solver.gpu_require(img_features.T),
        solver.gpu_require(gt_features.T),
        solver.gpu_require(gt_margin.T)
    ]
    gt_score = solver.eval_net.outputs[0].eval(
        dict(zip(solver.eval_net.inputs, data_to_eval)))

    # Best match within the batch candidate set (minimum MPJPE vs gt).
    res_mpjpe, bmi = get_batch_best_match(batch_candidate_targets, gt_target,
                                          solver)
    # The 1200 factor presumably converts normalized units to millimeters
    # — TODO confirm.
    print 'Current best match mpjpe is {}'.format(np.mean(res_mpjpe) * 1200)

    bmi_raw = batch_candidate_indexes[bmi]
    bm_features = fl[2][..., bmi_raw]
    bm_targets = fl[0][..., bmi_raw]
    residuals = bm_targets - gt_target
    mpjpe = dutils.calc_mpjpe_from_residual(residuals,
                                            17)  # mpjpe for best match
    print 'Calc Again mpjpe is {}'.format(np.mean(mpjpe.flatten()) * 1200)
    data_to_eval = [
        solver.gpu_require(img_features.T),
        solver.gpu_require(bm_features.T),
        solver.gpu_require(gt_margin.T)
    ]
    bm_score = solver.eval_net.outputs[0].eval(
        dict(zip(solver.eval_net.inputs, data_to_eval)))
    # for evaluation
    # inputs = [solver.train_net.inputs[0],solver.train_net.inputs[2],solver.train_net.inputs[4]]
    # print 'inputs = {}'.format(inputs)
    # ff = theano.function(inputs=inputs,
    #                      outputs=solver.eval_net.layers['net2_score'][2].outputs
    # )
    # print solver.eval_net.layers['net1_score'][2].outputs
    # res = solver.call_func(ff, data_to_eval)
    # r = res[0]
    # diff = r - gt_score
    # print '=======The abs difference is {}==========='.format(np.abs(diff).sum())

    all_input_data = [solver.gpu_require(e.T) for e in alldata[1:]]
    solver.analyze_num_sv(all_input_data)
    # all_input_data = [all_input_data[0], all_input_data[2], all_input_data[1],
    #                   all_input_data[4], all_input_data[3]]
    # solver.print_layer_outputs(all_input_data)

    # Ignore the use_zero margin flag
    whole_candidate_set = solver.train_dp.data_dic['feature_list'][0][
        ..., solver.train_dp.data_range]
    # print 'Whole candidate_set shape is {}'.format(whole_candidate_set.shape)
    # what_is_the_best_match( whole_candidate_set , mv_target, solver)
    # show_what_is_best_all(solver.train_dp, solver.test_dp, solver)
    mv_margin = solver.calc_margin(gt_target - mv_target)  # MPJPE
    print 'gt_margin<======================'
    iu.print_common_statistics(gt_margin)
    print 'mv_margin<======================'
    iu.print_common_statistics(alldata[5])
    show_bm_cmp(ndata, gt_target, mv_target, bm_targets, mv_score, gt_score,
                bm_score, solver)
    show_masked_plot(ndata, mv_margin, mv_score, gt_score, bm_score)
    show_raw_plot(ndata, mv_margin, mv_score, gt_score)
    # print 'Strange Here: {:.6f}% is correct'.format()
    pl.show()
# Example #13
# 0
def test():
    d = mio.unpickle('/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_14/batches.meta')
    a = d['feature_list'][1]
    print a[...,0].flatten()
    iu.print_common_statistics(a)
# Example #14
# 0
def show_highest_score(train):
    """Score most-violated, ground-truth and best-match poses on one batch.

    Loads one batch from the train or test data provider, finds the most
    violated candidates, evaluates the eval net's score for the
    most-violated, ground-truth and batch-best-match poses, prints margin
    statistics and shows several comparison plots.

    train: when truthy, use the training data provider, else the test one.
    """
    solver = resume_solver()

    # stat = solver.stat
    # print stat.keys()
    # mvc = stat['most_violated_counts']
    # scc = stat['sample_candidate_counts']
    # print 'mvc sum = {}, scc = {}'.format(mvc.sum(), scc.sum())

    # test_shared_weights_online(solver.train_net.layers)
    # print '<<<<<<<<<<<<<<<<{}'.format(solver.train_net.layers is solver.eval_net.layers)
    # print 'train net inputs {}'.format(solver.train_net.inputs)
    # print 'eval net inputs {}'.format(solver.eval_net.inputs)
    # print 'eval net outputs {}'.format(solver.eval_net.outputs)

    # GraphParser.print_graph_connections(solver.train_net.layers)
    # return
    dp = solver.train_dp if train else solver.test_dp

    data = dp.get_next_batch(train)
    ep, bn, alldata, ext_data = solver.find_most_violated_ext(data, use_zero_margin=True, train=train)
    print ep, bn, len(alldata)
    # alldata layout inferred from the indexing below:
    # [gt_target, img_features, gt_features, mv_features, gt_margin, mv_margin]
    # — TODO confirm against find_most_violated_ext.
    gt_target = alldata[0]
    gt_margin = alldata[4]
    img_features = alldata[1]
    mv_target = ext_data[0]
    batch_candidate_indexes = ext_data[1]
    print "batch candidate indexes shape is {}".format(batch_candidate_indexes.shape)
    mv_features = alldata[3]
    gt_features = alldata[2]
    # mv_margin = solver.calc_margin(gt_target - mv_target)
    mv_margin = alldata[5]
    print "mv shape {}, gt shape {}".format(mv_target.shape, gt_target.shape)
    fl = solver.train_dp.data_dic["feature_list"]
    batch_candidate_targets = fl[0][..., batch_candidate_indexes]
    ndata = gt_target.shape[-1]
    # Eval-net input triple: (image features, pose features, margin),
    # each transposed and moved to the GPU.
    data_to_eval = [
        solver.gpu_require(img_features.T),
        solver.gpu_require(mv_features.T),
        solver.gpu_require(mv_margin.T),
    ]
    print "Eval inpus are {}".format([l.name for l in solver.eval_net.inputs])
    mv_score = solver.eval_net.outputs[0].eval(dict(zip(solver.eval_net.inputs, data_to_eval)))
    data_to_eval = [
        solver.gpu_require(img_features.T),
        solver.gpu_require(gt_features.T),
        solver.gpu_require(gt_margin.T),
    ]
    gt_score = solver.eval_net.outputs[0].eval(dict(zip(solver.eval_net.inputs, data_to_eval)))

    # Best match within the batch candidate set (minimum MPJPE vs gt).
    res_mpjpe, bmi = get_batch_best_match(batch_candidate_targets, gt_target, solver)
    # The 1200 factor presumably converts normalized units to millimeters
    # — TODO confirm.
    print "Current best match mpjpe is {}".format(np.mean(res_mpjpe) * 1200)

    bmi_raw = batch_candidate_indexes[bmi]
    bm_features = fl[2][..., bmi_raw]
    bm_targets = fl[0][..., bmi_raw]
    residuals = bm_targets - gt_target
    mpjpe = dutils.calc_mpjpe_from_residual(residuals, 17)  # mpjpe for best match
    print "Calc Again mpjpe is {}".format(np.mean(mpjpe.flatten()) * 1200)
    data_to_eval = [
        solver.gpu_require(img_features.T),
        solver.gpu_require(bm_features.T),
        solver.gpu_require(gt_margin.T),
    ]
    bm_score = solver.eval_net.outputs[0].eval(dict(zip(solver.eval_net.inputs, data_to_eval)))
    # for evaluation
    # inputs = [solver.train_net.inputs[0],solver.train_net.inputs[2],solver.train_net.inputs[4]]
    # print 'inputs = {}'.format(inputs)
    # ff = theano.function(inputs=inputs,
    #                      outputs=solver.eval_net.layers['net2_score'][2].outputs
    # )
    # print solver.eval_net.layers['net1_score'][2].outputs
    # res = solver.call_func(ff, data_to_eval)
    # r = res[0]
    # diff = r - gt_score
    # print '=======The abs difference is {}==========='.format(np.abs(diff).sum())

    all_input_data = [solver.gpu_require(e.T) for e in alldata[1:]]
    solver.analyze_num_sv(all_input_data)
    # all_input_data = [all_input_data[0], all_input_data[2], all_input_data[1],
    #                   all_input_data[4], all_input_data[3]]
    # solver.print_layer_outputs(all_input_data)

    # Ignore the use_zero margin flag
    whole_candidate_set = solver.train_dp.data_dic["feature_list"][0][..., solver.train_dp.data_range]
    # print 'Whole candidate_set shape is {}'.format(whole_candidate_set.shape)
    # what_is_the_best_match( whole_candidate_set , mv_target, solver)
    # show_what_is_best_all(solver.train_dp, solver.test_dp, solver)
    mv_margin = solver.calc_margin(gt_target - mv_target)  # MPJPE
    print "gt_margin<======================"
    iu.print_common_statistics(gt_margin)
    print "mv_margin<======================"
    iu.print_common_statistics(alldata[5])
    show_bm_cmp(ndata, gt_target, mv_target, bm_targets, mv_score, gt_score, bm_score, solver)
    show_masked_plot(ndata, mv_margin, mv_score, gt_score, bm_score)
    show_raw_plot(ndata, mv_margin, mv_score, gt_score)
    # print 'Strange Here: {:.6f}% is correct'.format()
    pl.show()
# Example #15
# 0
def show_highest_score(train, solver):
    """Score most-violated, ground-truth and best-match poses on one batch.

    Variant that takes the solver as an argument instead of resuming one.
    Evaluates the eval net's score for the most-violated, ground-truth and
    batch-best-match poses, prints margin statistics and shows plots.

    train: when truthy, use the training data provider, else the test one.
    solver: an already-constructed solver with train/test dps and eval net.
    """
    dp = solver.train_dp if train else solver.test_dp
    data = solver.get_next_batch(train)
    alldata, ext_data  = solver.find_most_violated_ext(data[2],use_zero_margin=True,
                                                               train=train
    )
    print len(alldata)
    # alldata layout inferred from the indexing below:
    # [gt_target, img_features, gt_features, mv_features, gt_margin, mv_margin]
    # — TODO confirm against find_most_violated_ext.
    gt_target = alldata[0]
    gt_margin= alldata[4]
    img_features = alldata[1]
    mv_target = ext_data[0]
    batch_candidate_indexes =ext_data[1]
    print 'batch candidate indexes shape is {}'.format(batch_candidate_indexes.shape)
    mv_features = alldata[3]
    gt_features = alldata[2]
    # mv_margin = solver.calc_margin(gt_target - mv_target)
    mv_margin = alldata[5]
    print "mv shape {}, gt shape {}".format(mv_target.shape, gt_target.shape)

    fl = solver.get_all_candidates(solver.train_dp)
    batch_candidate_targets = fl[0][...,batch_candidate_indexes]
    ndata = gt_target.shape[-1]
    # Eval-net input triple: (image features, pose features, margin),
    # each transposed and moved to the GPU.
    data_to_eval = [solver.gpu_require(img_features.T),
                    solver.gpu_require(mv_features.T),
                    solver.gpu_require(mv_margin.T)
    ]
    print 'Eval inpus are {}'.format([l.name for l in solver.eval_net.inputs])
    mv_score = solver.eval_net.outputs[0].eval(dict(zip(solver.eval_net.inputs, data_to_eval)))
    data_to_eval = [solver.gpu_require(img_features.T),
                    solver.gpu_require(gt_features.T),
                    solver.gpu_require(gt_margin.T)
    ]
    gt_score = solver.eval_net.outputs[0].eval(dict(zip(solver.eval_net.inputs, data_to_eval)))

    # Best match within the batch candidate set (minimum MPJPE vs gt).
    res_mpjpe, bmi  =  get_batch_best_match(batch_candidate_targets, gt_target, solver)
    # The 1200 factor presumably converts normalized units to millimeters
    # — TODO confirm.
    print 'Current best match mpjpe is {}'.format(np.mean(res_mpjpe)* 1200)

    bmi_raw = batch_candidate_indexes[bmi]
    bm_features = fl[2][..., bmi_raw]
    bm_targets  = fl[0][..., bmi_raw]
    residuals = bm_targets - gt_target
    mpjpe = dutils.calc_mpjpe_from_residual(residuals, 17) # mpjpe for best match
    print 'mpjpe(bm_target, gt_target) is {}'.format(np.mean(mpjpe.flatten())*1200)
    data_to_eval = [solver.gpu_require(img_features.T),
                    solver.gpu_require(bm_features.T),
                    solver.gpu_require(gt_margin.T)
    ]
    bm_score = solver.eval_net.outputs[0].eval(dict(zip(solver.eval_net.inputs, data_to_eval)))

    all_input_data = [solver.gpu_require(e.T) for e in alldata[1:]]
    solver.analyze_num_sv(all_input_data)
    # all_input_data = [all_input_data[0], all_input_data[2], all_input_data[1],
    #                   all_input_data[4], all_input_data[3]]
    # solver.print_layer_outputs(all_input_data)

    # Ignore the use_zero margin flag
    whole_candidate_set = solver.train_dp.data_dic['feature_list'][0][..., solver.train_dp.data_range]
    # print 'Whole candidate_set shape is {}'.format(whole_candidate_set.shape)
    # what_is_the_best_match( whole_candidate_set , mv_target, solver)
    # show_what_is_best_all(solver.train_dp, solver.test_dp, solver)
    mv_margin = solver.calc_margin(gt_target - mv_target)  # MPJPE
    print 'gt_margin<======================'
    iu.print_common_statistics(gt_margin)
    print 'mv_margin<======================'
    iu.print_common_statistics(alldata[5])
    show_bm_cmp(ndata, gt_target, mv_target, bm_targets, mv_score, gt_score, bm_score, solver)
    show_masked_plot(ndata, mv_margin, mv_score, gt_score, bm_score)
    show_raw_plot(ndata, mv_margin, mv_score, gt_score)
    # print 'Strange Here: {:.6f}% is correct'.format()
    pl.show()