# Beispiel #1
# 0
def predict_stimulus_condition(stim_folder, stim, condition, random_shapes):
    """Approximate logp(test image | I != train image) for each test viewpoint of a stimulus/condition pair.

    Parameters:
        stim_folder (str): folder containing the stimulus .npy files
        stim (str): stimulus name
        condition (str): condition name (e.g., 'inter', 'extra', 'ortho')
        random_shapes (list): random Shape hypotheses used to approximate the
            distribution of images not generated by the training shape

    Returns:
        list: one [stim, condition, viewpoint difference, avg logp, best logp] row
            per test viewpoint; also dumped to a temporary pickle file
    """
    filename = "{0:s}/{1:s}_{2:s}.npy".format(stim_folder, stim, condition)
    data = np.load(filename)

    fwm = vfm.VisionForwardModel(render_size=(200, 200), custom_lighting=False)
    for shape in random_shapes:
        shape.forward_model = fwm

    log_probs = []
    for i in range(7):
        print(stim, condition, i)
        # calculate logp(image_test|I != image_train) by averaging over random shapes
        logp_test_no_train = 0.0
        logp_test_no_train_best = 0.0
        for shape in random_shapes:
            logp_avg, logp_max = calculate_probability_image_given_hypothesis(
                data[i], shape)
            logp_test_no_train += logp_avg
            logp_test_no_train_best += logp_max

        logp_test_no_train /= len(random_shapes)
        logp_test_no_train_best /= len(random_shapes)

        # viewpoint difference between train and test image is i * 15 degrees
        log_probs.append([
            stim, condition, i * 15, logp_test_no_train,
            logp_test_no_train_best
        ])

    del fwm
    # use a context manager so the handle is closed even if dump fails, and
    # write in binary mode: pickle streams are not text
    with open("{0:s}_{1:s}_temp.pkl".format(stim, condition), "wb") as pkl_file:
        pkl.dump(log_probs, pkl_file)
    return log_probs
    def test_calculate_probability_image_given_hypothesis(self):
        """calculate_probability_image_given_hypothesis should reproduce known log probabilities."""
        forward_model = i3d_vfm.VisionForwardModel(render_size=(200, 200),
                                                   offscreen_rendering=True,
                                                   custom_lighting=False)
        # the hypothesis is a single unit cube seen from a fixed viewpoint
        cube = CuboidPrimitive(position=[0.0, 0.0, 0.0], size=[1.0, 1.0, 1.0])
        s = Shape(forward_model=forward_model,
                  viewpoint=[np.array([np.sqrt(8.0), 0.0, 90.0])],
                  params={'MAX_PIXEL_VALUE': 255.0, 'LL_VARIANCE': 0.1},
                  parts=[cube])

        # calculate logp(image|sample) for the observed image.
        # expected values are pre-calculated. NOTE these could change if
        # VisionForwardModel changes.
        observed = np.load('test_images/test_image.npy')
        logp, logp_max = calculate_probability_image_given_hypothesis(observed, s)
        self.assertAlmostEqual(logp, -0.60814961737968254)
        self.assertAlmostEqual(logp_max, 0.0)

        # calculate logp(blank image|sample)
        blank = np.zeros((1, 200, 200))
        logp, logp_max = calculate_probability_image_given_hypothesis(blank, s)
        self.assertAlmostEqual(logp, -1.5146551964550277)
        self.assertAlmostEqual(logp_max, -0.91822996736329843)

        # shrinking LL_VARIANCE tenfold should scale logp_max tenfold
        s.params['LL_VARIANCE'] = 0.01
        logp, logp_max = calculate_probability_image_given_hypothesis(blank, s)
        self.assertAlmostEqual(logp_max / 10.0, -0.91822996736329843)
    def test_run_chain(self):
        """run_chain should validate its arguments, return all expected result keys and save its output files."""
        fwm = vfm.VisionForwardModel(render_size=(50, 50))
        s = shape.Shape(forward_model=fwm, viewpoint=[(3.0, 45.0, 45.0)],
                        params={'LL_VARIANCE': 1.0, 'MAX_PIXEL_VALUE': 175.0})

        data = np.zeros((1, 50, 50))
        kernel = proposal.RandomMixtureProposal(moves={'aaa': shape.shape_change_part_size_local},
                                                params={'CHANGE_SIZE_VARIANCE': 1.0})

        params = {'name': 'unittest', 'results_folder': '.', 'sampler': 'xxx', 'burn_in': 0, 'sample_count': 1,
                  'best_sample_count': 1, 'thinning_period': 10, 'data': data, 'kernel': kernel, 'initial_h': s,
                  'report_period': 10}

        # wrong sampler
        self.assertRaises(ValueError, run_chain, **params)

        # need to supply temperatures if sampler is pt
        params['sampler'] = 'pt'
        self.assertRaises(ValueError, run_chain, **params)

        params['temperatures'] = [2.0, 1.0]
        results = run_chain(**params)
        # every summary statistic should be present in the results dict;
        # membership is tested on the dict directly instead of on .keys()
        expected_keys = ['run_id', 'run_file', 'mean_acceptance_rate', 'start_time', 'end_time', 'duration',
                         'best_ll', 'best_posterior', 'mse', 'mean_best_ll', 'mean_best_posterior', 'mse_mean',
                         'mean_sample_posterior', 'mean_sample_ll', 'mse_sample']
        for expected_key in expected_keys:
            self.assertIn(expected_key, results)

        # saved the right files
        start = results['start_time']
        time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime(start))
        fname = "{0:s}/{1:s}_{2:s}_{3:06d}.pkl".format(params['results_folder'], params['name'], time_str,
                                                       results['run_id'])
        self.assertTrue(os.path.isfile(fname))
        os.remove(fname)

        fname = "{0:s}/{1:s}/s{2:d}_0.png".format(params['results_folder'], params['name'], 0)
        self.assertTrue(os.path.isfile(fname))
        os.remove(fname)

        fname = "{0:s}/{1:s}/b{2:d}_0.png".format(params['results_folder'], params['name'], 0)
        self.assertTrue(os.path.isfile(fname))
        os.remove(fname)

        folder = "{0:s}/{1:s}".format(params['results_folder'], params['name'])
        os.rmdir(folder)
    def test_calculate_similarity_image_given_image(self):
        """calculate_similarity_image_given_image should reproduce known similarity values."""
        forward_model = i3d_vfm.VisionForwardModel(render_size=(200, 200),
                                                   offscreen_rendering=True,
                                                   custom_lighting=False)

        # the two samples differ only in the edge length of their single cuboid
        def build_sample(edge_length):
            return Shape(forward_model=forward_model,
                         viewpoint=[np.array([np.sqrt(8.0), 0.0, 90.0])],
                         params={'MAX_PIXEL_VALUE': 255.0, 'LL_VARIANCE': 0.1},
                         parts=[CuboidPrimitive(position=[0.0, 0.0, 0.0],
                                                size=[edge_length] * 3)])

        s1 = build_sample(1.0)
        s2 = build_sample(.5)

        # observed image
        image = np.load('test_images/test_image.npy')

        # calculate similarity using two samples.
        # expected values are pre-calculated. NOTE these could change if
        # VisionForwardModel changes.
        logp_avg, logp_wavg, logp_best, logp_wbest = calculate_similarity_image_given_image(
            image, [s1, s2], [np.log(2.0), np.log(1.0)])
        self.assertAlmostEqual(logp_avg, np.log(0.3549830885871178))
        self.assertAlmostEqual(logp_wavg, np.log(0.41810779467381104))
        self.assertAlmostEqual(logp_best, np.log(0.58679820659933835))
        self.assertAlmostEqual(logp_wbest, np.log(0.72453213773289238))

        # calculate similarity from one sample
        logp_avg, logp_wavg, logp_best, logp_wbest = calculate_similarity_image_given_image(
            image, [s1], [np.log(2.0)])
        self.assertAlmostEqual(logp_avg, np.log(0.54435720684719735))
        self.assertAlmostEqual(logp_wavg, np.log(0.54435720684719735))
        self.assertAlmostEqual(logp_best, np.log(1.0))
        self.assertAlmostEqual(logp_wbest, np.log(1.0))
    # NOTE(review): orphan fragment — the enclosing function's def line
    # (presumably a render_stimulus(name, shape) taking r and theta from
    # earlier, missing lines) is not in this chunk; code kept byte-identical.
    # build 7 viewpoints: elevations 45/30/15/0 on one side and 15/30/45 on the
    # opposite side (theta + 180), then render them in one image.
    shape.viewpoint = []
    shape.viewpoint.append((r, theta, 45))
    shape.viewpoint.append((r, theta, 30))
    shape.viewpoint.append((r, theta, 15))
    shape.viewpoint.append((r, theta, 0))
    shape.viewpoint.append((r, (theta + 180) % 360, 15))
    shape.viewpoint.append((r, (theta + 180) % 360, 30))
    shape.viewpoint.append((r, (theta + 180) % 360, 45))
    fwm.save_render("stimuli/exp2/behavioral/{0:s}_ortho.png".format(name), shape)


if __name__ == "__main__":
    stimuli_folder = "stimuli/exp2"

    # stimuli names
    # names = ['s7']
    names = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10']
    # names = ['s11', 's12', 's13', 's14', 's15', 's16', 's17', 's18', 's19', 's20']

    # theta=0.0 viewpoint
    # use context managers so the pickle file handles are closed deterministically
    with open("{0:s}/viewpoint_thetas.pkl".format(stimuli_folder)) as thetas_file:
        viewpoint_thetas = pkl.load(thetas_file)

    fwm = vfm.VisionForwardModel(render_size=(400, 400), offscreen_rendering=False, custom_lighting=False)

    for name in names:
        with open("{0:s}/{1:s}.pkl".format(stimuli_folder, name)) as shape_file:
            shape = pkl.load(shape_file)
        shape.viewpoint = [[np.sqrt(8.0), viewpoint_thetas[name], 45.0]]
        render_stimulus(name, shape)


def get_feature_positions(forward_model, shape):
    """Project the 3D feature points onto the image plane for a shape's viewpoint.

    Relies on the module-level globals `points` (feature positions in world
    coordinates) and `IMG_SIZE` (render size in pixels).

    Parameters:
        forward_model: vision forward model providing convert_world_to_display
        shape: shape whose (converted) viewpoint is used for the projection;
            its primitive_type and viewpoint are fixed up in place

    Returns:
        numpy.ndarray: 2 x len(points) array of (x, y) display coordinates
    """
    # fix missing and ill-formed data
    shape.primitive_type = 'CUBE'
    shape.viewpoint[0] = geom_3d.cartesian_to_spherical(shape.viewpoint[0])

    # find position of each feature; sized from the number of feature points
    # instead of a hard-coded 8 so the function generalizes to other point sets
    feature_list = np.zeros((2, len(points)))
    for i, p in enumerate(points):
        px, py = forward_model.convert_world_to_display(shape.viewpoint[0], p[0], p[1], p[2])
        # in vtk's coordinate system, left bottom corner is (0, 0)
        feature_list[:, i] = [px, IMG_SIZE[1] - py]

    return feature_list

if __name__ == "__main__":
    import Infer3DShape.vision_forward_model as vfm
    fwm = vfm.VisionForwardModel(render_size=IMG_SIZE, offscreen_rendering=True)

    shapes = pkl.load(open('../../../Infer3DShape/data/stimuli20150624_144833/shapes_single_view.pkl'))

    feature_lists = {}
    for name, shape in shapes.iteritems():
        print '.',
        feature_list = get_feature_positions(fwm, shape)
        feature_lists[name] = feature_list

    pkl.dump(feature_lists, open("AlignmentFeatureLists.pkl", "w"))


def run_experiment2(**kwargs):
    """This method runs the chain with a PaperClipShape hypothesis and given parameters for the second view-dependency
    experiment we look at.

    This method is intended to be used in an Experiment instance. This method prepares the necessary data and
    calls `Infer3DShape.run_chain`.

    Parameters:
        kwargs (dict): Keyword arguments are as follows
            input_file (str): name of the data file containing the observed image
            data_folder (str): folder containing the data files
            results_folder (str):
            sampler (str): see `run_chain` function
            ll_variance (float): variance of the Gaussian likelihood
            max_pixel_value (float): maximum pixel intensity value
            change_viewpoint_variance (float): variance for the change viewpoint move
            max_joint_count (int): maximum number of joints in the shape. required if shape_type is 'paperclip'
            move_joint_variance (float): variance for the move joint move. required if shape_type is 'paperclip'
            max_new_segment_length (float): maximum new segment length for add joint move. required if shape_type is
                'paperclip'
            max_segment_length_change (float): maximum change ratio for change segment length move. required if
                shape_type is 'paperclip'
            rotate_midsegment_variance (float): variance for the rotate midsegment move. required if shape_type is
                'paperclip'
            burn_in (int): see `run_chain` function
            sample_count (int): see `run_chain` function
            best_sample_count (int): see `run_chain` function
            thinning_period (int): see `run_chain` function
            report_period (int): see `run_chain` function
            temperatures (list): see `run_chain` function

    Returns:
        dict: run results
    """
    try:
        input_file = kwargs['input_file']
        results_folder = kwargs['results_folder']
        data_folder = kwargs['data_folder']
        sampler = kwargs['sampler']

        ll_variance = kwargs['ll_variance']
        max_pixel_value = kwargs['max_pixel_value']
        change_viewpoint_variance = kwargs['change_viewpoint_variance']

        max_joint_count = kwargs['max_joint_count']
        move_joint_variance = kwargs['move_joint_variance']
        max_new_segment_length = kwargs['max_new_segment_length']
        max_segment_length_change = kwargs['max_segment_length_change']
        rotate_midsegment_variance = kwargs['rotate_midsegment_variance']

        burn_in = kwargs['burn_in']
        sample_count = kwargs['sample_count']
        best_sample_count = kwargs['best_sample_count']
        thinning_period = kwargs['thinning_period']
        report_period = kwargs['report_period']

        # pid is set by Experiment class
        chain_id = kwargs['pid']

        # temperatures are required only for the parallel tempering ('pt') sampler;
        # a missing 'temperatures' key is turned into a ValueError by the except below
        temperatures = None
        if sampler == 'pt':
            temperatures = kwargs['temperatures']

    except KeyError as e:
        # NOTE(review): e.message is Python 2 only — confirm before porting to Python 3
        raise ValueError("All experiment parameters should be provided. Missing parameter {0:s}".format(e.message))

    import numpy as np

    # seed using chain_id to prevent parallel processes from getting the same random seed
    np.random.seed(int((time.time() * 1000) + chain_id) % 2**32)

    # read training data. subjects are trained on two sets of views 75 degrees apart.
    data = np.load("{0:s}/{1:s}_train.npy".format(data_folder, input_file))
    # render at the same resolution as the observed images
    render_size = data.shape[1:]

    # if shape_type == 'paperclip':
    custom_lighting = False

    import Infer3DShape.vision_forward_model as i3d_vfm
    fwm = i3d_vfm.VisionForwardModel(render_size=render_size, offscreen_rendering=True, custom_lighting=custom_lighting)

    shape_params = {'LL_VARIANCE': ll_variance, 'MAX_PIXEL_VALUE': max_pixel_value, 'SEGMENT_LENGTH_VARIANCE': 0.0001}

    # construct viewpoint. during training, subjects see views: theta = 270, 285, 300, 345, 0, 15
    # a random offset theta is added to all views so absolute orientation varies between chains
    theta = np.random.rand() * 360.0
    viewpoint1 = np.array((np.sqrt(8.0), theta + 270.0, 45.0))
    viewpoint2 = np.array((np.sqrt(8.0), theta + 285.0, 45.0))
    viewpoint3 = np.array((np.sqrt(8.0), theta + 300.0, 45.0))
    viewpoint4 = np.array((np.sqrt(8.0), theta + 345.0, 45.0))
    viewpoint5 = np.array((np.sqrt(8.0), theta + 0.0, 45.0))
    viewpoint6 = np.array((np.sqrt(8.0), theta + 15.0, 45.0))
    viewpoint = [viewpoint1, viewpoint2, viewpoint3, viewpoint4, viewpoint5, viewpoint6]

    # construct initial hypothesis and kernel
    import Infer3DShape.i3d_proposal as i3d_proposal
    kernel_params = {'CHANGE_VIEWPOINT_VARIANCE': change_viewpoint_variance}
    moves = {'change_viewpoint': i3d_proposal.change_viewpoint_z}

    import Infer3DShape.paperclip_shape as i3d_pc
    h = i3d_pc.PaperClipShape(forward_model=fwm, viewpoint=viewpoint, params=shape_params,
                              min_joints=2, max_joints=max_joint_count, joint_count=6, mid_segment_id=2)

    kernel_params['MOVE_JOINT_VARIANCE'] = move_joint_variance
    kernel_params['MAX_NEW_SEGMENT_LENGTH'] = max_new_segment_length
    kernel_params['MAX_SEGMENT_LENGTH_CHANGE'] = max_segment_length_change
    kernel_params['ROTATE_MIDSEGMENT_VARIANCE'] = rotate_midsegment_variance

    moves['paperclip_move_joints'] = i3d_pc.paperclip_shape_move_joint
    moves['paperclip_move_branch'] = i3d_pc.paperclip_shape_move_branch
    moves['paperclip_change_segment_length'] = i3d_pc.paperclip_shape_change_segment_length
    moves['paperclip_change_branch_length'] = i3d_pc.paperclip_shape_change_branch_length
    moves['paperclip_add_remove_joint'] = i3d_pc.paperclip_shape_add_remove_joint
    moves['paperclip_rotate_midsegment'] = i3d_pc.paperclip_shape_rotate_midsegment

    import mcmclib.proposal as mcmc_proposal
    kernel = mcmc_proposal.RandomMixtureProposal(moves=moves, params=kernel_params)

    results = run_chain(name=input_file, sampler=sampler, initial_h=h, data=data, kernel=kernel, burn_in=burn_in,
                        thinning_period=thinning_period, sample_count=sample_count, best_sample_count=best_sample_count,
                        report_period=report_period, results_folder=results_folder, temperatures=temperatures)

    return results
# Beispiel #8
# 0
        # NOTE(review): orphan fragment — the enclosing function's def line and
        # loop header are missing from this chunk; code kept byte-identical.
        di = data[i]
        # pack one image into a caffe Datum, taking channels/height/width from
        # the array's own shape, and store it in the LMDB transaction
        datum = caffe.proto.caffe_pb2.Datum()
        datum.channels = di.shape[0]
        datum.height = di.shape[1]
        datum.width = di.shape[2]
        datum.data = di.tobytes()
        datum.label = label
        # zero-padded 8-digit ascii key drawn from the (shuffled) keys array
        key_id = "{:08}".format(keys[key_offset + i])
        txn.put(key_id.encode("ascii"), datum.SerializeToString())

    # number of images written
    return data.shape[0]


if __name__ == "__main__":
    # offscreen model for generating dataset images; onscreen model presumably
    # for visual inspection — TODO confirm, its use is not visible in this chunk
    fwm = vfm.VisionForwardModel(render_size=RENDER_SIZE,
                                 offscreen_rendering=True,
                                 custom_lighting=True)
    fwm_view = vfm.VisionForwardModel(render_size=(300, 300),
                                      offscreen_rendering=False,
                                      custom_lighting=True)

    # split the images into train/validation sets by TRAIN_RATIO
    img_count = STIMULI_COUNT * IMAGE_PER_STIMULI
    train_img_count = int(img_count * TRAIN_RATIO)
    val_img_count = img_count - train_img_count

    # random key orders so records are shuffled inside each database
    train_keys = np.random.permutation(train_img_count)
    val_keys = np.random.permutation(val_img_count)

    # map_size is an upper bound on the database size in bytes.
    # NOTE(review): this chunk is truncated here; the code that fills train_lmdb
    # (and any validation database) is not visible.
    train_db = lmdb.open("train_lmdb",
                         map_size=RENDER_WIDTH * RENDER_HEIGHT * 3 *
                         STIMULI_COUNT * 200)
# Beispiel #9
# 0
    # NOTE(review): orphan fragment — the enclosing function's def line
    # (presumably create_shape_from_stimuli, providing sm, view_angle, d and z)
    # is missing from this chunk; code kept byte-identical.
    # convert each spatial state of the stimulus into a scaled cuboid part
    parts = []
    for ss in sm.spatial_states.itervalues():
        pos = ss.position * SCALE_FACTOR
        size = ss.size * SCALE_FACTOR
        parts.append(hyp.CuboidPrimitive(position=pos, size=size))

    h = hyp.Shape(forward_model=None, parts=parts)
    # cartesian viewpoint from a distance d, an angle in degrees and height z
    view_x = d * np.cos(view_angle * np.pi / 180.0)
    view_y = d * np.sin(view_angle * np.pi / 180.0)
    view_z = z
    h.viewpoint = [(view_x, view_y, view_z)]
    return h


if __name__ == "__main__":
    fwm = vfm.VisionForwardModel(render_size=(200, 200))

    stimuli_folder = 'stimuli20150624_144833'
    stimuli_file = '{0:s}/stimuli_set.pkl'.format(stimuli_folder)
    save_folder = '../../Infer3DShape/data/{0:s}'.format(stimuli_folder)
    # read viewpoints for stimuli
    # NOTE(review): eval on file contents is unsafe for untrusted input —
    # confirm viewpoints.txt is trusted, or switch to ast.literal_eval
    view_angles = eval(
        open('{0:s}/viewpoints.txt'.format(stimuli_folder)).read())

    # read stimuli
    stim_set = cPickle.load(open(stimuli_file))

    # convert each stimulus object into a Shape hypothesis.
    # NOTE(review): this chunk is truncated here; whatever is done with h
    # (e.g., storing into shapes or saving to save_folder) is not visible.
    shapes = {}
    for sname, stim in stim_set.stimuli_objects.iteritems():
        print(sname)
        h = create_shape_from_stimuli(stim, view_angles[sname])
# Beispiel #10
# 0
    # NOTE(review): orphan fragment — the enclosing function's def line
    # (providing run and forward_model) is missing from this chunk; code kept
    # byte-identical.
    # discard the first 5 samples and re-attach the forward model — presumably
    # it was stripped before the run was pickled; confirm against the saver
    samples = run.samples.samples[5:]
    log_probs = run.samples.log_probs[5:]
    for sample in samples:
        sample.forward_model = forward_model

    # best samples are kept in full
    best_samples = run.best_samples.samples
    best_log_probs = run.best_samples.log_probs
    for sample in best_samples:
        sample.forward_model = forward_model

    return samples, log_probs, best_samples, best_log_probs


if __name__ == "__main__":
    fwm = vfm.VisionForwardModel()

    hypothesis = 'BDAoOSSShapeMaxD'
    run_date = '20160128'

    data_folder = "./data/stimuli20150624_144833"
    samples_folder = "./results/{0:s}/{1:s}".format(hypothesis, run_date)

    objects = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6', 'o7', 'o8', 'o9', 'o10']
    transformations = [
        't1_cs_d1', 't1_cs_d2', 't2_ap_d1', 't2_ap_d2', 't2_mf_d1', 't2_mf_d2',
        't2_rp_d1', 't2_rp_d2'
    ]
    # every object is compared against every object x transformation variation
    variations = [o + '_' + t for t in transformations for o in objects]
    comparisons = {o: variations for o in objects}
    # comparisons = {o: [o + '_' + t for t in transformations] for o in objects}
    # NOTE(review): this chunk is truncated here; the code consuming
    # `comparisons` is not visible.
# Beispiel #11
# 0
def main(stimulus):
    """Calculate model predictions for one training stimulus across all test conditions and save them to a csv file.

    Parameters:
        stimulus (str): name of the training stimulus (e.g., 's1')
    """
    stimuli_folder = './stimuli/exp2'
    results_folder = './results/exp2'

    conditions = ['inter', 'extra', 'ortho']

    use_best_samples = False

    run_list_file = './results/exp2/20160616.csv'
    run_list = pd.read_csv(run_list_file, index_col=0)

    # find the runs with minimum mse, i.e., max. probability
    run_list['stimulus_name'] = run_list['input_file'].apply(
        lambda x: x.split('_')[0])
    # run_ids = run_list.groupby('stimulus_name').apply(lambda x: x['run_id'][np.argmin(x['mse'])])
    run_ids = run_list.groupby('stimulus_name').apply(lambda x: x['run_id'])
    # get the filenames of run results
    run_files = os.listdir(results_folder)

    fwm = vfm.VisionForwardModel(render_size=(200, 200),
                                 offscreen_rendering=True,
                                 custom_lighting=False)

    # LogProbability: p(I_test|I_train) with p(I_test|S) calculated by integrating out viewpoint
    # LogProbability_best: p(I_test|I_train) with p(I_test|S) calculated by picking the best viewpoint
    # LogProbability_weighted: p(I_test|I_train) with each sample from p(S|I_train) weighted by its posterior prob.
    #   Note that this is not per se a proper way of estimation but it is an approximation and in cases of where a
    #   single sample dominates the posterior, it is quite close to MAP estimate.
    # LogProbability_weighted_best: similar to LogProbability_best and LogProbability_weighted_best
    # LogProbability_Image: An approximation of p(I_test|I != I_train)
    predictions = pd.DataFrame(
        index=np.arange(21),
        columns=[
            'Stimulus Id', 'Condition', 'ViewpointDifference',
            'LogProbability', 'LogProbability_best', 'LogProbability_weighted',
            'LogProbability_weighted_best', 'LogProbability_Image'
        ],
        dtype=float)

    row_id = 0
    print(stimulus)

    stim_samples, stim_sample_log_probs, stim_viewpoints = read_samples(
        fwm, results_folder, run_files, run_ids, stimulus, use_best_samples)

    for condition in conditions:
        print condition
        filename = "{0:s}/{1:s}_{2:s}.npy".format(stimuli_folder, stimulus,
                                                  condition)
        condition_data = np.load(filename)

        condition_predictions = predict_condition(stimulus, condition,
                                                  stim_samples,
                                                  stim_sample_log_probs,
                                                  condition_data,
                                                  stim_viewpoints)

        # 7 test viewpoints per condition fill 7 consecutive rows
        predictions.iloc[row_id:(row_id + 7)] = condition_predictions
        row_id += 7

    predictions.to_csv("predictions{0:s}_{1:s}.csv".format(
        "_best" if use_best_samples else "", stimulus))
 # NOTE(review): fragment with non-standard 1-space indentation, likely a paste
 # artifact; kept byte-identical.
 def setUp(self):
     """Create a fresh forward model and an empty shape before each test."""
     self.fwm = vfm.VisionForwardModel(render_size=(50, 50))
     self.s = shape.Shape(forward_model=self.fwm, parts=[])
    # NOTE(review): orphan fragment — the enclosing function's earlier lines
    # (defining data, samples, logp, logp_best, logp_w, logp_wbest) are missing
    # from this chunk; code kept byte-identical.
    # mse of the normalized data against a blank (all-zero) prediction, used to
    # approximate p(I_test | I != I_train)
    mse = np.sum(np.square(data / samples[0].params['MAX_PIXEL_VALUE'])) / data.size
    logp_image = -mse / (2 * samples[0].params['LL_VARIANCE'])

    return logp, logp_best, logp_w, logp_wbest, logp_image


if __name__ == '__main__':
    train_stimuli = ['s1_80', 's5_20']
    test_stimuli = ['s1', 's5']
    stimuli_folder = './stimuli/exp1'
    results_folder = './results/exp1'

    # test viewpoints every 20 degrees around the object
    test_viewpoints = range(0, 360, 20)
    use_best_samples = False

    fwm = vfm.VisionForwardModel(render_size=(200, 200), custom_lighting=False)

    # LogProbability: p(I_test|I_train) with p(I_test|S) calculated by integrating out viewpoint
    # LogProbability_best: p(I_test|I_train) with p(I_test|S) calculated by picking the best viewpoint
    # LogProbability_weighted: p(I_test|I_train) with each sample from p(S|I_train) weighted by its posterior prob.
    #   Note that this is not per se a proper way of estimation but it is an approximation and in cases of where a
    #   single sample dominates the posterior, it is quite close to MAP estimate.
    # LogProbability_weighted_best: similar to LogProbability_best and LogProbability_weighted_best
    # LogProbability_Image: An approximation of p(I_test|I != I_train)
    predictions = pd.DataFrame(index=np.arange(len(test_viewpoints) * len(test_stimuli) * len(train_stimuli)),
                               columns=['TrainingStimulus', 'TestStimulus', 'TestViewpoint', 'LogProbability',
                                        'LogProbability_best', 'LogProbability_weighted',
                                        'LogProbability_weighted_best', 'LogProbability_Image'],
                               dtype=float)

    row_id = 0
    # NOTE(review): this chunk is truncated here; the loop that fills
    # `predictions` is not visible.
# Beispiel #14
# 0
        # NOTE(review): orphan fragment — the enclosing function's def line and
        # loop header are missing from this chunk; code kept byte-identical.
        # store the 3D position of part ix into the flat target vector
        y[(i * 3):((i + 1) * 3)] = s.parts[ix].position
    return s, y


if __name__ == "__main__":
    part_count = 2
    object_count = 20000
    max_pixel_value = 175.0

    data_folder = "./data"
    img_folder = "./data/png"
    save_img = False
    img_size = (50, 50)

    # use a small image from a single viewpoint
    fwm = vfm.VisionForwardModel(render_size=img_size,
                                 camera_pos=[(3.0, -3.0, 3.0)])

    # x holds flattened normalized images; y holds 3 position values per part
    x = np.zeros((object_count, img_size[0] * img_size[1]))
    y = np.zeros((object_count, part_count * 3))
    for i in range(object_count):
        hlp.progress_bar(current=i + 1,
                         max=object_count,
                         label='Generating object...')
        h, h_y = generate_object_fixed_part_count_and_size(
            part_count=part_count)
        img = fwm.render(h)
        # normalize to -1, 1
        img = ((img / max_pixel_value) - 0.5) * 2
        x[i, :] = img.flatten()
        y[i, :] = h_y
        # NOTE(review): this chunk is truncated here; the body of the
        # `if save_img:` branch (and any saving of x/y) is not visible.
        if save_img:
# Beispiel #15
# 0
def run_experiment1(**kwargs):
    """This method runs the chain with a PaperClipShape hypothesis and given parameters for the canonical view effect
    experiment.

    This method is intended to be used in an Experiment instance. This method prepares the necessary data and
    calls `Infer3DShape.run_chain`.

    Parameters:
        kwargs (dict): Keyword arguments are as follows
            input_file (str): name of the data file containing the observed image
            data_folder (str): folder containing the data files
            results_folder (str):
            sampler (str): see `run_chain` function
            max_joint_count (int): maximum number of joints in the shape
            ll_variance (float): variance of the Gaussian likelihood
            max_pixel_value (float): maximum pixel intensity value
            move_joint_variance (float): variance for the move joint move
            max_new_segment_length (float): maximum new segment length for add joint move
            max_segment_length_change (float): maximum change ratio for change segment length move
            rotate_midsegment_variance (float): variance for the rotate midsegment move
            change_viewpoint_variance (float): variance for the change viewpoint move
            burn_in (int): see `run_chain` function
            sample_count (int): see `run_chain` function
            best_sample_count (int): see `run_chain` function
            thinning_period (int): see `run_chain` function
            report_period (int): see `run_chain` function
            temperatures (list): see `run_chain` function

    Returns:
        dict: run results
    """
    try:
        input_file = kwargs['input_file']
        results_folder = kwargs['results_folder']
        data_folder = kwargs['data_folder']
        sampler = kwargs['sampler']
        max_joint_count = kwargs['max_joint_count']
        ll_variance = kwargs['ll_variance']
        max_pixel_value = kwargs['max_pixel_value']
        move_joint_variance = kwargs['move_joint_variance']
        max_new_segment_length = kwargs['max_new_segment_length']
        max_segment_length_change = kwargs['max_segment_length_change']
        rotate_midsegment_variance = kwargs['rotate_midsegment_variance']
        change_viewpoint_variance = kwargs['change_viewpoint_variance']
        burn_in = kwargs['burn_in']
        sample_count = kwargs['sample_count']
        best_sample_count = kwargs['best_sample_count']
        thinning_period = kwargs['thinning_period']
        report_period = kwargs['report_period']
        # temperatures is optional here (unlike the other required parameters)
        temperatures = None
        if 'temperatures' in kwargs:
            temperatures = kwargs['temperatures']
    except KeyError as e:
        # NOTE(review): e.message is Python 2 only — confirm before porting to Python 3
        raise ValueError(
            "All experiment parameters should be provided. Missing parameter {0:s}"
            .format(e.message))

    import numpy as np

    # read the data file
    data = np.load("{0:s}/{1:s}.npy".format(data_folder, input_file))
    # render at the same resolution as the observed images
    render_size = data.shape[1:]

    import Infer3DShape.vision_forward_model as i3d_vfm
    fwm = i3d_vfm.VisionForwardModel(render_size=render_size,
                                     offscreen_rendering=True,
                                     custom_lighting=False)

    shape_params = {
        'LL_VARIANCE': ll_variance,
        'MAX_PIXEL_VALUE': max_pixel_value,
        'SEGMENT_LENGTH_VARIANCE': 0.0001
    }

    # start from a random orientation around the z axis
    viewpoint = np.array((np.sqrt(8.0), np.random.rand() * 360.0, 45.0))

    import Infer3DShape.paperclip_shape as i3d_pc
    h = i3d_pc.PaperClipShape(forward_model=fwm,
                              viewpoint=[viewpoint],
                              params=shape_params,
                              min_joints=2,
                              max_joints=max_joint_count,
                              joint_count=6,
                              mid_segment_id=2)

    kernel_params = {
        'CHANGE_VIEWPOINT_VARIANCE': change_viewpoint_variance,
        'MOVE_JOINT_VARIANCE': move_joint_variance,
        'MAX_NEW_SEGMENT_LENGTH': max_new_segment_length,
        'MAX_SEGMENT_LENGTH_CHANGE': max_segment_length_change,
        'ROTATE_MIDSEGMENT_VARIANCE': rotate_midsegment_variance
    }

    import Infer3DShape.i3d_proposal as i3d_proposal
    moves = {
        'change_viewpoint': i3d_proposal.
        change_viewpoint_z,  # in exp1, only rotations around z are allowed.
        'paperclip_move_joints': i3d_pc.paperclip_shape_move_joint,
        'paperclip_move_branch': i3d_pc.paperclip_shape_move_branch,
        'paperclip_change_segment_length':
        i3d_pc.paperclip_shape_change_segment_length,
        'paperclip_change_branch_length':
        i3d_pc.paperclip_shape_change_branch_length,
        'paperclip_add_remove_joint': i3d_pc.paperclip_shape_add_remove_joint,
        'paperclip_rotate_midsegment': i3d_pc.paperclip_shape_rotate_midsegment
    }

    import mcmclib.proposal as mcmc_proposal
    kernel = mcmc_proposal.RandomMixtureProposal(moves=moves,
                                                 params=kernel_params)

    results = run_chain(name=input_file,
                        sampler=sampler,
                        initial_h=h,
                        data=data,
                        kernel=kernel,
                        burn_in=burn_in,
                        thinning_period=thinning_period,
                        sample_count=sample_count,
                        best_sample_count=best_sample_count,
                        report_period=report_period,
                        results_folder=results_folder,
                        temperatures=temperatures)

    return results