Code Example #1
def test_gaussian_grasp_sampling(vis=False):
    np.random.seed(100)

    h = plt.figure()
    ax = h.add_subplot(111, projection='3d')

    sdf_3d_file_name = 'data/test/sdf/Co_clean.sdf'
    sf = sdf_file.SdfFile(sdf_3d_file_name)
    sdf_3d = sf.read()

    mesh_name = 'data/test/meshes/Co_clean.obj'
    of = obj_file.ObjFile(mesh_name)
    m = of.read()

    graspable = graspable_object.GraspableObject3D(sdf_3d,
                                                   mesh=m,
                                                   model_name=mesh_name)

    config_file = 'cfg/correlated.yaml'
    config = ec.ExperimentConfig(config_file)
    sampler = GaussianGraspSampler(config)

    start_time = time.clock()
    grasps = sampler.generate_grasps(graspable,
                                     target_num_grasps=200,
                                     vis=False)
    end_time = time.clock()
    duration = end_time - start_time
    logging.info('Gaussian grasp candidate generation took %f sec' %
                 (duration))
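
These gallery excerpts omit their import blocks. A sketch of the imports this test appears to assume, with module paths inferred from usage (exact names in the GPIS repo may differ):

# Import sketch; module paths inferred from usage, not confirmed
# against the GPIS source tree.
import logging
import time

import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection

import experiment_config as ec
import graspable_object
import obj_file
import sdf_file
from grasp_sampler import GaussianGraspSampler  # module name assumed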
Code Example #2
File: grasp_transfer.py (Project: puneetp/GPIS)
def getprobability(object, grasps):
    obj_name = object[1]
    sdf_name = object[2]
    obj_mesh = of.ObjFile(obj_name).read()
    sdf_ = sf.SdfFile(sdf_name).read()
    obj = go.GraspableObject3D(sdf_,
                               mesh=obj_mesh,
                               key=object[0].replace("_features.txt", ""),
                               model_name=obj_name)
    config_name = "cfg/correlated.yaml"
    config = ec.ExperimentConfig(config_name)
    np.random.seed(100)

    brute_force_iter = config['bandit_brute_force_iter']
    max_iter = config['bandit_max_iter']
    confidence = config['bandit_confidence']
    snapshot_rate = config['bandit_snapshot_rate']
    tc_list = [
        tc.MaxIterTerminationCondition(max_iter),
        # tc.ConfidenceTerminationCondition(confidence)
    ]

    # run bandits!
    graspable_rv = pfc.GraspableObjectGaussianPose(obj, config)
    f_rv = scipy.stats.norm(config['friction_coef'],
                            config['sigma_mu'])  # friction Gaussian RV

    # compute feature vectors for all grasps
    feature_extractor = ff.GraspableFeatureExtractor(obj, config)
    all_features = feature_extractor.compute_all_features(grasps)

    candidates = []
    for grasp, features in zip(grasps, all_features):
        grasp_rv = pfc.ParallelJawGraspGaussian(grasp, config)
        pfc_rv = pfc.ForceClosureRV(grasp_rv, graspable_rv, f_rv, config)
        if features is not None:
            pfc_rv.set_features(features)
            candidates.append(pfc_rv)

    def phi(rv):
        return rv.features

    nn = kernels.KDTree(phi=phi)
    kernel = kernels.SquaredExponentialKernel(sigma=config['kernel_sigma'],
                                              l=config['kernel_l'],
                                              phi=phi)
    objective = objectives.RandomBinaryObjective()

    # uniform allocation for true values
    ua = das.UniformAllocationMean(objective, candidates)
    ua_result = ua.solve(
        termination_condition=tc.MaxIterTerminationCondition(brute_force_iter),
        snapshot_rate=snapshot_rate)
    estimated_pfc = models.BetaBernoulliModel.beta_mean(
        ua_result.models[-1].alphas, ua_result.models[-1].betas)
    return estimated_pfc
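
A minimal invocation sketch, assuming the object argument is a (features file name, mesh path, SDF path) tuple, as the indexing above implies, and that a list of candidate grasps is already in hand:

# Hypothetical record; the layout is assumed from how object[0..2]
# are used above.
obj_record = ('spray_bottle_features.txt',
              'data/meshes/spray_bottle.obj',
              'data/sdf/spray_bottle.sdf')
estimated_pfc = getprobability(obj_record, grasps)
print estimated_pfc  # posterior mean probability of force closure per grasp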
Code Example #3
def test_load_grasps():
    logging.getLogger().setLevel(logging.INFO)
    config_filename = 'cfg/basic_labelling.yaml'
    config = ec.ExperimentConfig(config_filename)

    key = 'feline_greenies_dental_treats'
    db = Database(config)
    apc = db.datasets[0]
    grasps = apc.load_grasps(key,
                             'results/gce_grasps/amazon_picking_challenge')
    graspable = apc[key]
Code Example #4
def test_dataset():
    logging.getLogger().setLevel(logging.INFO)
    config_filename = 'cfg/basic_labelling.yaml'
    config = ec.ExperimentConfig(config_filename)

    db = Database(config)
    keys = []
    logging.info('Reading dataset %s' % (db.datasets[0].name))
    for obj in db.datasets[0]:
        keys.append(obj.key)

    assert (len(keys) == 26)
Code Example #5
def test_antipodal_grasp_sampling(vis=False):
    np.random.seed(100)

    h = plt.figure()
    ax = h.add_subplot(111, projection='3d')

    sdf_3d_file_name = 'data/test/sdf/Co_clean.sdf'
    sf = sdf_file.SdfFile(sdf_3d_file_name)
    sdf_3d = sf.read()

    mesh_name = 'data/test/meshes/Co_clean.obj'
    of = obj_file.ObjFile(mesh_name)
    m = of.read()

    graspable = graspable_object.GraspableObject3D(sdf_3d,
                                                   mesh=m,
                                                   model_name=mesh_name)

    config_file = 'cfg/correlated.yaml'
    config = ec.ExperimentConfig(config_file)
    sampler = AntipodalGraspSampler(config)

    start_time = time.clock()
    grasps = sampler.generate_grasps(graspable, vis=False)
    end_time = time.clock()
    duration = end_time - start_time
    logging.info('Antipodal grasp candidate generation took %f sec' %
                 (duration))

    if vis:
        plt.close()  # close the empty figure created above
        for i, grasp in enumerate(grasps, 1):
            plt.figure()
            ax = plt.gca(projection='3d')
            found, (c1, c2) = grasp.close_fingers(graspable)
            c1_proxy = c1.plot_friction_cone(color='m')
            c2_proxy = c2.plot_friction_cone(color='y')
            ax.set_xlim([5, 20])
            ax.set_ylim([5, 20])
            ax.set_zlim([5, 20])
            plt.title('Grasp %d' % (i))
            plt.axis('off')
            plt.show(block=False)
            for angle in range(0, 360, 10):
                ax.view_init(elev=5.0, azim=angle)
                plt.draw()
            plt.close()
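
The loop above unpacks the contact pair without checking the found flag returned by close_fingers. A slightly more defensive variant, assuming failure is signalled through that first return value:

# Defensive sketch; the failure payload of close_fingers is an assumption,
# so unpack the contacts only after checking success.
for i, grasp in enumerate(grasps, 1):
    found, contacts = grasp.close_fingers(graspable)
    if not found:
        logging.warning('Grasp %d: fingers did not close, skipping' % i)
        continue
    c1, c2 = contacts
    # ... plot the friction cones as above ...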
Code Example #6
File: grasp_transfer.py (Project: puneetp/GPIS)
def getgrasps(object):
    obj_name = object[1]
    sdf_name = object[2]
    obj_mesh = of.ObjFile(obj_name).read()
    sdf_ = sf.SdfFile(sdf_name).read()
    obj = go.GraspableObject3D(sdf_,
                               mesh=obj_mesh,
                               key=object[0].replace("_features.txt", ""),
                               model_name=obj_name)
    config_name = "cfg/correlated.yaml"
    config = ec.ExperimentConfig(config_name)
    np.random.seed(100)
    if config['grasp_sampler'] == 'antipodal':
        sampler = ags.AntipodalGraspSampler(config)
        grasps = sampler.generate_grasps(
            obj, check_collisions=config['check_collisions'], vis=False)
        num_grasps = len(grasps)
        min_num_grasps = config['min_num_grasps']
        if num_grasps < min_num_grasps:
            target_num_grasps = min_num_grasps - num_grasps
            gaussian_sampler = gs.GaussianGraspSampler(config)
            gaussian_grasps = gaussian_sampler.generate_grasps(
                obj,
                target_num_grasps=target_num_grasps,
                check_collisions=config['check_collisions'],
                vis=False)
            grasps.extend(gaussian_grasps)

    else:
        sampler = gs.GaussianGraspSampler(config)
        grasps = sampler.generate_grasps(
            obj,
            check_collisions=config['check_collisions'],
            vis=False,
            grasp_gen_mult=6)
    max_num_grasps = config['max_num_grasps']
    if len(grasps) > max_num_grasps:
        np.random.shuffle(grasps)
        grasps = grasps[:max_num_grasps]
    return grasps
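
getgrasps composes with getprobability from Code Example #2: sample candidates, then score them. A sketch reusing the hypothetical obj_record from the note after that example:

grasps = getgrasps(obj_record)
pfc_estimates = getprobability(obj_record, grasps)
best = np.argmax(pfc_estimates)
print 'Best grasp %d, estimated PFC %.3f' % (best, pfc_estimates[best])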
Code Example #7
File: generate_database.py (Project: puneetp/GPIS)
# The excerpt begins mid-function; the wrapper below (name and initial
# split) is a reconstruction so the loop reads in context.
def category_from_path(path):
    head, tail = os.path.split(path)
    while head != '/' and tail in INVALID_CATEGORIES:
        head, tail = os.path.split(head)
    return tail
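
INVALID_CATEGORIES is defined elsewhere in generate_database.py; a hypothetical stand-in, just to make the walk-up-the-path loop concrete:

# Hypothetical values; the real list lives elsewhere in the source file.
INVALID_CATEGORIES = ['', 'meshes', 'textured_meshes', 'processed']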


# read in params
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('config', default='cfg/generate_database.yaml')
    args = parser.parse_args()

    logging.getLogger().setLevel(logging.INFO)

    # read config file
    config = ec.ExperimentConfig(args.config)

    # filesystem params
    dataset = config['dataset']
    shape_db_root_folder = config['shape_data_root_folder']
    dest_root_folder = config['destination_root_folder']

    # numeric params
    min_dim = config['min_dim']
    dim = config['sdf_dim']
    padding = config['sdf_padding']
    density = config['density']
    gripper_size = config['gripper_size']

    dataset_start = 0
    # get indices of dataset configurations
Code Example #8
def test_gce_job_update():
    config_name = 'cfg/test_gce_update2.yaml'
    config = ec.ExperimentConfig(config_name)
    gce_job = GceJob(config)
    gce_job.store()
Code Example #9
def test_gce_job_run():
    config_name = 'cfg/test_gce.yaml'
    config = ec.ExperimentConfig(config_name)
    gce_job = GceJob(config)
    gce_job.run()
Code Example #10
def test_window_correlation(width, num_steps, vis=True):
    import scipy
    import sdf_file, obj_file
    import discrete_adaptive_samplers as das
    import experiment_config as ec
    import feature_functions as ff
    import grasp as g  # module name assumed; provides ParallelJawPtGrasp3D used below
    import graspable_object as go  # weird Python issues
    import kernels
    import models
    import objectives
    import pfc
    import termination_conditions as tc

    np.random.seed(100)

    mesh_file_name = 'data/test/meshes/Co_clean.obj'
    sdf_3d_file_name = 'data/test/sdf/Co_clean.sdf'

    config = ec.ExperimentConfig('cfg/correlated.yaml')
    config['window_width'] = width
    config['window_steps'] = num_steps
    brute_force_iter = 100
    snapshot_rate = config['bandit_snapshot_rate']

    sdf = sdf_file.SdfFile(sdf_3d_file_name).read()
    mesh = obj_file.ObjFile(mesh_file_name).read()
    graspable = go.GraspableObject3D(sdf, mesh)
    grasp_axis = np.array([0, 1, 0])
    grasp_width = 0.1

    grasps = []
    for z in [-0.030, -0.035, -0.040, -0.045]:
        grasp_center = np.array([0, 0, z])
        grasp = g.ParallelJawPtGrasp3D(
            g.ParallelJawPtGrasp3D.configuration_from_params(
                grasp_center, grasp_axis, grasp_width))
        grasps.append(grasp)

    graspable_rv = pfc.GraspableObjectGaussianPose(graspable, config)
    f_rv = scipy.stats.norm(config['friction_coef'],
                            config['sigma_mu'])  # friction Gaussian RV

    # compute feature vectors for all grasps
    feature_extractor = ff.GraspableFeatureExtractor(graspable, config)
    all_features = feature_extractor.compute_all_features(grasps)

    candidates = []
    for grasp, features in zip(grasps, all_features):
        logging.info('Adding grasp %d' % len(candidates))
        grasp_rv = pfc.ParallelJawGraspGaussian(grasp, config)
        pfc_rv = pfc.ForceClosureRV(grasp_rv, graspable_rv, f_rv, config)
        pfc_rv.set_features(features)
        candidates.append(pfc_rv)

        if vis:
            _, (c1, c2) = grasp.close_fingers(graspable)
            plt.figure()
            c1_proxy = c1.plot_friction_cone(color='m')
            c2_proxy = c2.plot_friction_cone(color='y')
            plt.legend([c1_proxy, c2_proxy], ['Cone 1', 'Cone 2'])
            plt.title('Grasp %d' % (len(candidates)))

    objective = objectives.RandomBinaryObjective()
    ua = das.UniformAllocationMean(objective, candidates)
    logging.info('Running uniform allocation for true pfc.')
    ua_result = ua.solve(
        termination_condition=tc.MaxIterTerminationCondition(brute_force_iter),
        snapshot_rate=snapshot_rate)
    estimated_pfc = models.BetaBernoulliModel.beta_mean(
        ua_result.models[-1].alphas, ua_result.models[-1].betas)

    print 'true pfc'
    print estimated_pfc

    def phi(rv):
        return rv.features

    kernel = kernels.SquaredExponentialKernel(sigma=config['kernel_sigma'],
                                              l=config['kernel_l'],
                                              phi=phi)

    print 'kernel matrix'
    print kernel.matrix(candidates)

    if vis:
        plt.show()
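
A sketch of invoking this test; the values are illustrative, and the units of window_width depend on the config convention:

# Illustrative parameters only.
test_window_correlation(width=0.1, num_steps=15, vis=False)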
Code Example #11
    # excerpt begins mid-function; `manager` is constructed earlier in the source file
    manager.request_write_access()
    print 'Got access', manager.pid_
    time.sleep(2)
    manager.release_write_access()
    print 'Released access', manager.pid_

    time.sleep(2)
    manager.stop()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    argc = len(sys.argv)

    if argc < 2:
        logging.error('Must supply config file name')
        sys.exit(1)

    config_filename = sys.argv[1]
    config = ec.ExperimentConfig(config_filename)

    if argc == 2:
        mode = 'test'
    else:
        mode = sys.argv[2]

    if mode == 'test':
        test_access(config)
    elif mode == 'push':
        push_update_disk(config)
Code Example #12
def launch_experiment(args, sleep_time):
    """
    Perform OAuth 2 authorization, then start, list, and stop instance(s).
    """
    # Get total runtime
    start_time = time.time()
    launch_prep_start_time = time.time()

    # Parse arguments and load config file.
    config_file = args.config
    config = ec.ExperimentConfig(config_file)
    logging.basicConfig(level=logging.INFO)
    auth_http = oauth_authorization(config, args)

    # Retrieve / create instance data
    bucket = config['bucket']
    if not bucket:
        logging.error('Cloud Storage bucket required.')
        return
    instance_id = random_string(INSTANCE_NAME_LENGTH)
    instance_root = 'experiment-%s' % (instance_id)
    instance_name = '%s-' % (instance_root) + '%d'
    disk_name = instance_name + '-disk'
    image_name = config['compute']['image']
    run_script = config['compute']['run_script']

    # Make chunks
    chunks = make_chunks(config)
    all_num_grasps = config['all_num_grasps']
    grasp_samplers = config['grasp_samplers']

    # Initialize gce.Gce
    logging.info('Initializing GCE')
    gce_helper = gce.Gce(auth_http, config, project_id=config['project'])
    gcs_helper = gcs.Gcs(auth_http, config, project_id=config['project'])

    # Start an instance for each chunk
    num_instances = 0
    instances_per_region = 0
    zone_index = 0
    instances = []
    instance_names = []
    disk_names = []
    instance_results = []
    num_zones = len(config['compute']['zones'])

    yesno = raw_input(
        'Create %d instances? [Y/n] ' %
        (len(chunks) * len(grasp_samplers) * len(all_num_grasps)))
    if yesno.lower() == 'n':
        sys.exit(1)

    for chunk in chunks:
        for grasp_sampler in grasp_samplers:
            for num_grasps in all_num_grasps:
                # Create instance-specific configuration
                dataset = chunk['dataset']
                chunk_start, chunk_end = chunk['chunk']

                curr_instance_name = instance_name % num_instances
                curr_disk_name = disk_name % num_instances

                # Create instance metadata
                metadata = [
                    {
                        'key': 'config',
                        'value': config.file_contents
                    },
                    {
                        'key': 'instance_name',
                        'value': curr_instance_name
                    },
                    {
                        'key': 'project_name',
                        'value': config['project']
                    },
                    {
                        'key': 'bucket_name',
                        'value': bucket
                    },
                    # chunking metadata
                    {
                        'key': 'dataset',
                        'value': dataset
                    },
                    {
                        'key': 'chunk_start',
                        'value': chunk_start
                    },
                    {
                        'key': 'chunk_end',
                        'value': chunk_end
                    },
                    {
                        'key': 'run_script',
                        'value': run_script
                    },
                    {
                        'key': 'num_grasps',
                        'value': num_grasps
                    },
                    {
                        'key': 'grasp_sampler',
                        'value': grasp_sampler
                    }
                ]

                # Create a new instance
                logging.info('Creating GCE instance %s' % curr_instance_name)
                instances.append(
                    GceInstance(curr_instance_name, curr_disk_name, image_name,
                                config['compute']['zones'][zone_index],
                                metadata, config))

                # update loop info
                num_instances += 1
                instances_per_region += 1
                instance_names.append(curr_instance_name)
                disk_names.append(curr_disk_name)
                instance_console = (
                    'https://console.developers.google.com/'
                    'project/nth-clone-620/compute/instancesDetail/'
                    'zones/us-central1-a/instances/%s/console#end'
                ) % curr_instance_name

                # switch to new region if known to be above quota
                if instances_per_region >= config['compute']['instance_quota']:
                    instances_per_region = 0
                    zone_index += 1

                if zone_index >= num_zones:
                    logging.warning(
                        'Cannot create more instances! Capping experiment at %d instances.'
                        % (num_instances))
                    break

    # clear global q
    global instance_launch_queue
    while not instance_launch_queue.empty():
        instance_launch_queue.get()

    # launch all instances using multiprocessing
    launch_start_time = time.time()
    if config['num_processes'] == 1:
        for instance in instances:
            instance.start()
    else:
        pool = mp.Pool(min(config['num_processes'], len(instances)))
        pool.map(launch_instance, instances)
    logging.info('Done launching instances')

    # put instance launch names into a queue
    instance_results = []
    while not instance_launch_queue.empty():
        curr_instance_name = instance_launch_queue.get()
        instance_results.append('%s.tar.gz' % curr_instance_name)

    # set up service
    result_dl_start_time = time.time()
    service_not_ready = True
    while service_not_ready:
        try:
            service = discovery.build('storage',
                                      config['compute']['api_version'],
                                      http=auth_http)
            req = service.objects().list(bucket=bucket)
            service_not_ready = False
        except Exception as e:
            logging.info('Connection failed. Retrying...')

    instance_results.sort()
    completed_instance_results = []
    while instance_results:
        # Wait before checking again
        done_override = wait_for_input(sleep_time, prompt='done? ')
        if done_override:
            completed_instance_results.extend(instance_results)
            instance_results = []
            break

        logging.info('Checking for completion...')
        try:
            resp = req.execute()
        except Exception as e:
            logging.info('Connection failed. Retrying...')
            continue

        try:
            items = resp['items']
        except KeyError as e:
            logging.error(e)
            logging.error(resp)
            continue

        for item in items:
            if item['name'] in instance_results:
                completed_instance_results.append(item['name'])
                instance_results.remove(item['name'])
                logging.info('Instance %s completed!' % item['name'])
        logging.info('Waiting for %s', ' '.join(instance_results))

    # Delete the instances.
    delete_start_time = time.time()
    if config['num_processes'] == 1:
        for instance in instances:
            instance.stop()
    else:
        pool = mp.Pool(min(config['num_processes'], len(instances)))
        pool.map(stop_instance, instances)
    logging.info('Done stopping instances')

    # Print running instances
    all_running_instances = []
    for zone in config['compute']['zones']:
        zone_instances = gce_helper.list_instances(zone)
        lines = ['These are your running instances in zone %s:' % (zone)]
        for zone_instance in zone_instances:
            logging.info(zone_instance['name'])
            lines.append('    ' + zone_instance['name'])
        if not zone_instances:
            lines.append('    (none)')
        zone_instances_text = '\n'.join(lines)
        all_running_instances.append(zone_instances_text)
        logging.info(zone_instances_text)

    # Download the results
    download_start_time = time.time()
    store_dir, instance_result_dirs = gcs_helper.retrieve_results(
        config['bucket'], completed_instance_results, instance_root)

    # Send the user an email
    message = EMAIL_NOTIFICATION % dict(
        instance_id=instance_id,
        instance_names='\n'.join(map(lambda n: '    ' + n, instance_names)),
        experiment_config=config_file,
        script_commands=config['compute']['startup_script'],
        listinstances_output='\n\n'.join(all_running_instances))

    send_notification_email(message=message,
                            config=config,
                            subject="Your experiment has completed.")

    # Save config file
    with open(os.path.join(store_dir, 'config.yaml'), 'w') as f:
        f.write(config.file_contents)

    # Run the results script TODO: move above the email
    result_agg_start_time = time.time()
    results_script_call = 'python %s %s %s' % (config['results_script'],
                                               config_file, store_dir)
    os.system(results_script_call)

    # get runtime
    end_time = time.time()
    total_runtime = end_time - start_time
    launch_prep_time = launch_start_time - launch_prep_start_time
    launch_time = result_dl_start_time - launch_start_time
    run_time = delete_start_time - result_dl_start_time
    delete_time = download_start_time - delete_start_time
    dl_time = result_agg_start_time - download_start_time
    agg_time = end_time - result_agg_start_time

    logging.info('Total runtime: %f' % (total_runtime))
    logging.info('Prep time: %f' % (launch_prep_time))
    logging.info('Launch time: %f' % (launch_time))
    logging.info('Run time: %f' % (run_time))
    logging.info('Delete time: %f' % (delete_time))
    logging.info('Download time: %f' % (dl_time))
    logging.info('Result aggregation time: %f' % (agg_time))
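
launch_experiment expects an argparse namespace carrying the config path, plus a polling interval in seconds. A minimal driver sketch under those assumptions (flag names are hypothetical):

# Hypothetical driver; launch_experiment itself only implies args.config
# and a numeric sleep_time.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help='experiment config YAML')
    parser.add_argument('--sleep_time', type=int, default=60,
                        help='seconds between completion checks')
    args = parser.parse_args()
    launch_experiment(args, args.sleep_time)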