Example #1
def main(Vxf0, urdf, options):
    modelNames = ['w.mat',
                  'Sshape.mat']  # Two example models provided by Khansari
    modelNumber = 1  # could be zero or one depending on the experiment the user is running

    data, demoIdx = load_saved_mat_file(lyap + '/' + 'example_models/' +
                                        modelNames[modelNumber])

    Vxf0['d'] = int(data.shape[0] / 2)

    Vxf0 = guess_init_lyap(data, Vxf0, options['int_lyap_random'])
    cost = Cost()

    while True:
        print('Optimizing the lyapunov function')
        Vxf, J = cost.learnEnergy(Vxf0, data, options)
        if cost.success:
            print('optimization succeeded without violating constraints')
            break
        # Constraints were violated: enlarge the class of Lyapunov candidates and retry.
        old_l = Vxf0['L']
        Vxf0['L'] += 1
        print('Constraints violated. Increasing the size of L from {} --> {}'.format(
            old_l, Vxf0['L']))

    # Plot the result of V
    h1 = plt.plot(data[0, :], data[1, :], 'r.', label='demonstrations')

    extra = 30

    axes_limits = [
        np.min(data[0, :]) - extra,
        np.max(data[0, :]) + extra,
        np.min(data[1, :]) - extra,
        np.max(data[1, :]) + extra
    ]

    h3 = cost.energyContour(Vxf, axes_limits, np.array(()), np.array(()),
                            np.array(()), False)
    h2 = plt.plot(0, 0, 'g*', markersize=15, linewidth=3, label='target')
    plt.title('Energy Levels of the learned Lyapunov Functions', fontsize=12)
    plt.xlabel('x (mm)', fontsize=15)
    plt.ylabel('y (mm)', fontsize=15)
    h = [h1, h2, h3]

    # Run DS
    opt_sim = dict()
    opt_sim['dt'] = 0.01
    opt_sim['i_max'] = 4000
    opt_sim['tol'] = 1

    d = data.shape[0] // 2  # dimension of data
    x0_all = data[:d, demoIdx[0, :-1] - 1]  # initial points of all demonstrations

    # get gmm params
    gmm = GMM(num_clusters=options['num_clusters'])
    gmm.update(data.T, K=options['num_clusters'], max_iterations=100)
    mu, sigma, priors = gmm.mu.T, gmm.sigma.T, gmm.logmass.T

    # rho0 and kappa0 impose minimum acceptable rate of decrease in the energy
    # function during the motion. Refer to page 8 of the paper for more information
    rho0 = 1
    kappa0 = 0.1

    inp = list(range(Vxf['d']))
    output = np.arange(Vxf['d'], 2 * Vxf['d'])

    xd, _ = dsStabilizer(x0_all, Vxf, rho0, kappa0, priors, mu, sigma, inp,
                         output, cost)

    # Evaluate DS
    xT = np.array([])
    d = x0_all.shape[0]  # dimension of the model
    if xT.size == 0:  # no explicit target given; default to the origin
        xT = np.zeros((d, 1))

    # initialization
    nbSPoint = x0_all.shape[1]
    x = []
    #x0_all[0, 1] = -180  # modify starting point a bit to see performance in further regions
    #x0_all[1, 1] = -130
    x.append(x0_all)
    xd = []
    if xT.shape == x0_all.shape:
        XT = xT
    else:
        # a matrix of target locations (just to simplify computation)
        XT = np.tile(xT, [1, nbSPoint])

    t = 0  # starting time
    dt = 0.01
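    # Forward-Euler rollout of the stabilized DS: x_{k+1} = x_k + dt * xd_k,
    # started simultaneously from every demonstration's initial point.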
    for i in range(4000):
        xd.append(
            dsStabilizer(x[i] - XT, Vxf, rho0, kappa0, priors, mu, sigma, inp,
                         output, cost)[0])

        x.append(x[i] + xd[i] * dt)
        t += dt

    x = np.reshape(x, [len(x), d, nbSPoint])  # stack as (time, dim, demo)
    for i in range(nbSPoint):
        # Choose one trajectory
        x0 = x[:, :, i]
        if i == 0:
            plt.plot(x0[:, 0],
                     x0[:, 1],
                     linestyle='--',
                     label='DS eval',
                     color='blue')
        else:
            plt.plot(x0[:, 0], x0[:, 1], linestyle='--', color='blue')
    plt.legend()
    plt.show()
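
This example assumes the caller builds the Vxf0 and options dictionaries elsewhere. A minimal, hypothetical driver consistent with the keys the function reads could look as follows; every value shown is an illustrative assumption, not part of the original project, and learnEnergy may expect additional solver options:

# Hypothetical driver for Example #1. Key names mirror what main() reads above;
# the concrete values are assumptions chosen for illustration only.
if __name__ == '__main__':
    Vxf0 = {'L': 2}                # number of asymmetric components; grown when constraints fail
    options = {
        'int_lyap_random': False,  # deterministic vs. random initial Lyapunov guess
        'num_clusters': 10,        # GMM components fitted to the demonstrations
    }
    main(Vxf0, urdf=None, options=options)  # urdf is accepted but unused in this example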
Example #2
def main(Vxf0, urdf, options):
    gmm = GMM(num_clusters=options['num_clusters'])

    if options['args'].data_type == 'h5_data':
        filename = '{}.h5'.format('torobo_processed_data')
        with h5py.File(filename, 'r+') as f:
            data = f['data/data'][()]  # Dataset.value was removed in h5py 3.x
        logging.debug('loaded data with shape {}'.format(data.shape))
    elif options['args'].data_type == 'pipe_et_trumpet':
        path = 'data'
        name = 'cart_pos.csv'
        data, data6d = format_data(path, name, learn_type='2d')

    if options['use_6d']:
        data = data6d

    Vxf0['d'] = int(data.shape[0] / 2)

    Vxf0 = guess_init_lyap(data, Vxf0, options['int_lyap_random'])
    cost = Cost()

    while True:
        Vxf, J = cost.learnEnergy(Vxf0, data, options)
        if cost.success:
            rospy.logdebug('optimization succeeded without violating constraints')
            break
        # Constraints were violated: enlarge the class of Lyapunov candidates and retry.
        old_l = Vxf0['L']
        Vxf0['L'] += 1
        rospy.logwarn('Constraints violated. Increasing the size of L from {} --> {}'.format(
            old_l, Vxf0['L']))

    # get gmm params
    gmm.update(data.T, K=options['num_clusters'], max_iterations=100)
    mu, sigma, priors = gmm.mu, gmm.sigma, gmm.logmass

    if options['disp']:
        logging.debug(' mu {}, sigma {}, priors: {}'.format(
            mu.shape, sigma.shape, priors.shape))

    inp = slice(0, Vxf['d'])             # position rows of the data matrix
    out = slice(Vxf['d'], 2 * Vxf['d'])  # velocity rows of the data matrix
    rho0, kappa0 = 1.0, 1.0

    # GMR predicts the nominal velocity from the fitted GMM; dsStabilizer then
    # corrects that prediction so the learned Lyapunov function keeps decreasing.
    gmr_handle = lambda x: GMR(priors, mu, sigma, x, inp, out)
    stab_handle = lambda dat: dsStabilizer(dat, gmr_handle, Vxf, rho0, kappa0)

    x0_all = data[0, :]
    XT = data[-1, :]

    logging.debug('XT: {} x0: {}'.format(XT.shape, x0_all.shape))
    home_pos = [0.0] * 7
    executor = ToroboExecutor(home_pos, urdf)
    x, xd = executor.optimize_traj(data6d, stab_handle, opt_exec)
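
Example #2 additionally reads an argparse-style namespace from options['args'] and hands execution to a ToroboExecutor, so ROS and the robot description must be available. A minimal, hypothetical wiring of those options is sketched below; only the key names come from the code above, and every value, including the URDF path, is a placeholder assumption:

import argparse

# Hypothetical option setup for Example #2; key names are taken from the code
# above, all values (and the URDF path) are placeholder assumptions.
parser = argparse.ArgumentParser()
parser.add_argument('--data_type', default='pipe_et_trumpet',
                    choices=['h5_data', 'pipe_et_trumpet'])

options = {
    'args': parser.parse_args(),  # accessed as options['args'].data_type
    'use_6d': False,              # train on the 6-D Cartesian data instead of 2-D
    'num_clusters': 10,           # GMM components
    'int_lyap_random': False,     # initial Lyapunov guess strategy
    'disp': True,                 # log the GMM parameter shapes
}
Vxf0 = {'L': 2}                   # enlarged automatically if constraints are violated

main(Vxf0, urdf='path/to/torobo.urdf', options=options)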