import time
from collections import deque

import numpy as np
from lwpr import LWPR

# DynamicsLearnerInterface is the base class from the surrounding project.


class lwpr_dyn_model(DynamicsLearnerInterface):
    def __init__(self,
                 history_length,
                 prediction_horizon,
                 difference_learning,
                 averaging,
                 streaming,
                 settings=None):
        super().__init__(history_length,
                         prediction_horizon,
                         difference_learning,
                         averaging=averaging,
                         streaming=streaming)
        self.model_ = LWPR(self._get_input_dim(), self.observation_dimension)

        # Default values.
        init_D = 25
        init_alpha = 175
        self.time_threshold = np.inf
        if settings:
            init_D = settings['init_D']
            init_alpha = settings['init_alpha']
            self.time_threshold = settings.get('time_threshold', np.inf)
        self.model_.init_D = init_D * np.eye(self._get_input_dim())
        self.model_.init_alpha = init_alpha * np.eye(self._get_input_dim())

    def _learn(self, training_inputs, training_targets):
        def gen(inputs, targets):
            for i in range(inputs.shape[0]):
                yield targets[i], inputs[i]

        self._learn_from_stream(gen(training_inputs, training_targets),
                                training_inputs.shape[0])

    def _learn_from_stream(self, training_generator, generator_size):
        deck = deque(maxlen=100)
        for count in range(generator_size):
            training_target, training_input = next(training_generator)
            assert training_input.shape[0] == self._get_input_dim()
            assert training_target.shape[0] == self.observation_dimension
            time_before_update = time.perf_counter()
            self.model_.update(training_input, training_target)
            elapsed_time = time.perf_counter() - time_before_update
            deck.append(elapsed_time)
            if count and count % 1000 == 0:
                # Median over the last deck.maxlen update times.
                median_time = sorted(deck)[deck.maxlen // 2]
                print('Median update time (last {} updates) at iter {}: {}'.format(
                    deck.maxlen, count, median_time))
                if median_time > self.time_threshold:
                    break

    def _predict(self, inputs):
        assert self.model_, "a trained model must be available"
        prediction = np.zeros((inputs.shape[0], self.observation_dimension))
        for idx in range(inputs.shape[0]):
            prediction[idx, :] = self.model_.predict(inputs[idx])
        return prediction

    def name(self):
        return "LWPR"
Example #2
from numpy import array, eye, linspace, random, var, zeros
from lwpr import LWPR

# Xtr, Ytr: training inputs/targets of shape (Ntr, 1), assumed defined earlier.

# initialize the LWPR model
model = LWPR(1, 1)
model.init_D = 20 * eye(1)
model.update_D = True
model.init_alpha = 40 * eye(1)
model.meta = False

print(Xtr)
print(Ytr)
# train the model
for k in range(20):
    ind = random.permutation(Ntr)
    mse = 0

    for i in range(Ntr):
        yp = model.update(Xtr[ind[i]], Ytr[ind[i]])
        mse = mse + (Ytr[ind[i], :] - yp)**2

    nMSE = mse / Ntr / var(Ytr)
    print "#Data: %5i  #RFs: %3i  nMSE=%5.3f" % (model.n_data, model.num_rfs,
                                                 nMSE)

# test the model with unseen data
Ntest = 500
Xtest = linspace(0, 10, Ntest)

Ytest = zeros((Ntest, 1))
Conf = zeros((Ntest, 1))

for k in range(Ntest):
    Ytest[k, :], Conf[k, :] = model.predict_conf(array([Xtest[k]]))
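
A possible follow-up, not part of the original snippet: plot the predictions against the training data with the confidence bounds returned by predict_conf (assumes matplotlib and the Xtr/Ytr arrays from above).

import matplotlib.pyplot as plt

plt.plot(Xtr, Ytr, 'r.', label='training data')
plt.plot(Xtest, Ytest, 'b-', label='prediction')
plt.fill_between(Xtest,
                 (Ytest - Conf).ravel(),
                 (Ytest + Conf).ravel(),
                 alpha=0.2, label='confidence band')
plt.legend()
plt.show()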
Example #3
# Fragment of a larger script: `model` (an LWPR instance), Xtr, Ytr, Ntr,
# `time`, `ax`, and `t` are defined earlier in the original file.
print(model)

# train the model
# for k in range(20):
#     ind = np.random.permutation(Ntr)
#     mse = 0

#     for i in range(Ntr):
#         yp = model.update(Xtr[ind[i]], Ytr[ind[i]])
#         mse = mse + (Ytr[ind[i], :] - yp)**2

#     nMSE = mse / Ntr / np.var(Ytr)
#     print("#Data: %5i  #RFs: %3i  nMSE=%5.3f" % (model.n_data, model.num_rfs, nMSE))

for i in range(Ntr):
    model.update(Xtr[i], Ytr[i])
print(model.num_rfs)

# test the model with unseen data
Ntest = 5000
Ttest = np.linspace(0, time, Ntest)

Xtest = np.exp(-ax * Ttest / time)
Ytest = np.zeros((Ntest, 1))
Conf = np.zeros((Ntest, 1))

for k in range(Ntest):
    Ytest[k, :], Conf[k, :] = model.predict_conf(
        np.array([Xtest[k] * (Ytr[-1] - Ytr[0])]))

plt.plot(t, Ytr, 'r.')
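
The trained model can also be saved and reloaded through LWPR's binary I/O, as the later examples do (the filename is illustrative):

model.write_binary('lwpr_model.bin')
model_reloaded = LWPR('lwpr_model.bin')
print(model_reloaded.num_rfs)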
Example #4
import argparse
import uuid

import numpy as np
import matplotlib.pyplot as plt
from lwpr import LWPR

# gather_data and gather_data_epoch are project-specific helpers,
# assumed importable from the surrounding codebase.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--environment", type=str, default='AntBulletEnv-v0')
    parser.add_argument("--no_data", type=int, default=10000)
    args = parser.parse_args()

    state_action, state, reward, next_state = gather_data(
        args.no_data, args.environment)
    assert len(state_action) == len(next_state)
    assert len(state_action) == len(reward)
    '''
    data = pickle.load(open('data.p'))
    state2, action2, reward2, next_state2 = data[0]
    state_action2 = np.concatenate([state2, action2], axis=-1)
    state_action = np.concatenate([state_action, state_action2], axis=0)
    reward = np.concatenate([reward, reward2], axis=0)
    next_state = np.concatenate([next_state, next_state2], axis=0)
    '''

    no_data = len(state_action)

    model_state = LWPR(state_action.shape[-1], next_state.shape[-1])
    model_state.init_D = 5. * np.eye(state_action.shape[-1])
    model_state.update_D = True
    model_state.init_alpha = 1. * np.eye(state_action.shape[-1])
    model_state.meta = True
    action_shape = state_action.shape[-1] - next_state.shape[-1]
    model_state.norm_in = np.array(([10.] * state.shape[-1]) +
                                   [2.] * action_shape)

    model_reward = LWPR(state_action.shape[-1], reward.shape[-1])
    model_reward.init_D = 1. * np.eye(state_action.shape[-1])
    model_reward.update_D = True
    model_reward.init_alpha = 20. * np.eye(state_action.shape[-1])
    model_reward.meta = True

    #for k in range(20):
    for k in range(1):
        ind = np.random.permutation(no_data)
        for i in range(no_data):
            print(k, i)
            model_state.update(state_action[ind[i]], next_state[ind[i]])
            #model_state.update(state_action[ind[i]], next_state[ind[i]] - state[ind[i]])
            model_reward.update(state_action[ind[i]], reward[ind[i]])

    uid = str(uuid.uuid4())
    for k in range(10):
        state_action_test, state_test, reward_test, next_state_test = gather_data_epoch(
            1, args.environment)
        '''
        if k % 2 == 0:
            state_action_test, state_test, reward_test, next_state_test = gather_data_epoch(1, args.environment)
        else:
            idx = np.random.randint(1, len(data))
            state_test, action_test, reward_test, next_state_test = data[idx]
            state_action_test = np.concatenate([state_test, action_test], axis=-1)
        '''
        Y = []
        confs = []
        Y_r = []
        confs_r = []
        for i in range(len(state_action_test)):
            y, conf = model_state.predict_conf(state_action_test[i])
            #Y.append(y + state_test[i])
            Y.append(y)
            confs.append(conf)
            y_r, conf_r = model_reward.predict_conf(state_action_test[i])
            Y_r.append(y_r)
            confs_r.append(conf_r)
        Y = np.stack(Y, axis=0)
        confs = np.stack(confs, axis=0)
        Y_r = np.stack(Y_r, axis=0)
        confs_r = np.stack(confs_r, axis=0)

        for i in range(next_state.shape[-1]):
            plt.figure()
            print('Here is the length of the trajectory:',
                  len(next_state_test))
            assert len(next_state_test[:, i:i + 1]) == len(Y[:, i:i + 1])
            #plt.plot(np.arange(len(next_state_test[:, i:i+1])), next_state_test[:, i:i+1] - state_test[:, i:i+1])
            plt.plot(np.arange(len(next_state_test[:, i:i + 1])),
                     next_state_test[:, i:i + 1])
            plt.errorbar(np.arange(len(Y[:, i:i + 1])),
                         Y[:, i:i + 1],
                         yerr=confs[:, i:i + 1],
                         color='r',
                         ecolor='y')
            plt.grid()
            #plt.savefig(args.environment+'_'+'k:'+str(k)+'_'+'dim:'+str(i)+'_'+uid+'.pdf')

        plt.figure()
        plt.plot(np.arange(len(reward_test)), reward_test)
        plt.errorbar(np.arange(len(Y_r)),
                     Y_r,
                     yerr=confs_r,
                     color='r',
                     ecolor='g')
        plt.grid()
        plt.show()
Example #5
import hashlib
import os
import time

import numpy as np
from lwpr import LWPR

# FA is the function-approximator base class from the surrounding project.


class LWPRFA(FA):
    
    parametric = False

    def __init__(self, indim, outdim):
        FA.__init__(self, indim, outdim)
        self.filename = None

    def reset(self):
        FA.reset(self)
        
        # initialize the LWPR function
        self.lwpr = LWPR(self.indim, self.outdim)     
        self.lwpr.init_D = 10.*np.eye(self.indim)
        self.lwpr.init_alpha = 0.1*np.ones([self.indim, self.indim])
        self.lwpr.meta = True
    
    def predict(self, inp):
        """ predict the output for the given input. """
        # the next 3 lines fix a bug when lwpr models are pickled and unpickled again
        # without it, a TypeError is thrown "Expected a double precision numpy array."
        # even though the numpy array is double precision.
        inp = self._asFlatArray(inp)
        inp_tmp = np.zeros(inp.shape)
        inp_tmp[:] = inp
        return self.lwpr.predict(inp_tmp)

    def train(self):
        for i, t in self.dataset:
            i = self._asFlatArray(i)
            t = self._asFlatArray(t)
            self.lwpr.update(i, t)


    def _cleanup(self):
        if self.filename and os.path.exists(self.filename):
            os.remove(self.filename)

    def __getstate__(self):
        """ required for pickle. removes the lwpr model from the dictionary
            and saves it to file explicitly.
        """
        # create a unique hash key for the filename and write the lwpr model to file
        key_material = str(self.lwpr) + time.ctime() + str(np.random.random())
        hashkey = hashlib.sha1(key_material.encode('utf-8')).hexdigest()[:8]
        if not os.path.exists('.lwprmodels'):
            os.makedirs('.lwprmodels')
            
        # remove any old files if existing
        if self.filename:
            os.remove(self.filename)    
               
        self.filename = '.lwprmodels/lwpr_%s.binary'%hashkey
        self.lwpr.write_binary(self.filename)
        
        # remove lwpr from dictionary and return state
        state = self.__dict__.copy()
        del state['lwpr']
        return state
        
    def __setstate__(self, state):
        """ required for pickle. loads the stored lwpr model explicitly.
        """
        self.__dict__.update(state)
        self.lwpr = LWPR(self.filename)
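
A sketch of what the __getstate__/__setstate__ pair above enables: pickling the wrapper while the underlying LWPR model is written to, and restored from, a binary file.

import pickle

fa = LWPRFA(indim=2, outdim=1)
fa.reset()                        # creates the underlying LWPR model
blob = pickle.dumps(fa)           # writes .lwprmodels/lwpr_<hash>.binary
fa_restored = pickle.loads(blob)  # reloads the LWPR model from that file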
Example #6
import os
import timeit

import numpy as np
from scipy.io import loadmat, savemat
from lwpr import LWPR


def train_lwpr(datafile, resultfolder, max_num_train, patience_list,
               improvement_threshold, init_lwpr_setting, hist_window,
               start_epoch=0, cmd_scaler=1.0, modelfile='lwpr_model'):

    curr_path = os.getcwd()
    if resultfolder in os.listdir(curr_path):
        print("subfolder exists")
    else:
        print("subfolder does not exist; creating it")
        os.mkdir(resultfolder)

    # Load Data
    dataset = loadmat(datafile)
    train_data_x, train_data_y = dataset['train_data_x'], dataset['train_data_y']
    valid_data_x, valid_data_y = dataset['valid_data_x'], dataset['valid_data_y']

    num_data, num_valid = train_data_x.shape[0], valid_data_x.shape[0]

    speed_hw, cmd_hw = hist_window[0], hist_window[1]
    input_dim = 2*(speed_hw+cmd_hw)

    # normalize command part
    train_data_x[:, 2*speed_hw:] = train_data_x[:, 2*speed_hw:] * cmd_scaler
    valid_data_x[:, 2*speed_hw:] = valid_data_x[:, 2*speed_hw:] * cmd_scaler

    # Set-up Parameters/Model for Training Procedure
    max_num_trials = max_num_train

    error_hist, best_model_error, prev_train_time = [], np.inf, 0
    initD, initA, penalty = init_lwpr_setting[0], init_lwpr_setting[1], init_lwpr_setting[2]
    w_gen, w_prune = init_lwpr_setting[3], init_lwpr_setting[4]

    best_model_epoch = 0

    if start_epoch < 1:
        # Initialize Two 1-Dimensional Models
        LWPR_model_left = LWPR(input_dim, 1)
        #LWPR_model_left.init_D = initD * np.eye(input_dim)
        tmp_arr = np.ones(input_dim)
        tmp_arr[input_dim-2*cmd_hw:input_dim] = init_lwpr_setting[5]
        LWPR_model_left.init_D = initD * np.diag(tmp_arr)
        LWPR_model_left.update_D = False # True
        #LWPR_model_left.init_alpha = initA * np.eye(input_dim)
        tmp_arr = np.ones(input_dim)
        tmp_arr[input_dim-2*cmd_hw:input_dim] = init_lwpr_setting[5]
        LWPR_model_left.init_alpha = initA * np.diag(tmp_arr)
        LWPR_model_left.penalty = penalty
        LWPR_model_left.meta = True
        LWPR_model_left.meta_rate = 20
        LWPR_model_left.w_gen = w_gen
        LWPR_model_left.w_prune = w_prune

        LWPR_model_right = LWPR(input_dim, 1)
        #LWPR_model_right.init_D = initD * np.eye(input_dim)
        tmp_arr = np.ones(input_dim)
        tmp_arr[input_dim-2*cmd_hw:input_dim] = init_lwpr_setting[5]
        LWPR_model_right.init_D = initD * np.diag(tmp_arr)
        LWPR_model_right.update_D = False # True
        #LWPR_model_right.init_alpha = initA * np.eye(input_dim)
        tmp_arr = np.ones(input_dim)
        tmp_arr[input_dim-2*cmd_hw:input_dim] = init_lwpr_setting[5]
        LWPR_model_right.init_alpha = initA * np.diag(tmp_arr)
        LWPR_model_right.penalty = penalty
        LWPR_model_right.meta = True
        LWPR_model_right.meta_rate = 20
        LWPR_model_right.w_gen = w_gen
        LWPR_model_right.w_prune = w_prune

        patience = patience_list[0]
    else:
        modelfile_name = './' + resultfolder + '/' + modelfile + '_left_epoch' + str(start_epoch-1) + '.bin'
        LWPR_model_left = LWPR(modelfile_name)
        print('\tRead LWPR model for left wheel (%d)' % LWPR_model_left.num_rfs[0])

        modelfile_name = './' + resultfolder + '/' + modelfile + '_right_epoch' + str(start_epoch-1) + '.bin'
        LWPR_model_right = LWPR(modelfile_name)
        print('\tRead LWPR model for right wheel (%d)' % LWPR_model_right.num_rfs[0])

        result_file_name = './' + resultfolder + '/Result_of_training_epoch' + str(start_epoch-1) + '.mat'
        result_file = loadmat(result_file_name)
        prev_train_time = result_file['train_time']
        patience = result_file['patience']
        best_model_error = result_file['best_model_error']
        for cnt in range(start_epoch):
            hist = result_file['history_validation_error'][cnt]
            error_hist.append([hist[0], hist[1], hist[2]])


    # Training Part
    model_prediction = np.zeros(valid_data_y.shape)
    tmp_x, tmp_y = np.zeros((input_dim, 1)), np.zeros((1,1))
    print('start training')
    start_train_time = timeit.default_timer()

    for train_cnt in range(start_epoch, max_num_trials):
        if patience < train_cnt:
            break

        rand_ind = np.random.permutation(num_data)

        for data_cnt in range(num_data):
            tmp_x[:,0] = train_data_x[rand_ind[data_cnt], 0:input_dim]
            tmp_y[0,0] = train_data_y[rand_ind[data_cnt], 0]
            _ = LWPR_model_left.update(tmp_x, tmp_y)

            tmp_y[0,0] = train_data_y[rand_ind[data_cnt], 1]
            _ = LWPR_model_right.update(tmp_x, tmp_y)

            if data_cnt % 5000 == 0:
                print('\ttrain epoch %d, data index %d, #rfs=%d/%d'
                      % (train_cnt, data_cnt,
                         LWPR_model_left.num_rfs, LWPR_model_right.num_rfs))

        for data_cnt in range(num_valid):
            tmp_x[:,0] = valid_data_x[data_cnt, 0:input_dim]
            model_prediction[data_cnt, 0], _ = LWPR_model_left.predict_conf(tmp_x)
            model_prediction[data_cnt, 1], _ = LWPR_model_right.predict_conf(tmp_x)

        diff = abs(valid_data_y - model_prediction)

        new_error = np.asarray([np.sum(diff)/float(num_valid), np.sqrt(np.sum(diff**2)/float(num_valid)), np.max(diff)])
        error_hist.append([new_error[0], new_error[1], new_error[2]])

        # save result of one training epoch
        modelfile_name = './' + resultfolder + '/' + modelfile + '_left_epoch' + str(train_cnt) + '.bin'
        LWPR_model_left.write_binary(modelfile_name)

        modelfile_name = './' + resultfolder + '/' + modelfile + '_right_epoch' + str(train_cnt) + '.bin'
        LWPR_model_right.write_binary(modelfile_name)

        if new_error[1] < best_model_error * improvement_threshold:
            best_model_epoch = train_cnt
            best_model_error = new_error[1]
            patience = max(patience, min(train_cnt+10, int(train_cnt * patience_list[1])) )

            modelfile_name = './' + resultfolder + '/' + modelfile + '_best_left_epoch' + str(train_cnt) + '.bin'
            LWPR_model_left.write_binary(modelfile_name)

            modelfile_name = './' + resultfolder + '/' + modelfile + '_best_right_epoch' + str(train_cnt) + '.bin'
            LWPR_model_right.write_binary(modelfile_name)

        result_file_name = './' + resultfolder + '/Result_of_training_epoch' + str(train_cnt) + '.mat'
        result = {}
        result['train_time'] = timeit.default_timer() - start_train_time + prev_train_time
        result['best_model_error'] = best_model_error
        result['history_validation_error'] = error_hist
        result['patience'] = patience
        result['improvement_threshold'] = improvement_threshold
        result['init_D'] = initD
        result['init_alpha'] = initA
        result['penalty'] = penalty
        result['w_generate_criterion'] = w_gen
        result['w_prune_criterion'] = w_prune
        result['number_speed_in_input'] = 2*speed_hw
        result['number_cmd_in_input'] = 2*cmd_hw
        savemat(result_file_name, result)

        print('\n\tIntermediate result saved successfully')
        print('\t%d-th learning : #Data=%d/%d, #rfs=%d/%d, error=%f\n'
              % (train_cnt, LWPR_model_left.n_data, LWPR_model_right.n_data,
                 LWPR_model_left.num_rfs, LWPR_model_right.num_rfs,
                 error_hist[train_cnt][1]))

    print('end training')
    return best_model_epoch
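
A hypothetical invocation of train_lwpr; every value below is illustrative, with init_lwpr_setting = [init_D, init_alpha, penalty, w_gen, w_prune, cmd_dim_scale] and hist_window = [speed_hw, cmd_hw] as unpacked in the function body.

best_epoch = train_lwpr(datafile='wheel_dataset.mat',
                        resultfolder='lwpr_results',
                        max_num_train=50,
                        patience_list=[15, 2.0],
                        improvement_threshold=0.995,
                        init_lwpr_setting=[50.0, 250.0, 1e-6, 0.2, 0.9, 0.5],
                        hist_window=[4, 4],
                        cmd_scaler=0.1)
print('best epoch: %d' % best_epoch)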
Example #7
import argparse

import gym
import numpy as np
from lwpr import LWPR

# AGENT and gather_data3 are project-specific helpers, assumed importable
# from the surrounding codebase.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--environment", type=str, default='AntBulletEnv-v0')
    parser.add_argument("--no_data_start", type=int, default=10000)
    parser.add_argument("--train_policy_batch_size", type=int, default=30)
    parser.add_argument("--cma_maxiter", type=int, default=1000)
    parser.add_argument("--unroll_steps", type=int, default=200)
    args = parser.parse_args()

    print(args)

    env = gym.make(args.environment)

    input_dim = env.observation_space.shape[0]+env.action_space.shape[0]
    output_dim = env.observation_space.shape[0] + 1
    model = LWPR(input_dim, output_dim)
    model.init_D = 1. * np.eye(input_dim)
    model.update_D = True
    model.init_alpha = 20. * np.eye(input_dim)
    model.meta = True

    agent = AGENT(env.observation_space.shape[0],
                  env.action_space.shape[0],
                  action_space_low=env.action_space.low,
                  action_space_high=env.action_space.high,
                  unroll_steps=args.unroll_steps)

    init_states = np.stack([env.reset() for _ in range(args.train_policy_batch_size)], axis=0)

    # Train the dynamics model on the initial data.
    data_buffer = gather_data3(env, args.no_data_start)
    states, actions, rewards, next_states, _ = zip(*data_buffer)
    states = np.stack(states, axis=0)
    actions = np.stack(actions, axis=0)
    rewards = np.array(rewards)[..., np.newaxis]
    next_states = np.stack(next_states, axis=0)

    state_actions = np.concatenate([states, actions], axis=-1)
    state_diff = next_states - states
    targets = np.concatenate([state_diff, rewards], axis=-1)

    assert len(state_actions) == len(targets)
    ind = np.random.permutation(len(state_actions))
    for i in range(len(state_actions)):
        model.update(state_actions[ind[i]], targets[ind[i]])

    for epoch in range(1000):
        agent._fit(model, init_states, args.cma_maxiter)

        total_rewards = 0.
        state = env.reset()
        while True:
            action = agent._forward(agent.thetas, state[np.newaxis, ...])[0]
            next_state, reward, done, _ = env.step(action)
            state_action = np.concatenate([state, action])
            state_diff = next_state - state
            target = np.append(state_diff, reward)
            model.update(state_action, target)

            total_rewards += float(reward)

            state = next_state.copy()

            if done:
                print('epoch:', epoch, 'total_rewards:', total_rewards)
                break
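
Because the targets above concatenate the state difference with the reward, a one-step prediction from the trained model looks roughly like this (a sketch; predict_conf as in the earlier examples):

pred, conf = model.predict_conf(np.concatenate([state, action]))
predicted_next_state = state + pred[:-1]
predicted_reward = pred[-1]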
Example #8
import numpy as np
from numpy import eye
import matplotlib.pyplot as plt
from lwpr import LWPR

# n_in, n_out, the demonstrations list, and y_min/y_max are assumed to be
# defined earlier in the original script.

# initialize the LWPR model
model = LWPR(n_in, n_out)
model.init_D = 10 * eye(n_in)
model.init_alpha = 0.1 * eye(n_in)
# model.kernel = 'BiSquare'

for i in range(10):
    for demonstration in demonstrations:
        output = np.asarray(demonstration[0])
        context = np.asarray(demonstration[1])

        # print("added output: " + str(output))
        # print("added context: " + str(context))
        
        model.update(context, output)

# generalize
# y = [-1.0, 0.0, 1.0]
y = [-1.0, -0.5, 0.0, 0.5, 1.0]

for y1 in y:
    plt.figure()
    for y2 in y: 
        context = np.asarray([y1, y2])
        output, conf = model.predict_conf(context)

        # print("predicted output: " + str(output))

        plt.title("Predicted trajectory")
        plt.ylim([y_min, y_max])