def NAR_Invoke(Proximity, VisualEvents):
    SeenSomethingMissionRelevant = False
    if Proximity:
        NAR.AddInput("<obstacle --> [observed]>. :|:") #TODO also encode color
    else:
        if closed_gripper:
            NAR.AddInput("<gripper --> [closed]>. :|:")
        else:
            NAR.AddInput("<gripper --> [open]>. :|:")
        if len(VisualEvents) > 0:
            Observed = False
            for obj in VisibleObjects:
                for v in VisualEvents:
                    print(v)
                    if obj in v:
                        NAR.AddInput(v)
                        SeenSomethingMissionRelevant = True
        if not SeenSomethingMissionRelevant:
            NAR.AddInput("(! <obstacle --> [observed]>). :|:") #TODO also encode color
    action = None
    if Proximity and not SeenSomethingMissionRelevant: #Don't allow forward as a reflex, to not damage hardware
        executions = NAR.AddInput("(! <obstacle --> [observed]>)! :|:")["executions"]
        action = ExecMotorCommands(executions, DisableForward=True)
    else:
        executions = NAR.AddInput("<mission --> [progressed]>! :|:" if SeenSomethingMissionRelevant else "<{SELF} --> [moved]>! :|:")["executions"]
        executions += NAR.AddInput("5")["executions"] #let NARS run 5 additional inference steps
        action = ExecMotorCommands(executions)
    if action == "forward":
        NAR.AddInput("<{SELF} --> [moved]>. :|:")
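# NAR_Invoke hands the chosen operations to an ExecMotorCommands helper that is not
# part of this fragment. A minimal sketch of such a helper, assuming (as in ONA's
# Python interface) that each execution is a dict with an "operator" key; left(),
# right(), forward(), pick() and drop() are hypothetical stand-ins for the robot's
# real motor and gripper calls:
def left():    print("motor: turn left")      # placeholder motor call
def right():   print("motor: turn right")     # placeholder motor call
def forward(): print("motor: drive forward")  # placeholder motor call
def pick():    print("gripper: close")        # placeholder gripper call
def drop():    print("gripper: open")         # placeholder gripper call

def ExecMotorCommands(executions, DisableForward=False):
    action = None
    for execution in executions:
        op = execution["operator"]
        if op == "^left":
            left()
            action = "left"
        elif op == "^right":
            right()
            action = "right"
        elif op == "^forward" and not DisableForward:
            forward()
            action = "forward"
        elif op == "^pick":
            pick()
            action = "pick"
        elif op == "^drop":
            drop()
            action = "drop"
    return action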
def add_nars(self, s):
    """Send one line of Narsese/commands to NARS and log input and reply to the console and the Qt text browser."""
    r = NAR.AddInput(s)
    p = "\n==>> " + s
    if 'raw' in r:
        p += "\n<< " + r['raw']
    else:
        p += "\n<< " + str(r)
    now_info = p
    p += self.last_info + "\n" + "#" * 90 + "\n" + p
    self.last_info = now_info
    print(p)
    p += '\n' * 6
    self.text_browser.setPlainText(p)
    self.text_browser.moveCursor(Qt.QTextCursor.End)
def NARAddInput(narsese):
    print(narsese)
    return NAR.AddInput(narsese)
BackgroundKnowledge = """
//Let's say the robot already learned to navigate from a previous experiment:
//meaning to move forward if nothing is seen (due to innate boredom/forward goal)
<(<nothing --> [observed]> &/ ^up) =/> forward>.
//and to move left when an obstacle is in front (due to innate collision pain to avoid)
<(<obstacle --> [observed]> &/ ^left) =/> (! collision)>.
//Mission description:
//1. Find a bottle
<(<bottle --> [smallerX]> &/ ^left) =/> <mission --> [progressed]>>.
<(<bottle --> [largerX]> &/ ^right) =/> <mission --> [progressed]>>.
//2. Grab it if it's in front
<((open &/ <bottle --> [equalX]>) &/ ^down) =/> <mission --> [progressed]>>.
//3. Put it next to the other bottle
<((closed &/ <bottle --> [equalX]>) &/ ^say) =/> <mission --> [progressed]>>.
"""

k = 0
for bg in BackgroundKnowledge.split("\n"):
    bgstr = bg.strip()
    if len(bgstr) > 0:
        NAR.AddInput(bgstr)
NARAddInput("*motorbabbling=false")

while True:
    #1. Actively retrieve sensor input
    Proximity = scan()
    VisualEvents = subprocess.check_output("python3 vision_to_narsese.py once", shell=True, stderr=subprocess.STDOUT).decode("utf-8").split('\n')
    #2. Let NARS decide what to do
    NAR_Invoke(Proximity, VisualEvents)
    k += 1
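# The loop above polls a scan() helper for proximity that is not shown in this
# fragment. A minimal sketch of what such a helper might do, assuming a distance
# sensor; read_distance_cm() and the 40 cm threshold are hypothetical placeholders,
# not the project's actual sensor API:
def read_distance_cm():
    return 100.0                        # stub; replace with the real sensor reading

def scan():
    distance = read_distance_cm()       # one distance reading in cm
    return distance < 40                # report an obstacle when something is closer than ~40 cm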
import math
from numpy import array, zeros, ones, shape, dot, hstack, identity, mat, sqrt, linalg
# NAR (the AR sub-model), divideDataset and RMSE are project helpers defined elsewhere.


class ARMA:
    """
    AutoRegressive–Moving-Average model
    """
    max_order = 20
    lbd = 7
    nar = NAR()

    def crossValidation(self, views, n_Day, serials_episodes_records, n_fold, type=0):
        q_min = 0
        p_min = 0
        rmse_min = 1000
        w_min = 0
        for q in range(2, self.max_order):
            for p in range(2, self.max_order):
                # print p,q
                w, rmse = self.run(p, q, views, n_Day, serials_episodes_records, n_fold, type)
                # print rmse
                # print p,q,rmse
                if rmse < rmse_min:
                    q_min = q
                    p_min = p
                    rmse_min = rmse
                    w_min = w
        # print p_min,q_min,w_min,rmse_min
        return p_min, q_min, w_min, rmse_min

    def run(self, p, q, views, n_Day, serials_episodes_records, n_fold, type=0):
        # p,w, rmse = self.nar.crossValidation(views,n_Day,serials_episodes_records, n_fold)
        w, rmse = self.nar.run(p, views, n_Day, serials_episodes_records, n_fold, type)
        if type == 0:
            x, epsilon = self.getRegressionMatrix_s(p, q, w, views, n_Day, serials_episodes_records)
        else:
            x, epsilon = self.getRegressionMatrix_m(p, q, w, views, n_Day, serials_episodes_records)
        # print x.shape,epsilon.shape
        # for e in epsilon:
        #     print e
        rmse = 0
        w = 0
        for k in range(1, n_fold + 1):
            training_x, test_x = divideDataset(x, n_fold, k)
            training_epsilon, test_epsilon = divideDataset(epsilon, n_fold, k)
            temp_w = self.training(training_x, training_epsilon)
            t, temp_x = self.getKernelMatrix(test_x)
            phi = hstack([temp_x, test_epsilon])
            predicted = dot(phi, temp_w)
            rmse += RMSE(predicted, t)
            w += temp_w
        rmse /= n_fold
        # print rmse
        w /= n_fold
        return w, rmse

    def getKernelMatrix(self, x):
        m, d = shape(x)
        t = x[:, d - 1]  # target vector
        phi = ones([m, d])
        phi[:, 1:] = x[:, 0:(d - 1)]
        return t, phi

    def training(self, x, epsilon):
        t, temp_x = self.getKernelMatrix(x)
        phi = hstack([temp_x, epsilon])
        A = dot(phi.T, phi)
        A += self.lbd * identity(A.shape[0])
        w = linalg.inv(A) * dot(phi.T, t)
        return w

    def getRegressionMatrix_s(self, p, q, w, views, n_Day, serials_episodes_records):
        """
        get the Regression Matrix when predicting in the single time interval
        :param p: order of AR
        :param q: order of MA
        :param w: coefficients (w0, w1, ..., wp)
        :param views: raw popularity data (serial * episode * time)
        :param n_Day: predict the popularity during the n-th time interval, range: 0, 1, 2, ...
        :param serials_episodes_records: the number of records of each episode of every serial
        """
        teleplay_num = len(serials_episodes_records)
        s1, s2, s3 = shape(views)
        temp_epsilon = array([zeros([s2, s3]) for i in range(s1)])
        # calculate residual errors
        for s in range(teleplay_num):
            episodes_num = len(serials_episodes_records[s])
            for nth in range(p, episodes_num):
                if serials_episodes_records[s][nth + 1] < n_Day:
                    break
                instance = views[s, nth - p:nth + 1, n_Day]  # t-p,...,t-2,t-1,t
                if instance.min() == 0:
                    continue
                row_max = instance.max()
                instance = instance * 1.0 / row_max
                temp_epsilon[s][nth][0] = instance[-1] - (w[0] + dot(instance[:-1], w[1:]))
        # generate matrix
        x = []
        epsilon = []
        for s in range(teleplay_num):
            episodes_num = len(serials_episodes_records[s])
            for nth in range(p + q, episodes_num):
                if serials_episodes_records[s][nth + 1] < n_Day:
                    break
                instance = views[s, nth - p:nth + 1, n_Day]  # t-p,...,t-2,t-1,t
                if instance.min() == 0:
                    continue
                row_max = instance.max()
                instance = instance * 1.0 / row_max
                x.append(instance)
                epsilon.append(temp_epsilon[s, nth - q:nth, 0])
        return mat(x), mat(epsilon)

    def getRegressionMatrix_m(self, p, q, w, views, n_Day, serials_episodes_records):
        """
        get the Regression Matrix when predicting in the multiple time interval
        :param p: order of AR
        :param q: order of MA
        :param w: coefficients (w0, w1, ..., wp)
        :param views: raw popularity data (serial * episode * time)
        :param n_Day: predict the popularity during the n-th time interval, range: 0, 1, 2, ...
        :param serials_episodes_records: the number of records of each episode of every serial
        """
        teleplay_num = len(serials_episodes_records)
        l = int(math.floor(sqrt(2 * max([p, q]) + 1.1)))
        s1, s2, s3 = shape(views)
        temp_epsilon = array([zeros([s2, s3]) for i in range(s1)])
        # calculate residual errors
        for s in range(teleplay_num):
            episodes_num = len(serials_episodes_records[s])
            for nth in range(l, episodes_num):
                if serials_episodes_records[s][nth + 1] < n_Day:
                    break
                instance = zeros([1, p + 1])[0, :]  # t-p,...,t-2,t-1,t
                instance[p] = views[s, nth, n_Day]
                k = 1
                for i in range(1, l + l):
                    for j in range(0, i):
                        if k >= (p + 1):
                            break
                        instance[p - k] = views[s, nth - i, n_Day + j]
                        k += 1
                if instance.min() == 0:
                    continue
                row_max = instance.max()
                instance = instance * 1.0 / row_max
                temp_epsilon[s][nth][0] = instance[-1] - (w[0] + dot(instance[:-1], w[1:]))
        # generate matrix
        x = []
        epsilon = []
        for s in range(teleplay_num):
            episodes_num = len(serials_episodes_records[s])
            for nth in range(p + q, episodes_num):
                if serials_episodes_records[s][nth + 1] < n_Day:
                    break
                instance = zeros([1, p + 1])[0, :]  # t-p,...,t-2,t-1,t
                instance[p] = views[s, nth, n_Day]
                row_e = zeros([1, q])[0, :]
                k = 1
                for i in range(1, l + l):
                    for j in range(0, i):
                        if k < (p + 1):
                            instance[p - k] = views[s, nth - i, 0]
                        if k < (q + 1):
                            row_e[q - k] = temp_epsilon[s, nth - i, 0]
                        if (k >= (p + 1)) and (k >= (q + 1)):
                            break
                        k += 1
                if (instance.min() == 0) or (row_e.max() == 0):
                    continue
                row_max = instance.max()
                instance = instance * 1.0 / row_max
                x.append(instance)
                epsilon.append(row_e)
        # print x,'abc'
        return mat(x), mat(epsilon)
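# The training() method above is ridge-regularized least squares over a design
# matrix [bias | lagged views | lagged residuals]. A self-contained numpy sketch of
# the same closed-form solution on synthetic data (shapes and values are
# illustrative assumptions, not the project's data):
import numpy as np

def ridge_arma_fit(lagged, residuals, t, lbd=7):
    # lagged: (m, p) normalized past view counts, residuals: (m, q) past errors,
    # t: (m,) current view counts, lbd: same ridge penalty as ARMA.lbd
    m = lagged.shape[0]
    phi = np.hstack([np.ones((m, 1)), lagged, residuals])   # bias + AR + MA columns
    A = phi.T @ phi + lbd * np.identity(phi.shape[1])
    return np.linalg.solve(A, phi.T @ t)                    # stable form of inv(A) @ phi.T @ t

rng = np.random.default_rng(0)
w = ridge_arma_fit(rng.random((50, 3)), rng.random((50, 2)), rng.random(50))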
//What's expected by the robot to learn:
//move forward if nothing is seen (due to innate boredom/move goal)
//<((! <obstacle --> [observed]>) &/ ^forward) =/> <{SELF} --> [moved]>>.
//move left when an obstacle is in front (due to innate collision pain to avoid)
//<(<obstacle --> [observed]> &/ ^left) =/> (! <obstacle --> [observed]>)>.
//How to focus on objects (comment out if it should also be learned!)
<(<$1 --> [smallerX]> &/ ^left) =/> <$1 --> [equalX]>>.
<(<$1 --> [largerX]> &/ ^right) =/> <$1 --> [equalX]>>.
//Mission description:
//1. Pick a bottle if it's in front
<((<gripper --> [open]> &/ <bottle --> [equalX]>) &/ ^pick) =/> <mission --> [progressed]>>.
//2. Drop the grabbed bottle at the other bottles
<((<gripper --> [closed]> &/ <bottle --> [equalX]>) &/ ^drop) =/> <mission --> [progressed]>>.
"""

NAR.AddInput("*babblingops=3")
NAR.AddInput("*motorbabbling=0.3")
NAR.AddInput("*setopname 1 ^left")
NAR.AddInput("*setopname 2 ^right")
NAR.AddInput("*setopname 3 ^forward")
NAR.AddInput("*setopname 4 ^pick")
NAR.AddInput("*setopname 5 ^drop")

k = 0
for bg in BackgroundKnowledge.split("\n"):
    bgstr = bg.strip()
    if len(bgstr) > 0:
        NAR.AddInput(bgstr)

while True:
    #1. Actively retrieve sensor input