def train(self, mu0, cov0, numIterTrain, transition, currentNumTupdates, time):
    """--------------------- Training ------------------------"""
    # Init
    likelihoods_fileName, tlabels_fileName, prmlabels_fileName, manual_tlabels, \
        manual_prmlabels, success_fileName, failureFile = createFileNames(
            1, currentNumTupdates)
    self.initialize_clusters(n_primitives, means0=mu0, cov0=cov0,
                             constraints=myConstraints)
    run = 1

    # Train by running the GMM on "run1" of the demonstration data
    print("-------> training run1 ")
    for i in range(numIterTrain):
        if i == numIterTrain - 1:
            # Save and plot likelihoods on the last iteration
            likelihoods_figName = "figures/run1_likelihoods_epochs{0:d}_T{1:d}.png".format(
                self.epoch, currentNumTupdates)
            self.expectation_step(
                run,
                t=time,
                # saveFigure=likelihoods_figName,
                saveFile=likelihoods_fileName,
                T_matrix_APF=transition)
        else:
            # Passing T_matrix_APF means the expectation step uses an
            # Augmented Particle Filter
            self.expectation_step(run, t=time, T_matrix_APF=transition)
        self.maximization_step()
        print("it: {0:d} likelihood function {1:e}".format(
            i, self.get_likelihood()))

    # Save training data
    self.save('references/mean', 'references/covar', 'references/pi')

    # Print training results
    means = np.load('references/mean.npy')
    covar = np.load('references/covar.npy')

    # Save tlabels and prmlabels from the likelihoods file
    getlabels(likelihoods_fileName, tlabelFile=tlabels_fileName,
              prlabelFile=prmlabels_fileName)

    # Compute, save and plot success rate
    success_rate = compute_success_rate(likelihoods_fileName, manual_tlabels,
                                        manual_prmlabels)
    saveSuccessRateFile(success_fileName, success_rate, currentNumTupdates)
    print("-------> training success_rate run1: {0:f}".format(success_rate))
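# A minimal training call, sketched as a comment because the enclosing class
# and its constructor are not shown here. `model`, `GMM`, `mu0`, `cov0`,
# `transition_matrix`, and `t` are hypothetical placeholders for the caller's
# initial means/covariances, APF transition matrix, and time vector:
#
#   model = GMM(...)                                  # hypothetical constructor
#   model.train(mu0, cov0, numIterTrain=50,
#               transition=transition_matrix,
#               currentNumTupdates=1, time=t)
#
# After training, the fitted parameters are written to references/mean.npy,
# references/covar.npy, and references/pi.npy, which test() reloads.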
def test(self, run_number, numIterTest, transition, currentNumTupdates, time):
    """--------------------- Testing ------------------------

    The following code will run iff you specify a run number on the
    command line:

        python gmm.py [run_number]

    *note: a run is the raw sensor data corresponding to one human
    demonstration of the full task
    """
    offset = 0.01
    success = False
    likelihoods_fileName, tlabels_fileName, prmlabels_fileName, manual_tlabels, \
        manual_prmlabels, success_fileName, failureFile = createFileNames(
            run_number, currentNumTupdates)

    # Testing
    print("-------> testing on: ", testfile, "-----------")
    while not success and offset < 10000:
        success = True
        offset = offset * 10
        print("offset: ", offset)
        self.offset = offset
        try:
            self.initialize_clusters_from_savedfiles(
                n_primitives,
                'references/mean.npy',
                'references/covar.npy',
                'references/pi.npy',
                constraints=myConstraints)
            for i in range(numIterTest):
                if i == numIterTest - 1:
                    # Save and plot likelihoods on the last iteration
                    likelihoods_figName = "figures/run{0:d}_likelihoods_epochs{1:d}_T{2:d}.png".format(
                        run_number, self.epoch, currentNumTupdates)
                    self.expectation_step(
                        run_number,
                        t=time,
                        # saveFigure=likelihoods_figName,
                        saveFile=likelihoods_fileName,
                        T_matrix_APF=transition)
                else:
                    self.expectation_step(run_number, t=time,
                                          T_matrix_APF=transition,
                                          saveFile=likelihoods_fileName)
                self.maximization_step()
                print("it: {0:d} likelihood function {1:e}".format(
                    i, self.get_likelihood()))
        except Exception as e:
            # Mark this attempt as failed so the loop retries with a larger offset
            print("error: ", e)
            success = False

    # Save testing data
    self.save('references/meantest', 'references/covartest', 'references/pitest')

    # Print testing results
    means = np.load('references/meantest.npy')
    covar = np.load('references/covartest.npy')

    # Save tlabels and prmlabels from the likelihoods file
    getlabels(likelihoods_fileName, tlabelFile=tlabels_fileName,
              prlabelFile=prmlabels_fileName)

    # Compute, save and plot success rate
    success_rate = compute_success_rate(likelihoods_fileName, manual_tlabels,
                                        manual_prmlabels)
    saveSuccessRateFile(success_fileName, success_rate, currentNumTupdates)
    print("-------> testing success_rate run{0:d}: {1:f}".format(
        run_number, success_rate))
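# A sketch of the command-line entry point described in the docstring above,
# assuming the enclosing class is instantiated as `model` and that `GMM`,
# `mu0`, `cov0`, `transition_matrix`, `t`, and the iteration counts are
# supplied by the caller (all hypothetical names, not defined in this file):
#
#   if __name__ == "__main__":
#       model = GMM(...)                              # hypothetical constructor
#       if len(sys.argv) > 1:                         # python gmm.py [run_number]
#           run_number = int(sys.argv[1])
#           model.test(run_number, numIterTest=20,
#                      transition=transition_matrix,
#                      currentNumTupdates=1, time=t)
#       else:
#           model.train(mu0, cov0, numIterTrain=50,
#                       transition=transition_matrix,
#                       currentNumTupdates=1, time=t)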