Example #1
	def generate_change_points_1(self):
		"""
		Generates changepoints by clustering within a demonstration.
		"""

		cp_index = 0

		for demonstration in self.list_of_demonstrations:

			N = self.data_N[demonstration]

			gmm = mixture.GMM(n_components = self.n_components_cp, covariance_type='full', n_iter=10000, thresh = 5e-5)
			gmm.fit(N)
			Y = gmm.predict(N)
	
			self.save_cluster_metrics(N, Y, 'cpts_' + demonstration)

			start, end = utils.get_start_end_annotations(constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER
				+ demonstration + "_" + constants.CAMERA + ".p")

			size_of_X = self.data_X_size[demonstration]

			for i in range(len(Y) - 1):

				if Y[i] != Y[i + 1]:
					change_pt = N[i][size_of_X:]
					self.append_cp_array(change_pt)
					self.map_cp2frm[cp_index] = start + i * self.sr
					self.map_cp2demonstrations[cp_index] = demonstration
					self.list_of_cp.append(cp_index)

					cp_index += 1
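A minimal, self-contained sketch of the same idea, assuming a modern scikit-learn install (GaussianMixture instead of the deprecated mixture.GMM). detect_changepoints and the synthetic data below are illustrative stand-ins, not the repo's code: a change point is emitted wherever the hard cluster assignment flips between consecutive frames.

import numpy as np
from sklearn.mixture import GaussianMixture

def detect_changepoints(N, n_components=5, sampling_rate=1, start_frame=0):
    """Return frame numbers where consecutive GMM cluster labels differ."""
    labels = GaussianMixture(n_components=n_components, covariance_type='full').fit_predict(N)
    return [start_frame + i * sampling_rate
            for i in range(len(labels) - 1) if labels[i] != labels[i + 1]]

if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # Two synthetic 4-D "segments" with different means -> one expected change point near index 50
    N = np.vstack([rng.normal(0, 1, (50, 4)), rng.normal(5, 1, (50, 4))])
    print(detect_changepoints(N, n_components=2))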
Example #2
def plot_broken_barh_from_pickle(demonstration, output_fname, labels_manual, colors_manual, labels_automatic_W,
	colors_automatic_W, labels_automatic_Z, colors_automatic_Z, labels_automatic_ZW, colors_automatic_ZW):

	PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + constants.CAMERA + ".p"
	start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
	length = end - start

	fig, ax = plt.subplots()

	# Plot 1) Manual 2) Time clusters
	ax.broken_barh(labels_manual, (17, 2), facecolors = colors_manual)
	ax.broken_barh(preprocess_labels(labels_automatic_W), (13, 2), facecolors = colors_automatic_W)
	ax.broken_barh(preprocess_labels(labels_automatic_Z), (9, 2), facecolors = colors_automatic_Z)
	ax.broken_barh(preprocess_labels(labels_automatic_ZW), (5, 2), facecolors = colors_automatic_ZW)

	TASK = constants.TASK_NAME
	if (TASK in ["lego", "plane"]):
		end = end + 20
	elif (TASK in ["000", "010", "011", "100"]):
		end = end + 10
	else:
		end = end + 50

	ticks = get_ticks(labels_manual)
	ax.set_ylim(3,21)
	ax.set_xlim(0, end)
	ax.set_xlabel('Frame number')
	ax.set_yticks([6, 10, 14, 18])
	ax.set_yticklabels(['Both (k + z)','Visual (z)','Kinematics (k)', 'Manual'])

	if output_fname:
		plt.savefig(output_fname)
	else:
		plt.show()
	pass
Example #3
	def generate_change_points_2(self):
		"""
		Generates changepoints by clustering across demonstrations.
		"""
		cp_index = 0

		for demonstration in self.list_of_demonstrations:
			W = self.data_W[demonstration]
			Z = self.data_Z[demonstration]

			PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"
			annotations = pickle.load(open(PATH_TO_ANNOTATION, "rb"))
			manual_labels = utils.get_chronological_sequences(annotations)
			start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

			for elem in manual_labels:
				frm = elem[1]
				change_pt_W = W[(frm - start)/self.sr]
				change_pt_Z = Z[(frm - start)/self.sr]
				change_pt = utils.safe_concatenate(change_pt_W, change_pt_Z)

				self.append_cp_array(change_pt)
				self.map_cp2demonstrations[cp_index] = demonstration
				self.map_cp2frm[cp_index] = frm
				self.list_of_cp.append(cp_index)
				cp_index += 1
Example #4
def preprocess(list_of_demonstrations):
    camera = constants.CAMERA

    for demonstration in list_of_demonstrations:
        PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + camera + ".p"
        start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

        OLD_FRM_PATH = constants.PATH_TO_DATA + "frames_unprocessed/" + demonstration + "_" + camera + "/"
        NEW_FRM_PATH = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + camera + "/"

        command = "mkdir " + NEW_FRM_PATH
        print command
        os.mkdir(NEW_FRM_PATH)

        for frm in range(start, end + 1):
            OLD_FRM_NAME = utils.get_full_image_path(OLD_FRM_PATH, frm)

            NEW_FRM_NAME = utils.get_full_image_path(NEW_FRM_PATH, frm)
            NEW_FRM_NAME_UNSCALED = utils.get_full_image_path(
                NEW_FRM_PATH + "unscaled_", frm)

            command = "ffmpeg -i " + OLD_FRM_NAME + " -filter:v " + constants.CROP_PARAMS[
                camera] + " " + NEW_FRM_NAME_UNSCALED
            print command
            os.system(command)

            command = "ffmpeg -i " + NEW_FRM_NAME_UNSCALED + " -vf scale=640:480 " + NEW_FRM_NAME
            print command
            os.system(command)

            command = "rm " + NEW_FRM_NAME_UNSCALED
            print command
            os.system(command)
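The same crop-then-scale pipeline can be expressed without shell string concatenation. The sketch below is an illustration under assumptions (ffmpeg on PATH, a crop_filter such as "crop=640:480:0:0"), not the repo's preprocess(); it uses subprocess argument lists so frame paths cannot break the command.

import os
import subprocess

def crop_and_scale_frame(old_frm, new_frm, crop_filter, width=640, height=480):
    """Crop a frame with ffmpeg, then rescale it, using argument lists instead of os.system."""
    unscaled = new_frm + ".unscaled.jpg"
    subprocess.check_call(["ffmpeg", "-y", "-i", old_frm, "-filter:v", crop_filter, unscaled])
    subprocess.check_call(["ffmpeg", "-y", "-i", unscaled, "-vf", "scale=%d:%d" % (width, height), new_frm])
    os.remove(unscaled)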
Example #5
def featurize_1(list_of_demonstrations, kinematics, sr):
	"""
	Given .p (pickle) files of SIFT features, this function concatenates
	the Z (visual) vectors with the W (kinematic) vectors to produce X vectors, dumped as pickle files.
	"""
	print "FEATURIZATION 1"

	data_X_1 = {}
	data_X_2 = {}
	for demonstration in list_of_demonstrations:
		print "SIFT for ", demonstration
		start, end = utils.get_start_end_annotations(constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER
						+ demonstration + "_" + constants.CAMERA +".p")

		W = kinematics[demonstration]
		W_sampled = utils.sample_matrix(W, sampling_rate = sr)


		PATH_TO_SIFT = constants.PATH_TO_DATA + "sift_FCED/SIFT_"+ demonstration
		Z = pickle.load(open(PATH_TO_SIFT + "_1.p", "rb"))
		Z = Z[start:end + 1]
		Z_sampled_1 = utils.sample_matrix(Z, sampling_rate = sr)

		Z = pickle.load(open(PATH_TO_SIFT + "_2.p", "rb"))
		Z = Z[start:end + 1]
		Z_sampled_2 = utils.sample_matrix(Z, sampling_rate = sr)

		assert Z_sampled_1.shape[0] == W_sampled.shape[0]
		assert Z_sampled_2.shape[0] == W_sampled.shape[0]

		data_X_1[demonstration] = np.concatenate((W_sampled, Z_sampled_1), axis = 1)
		data_X_2[demonstration] = np.concatenate((W_sampled, Z_sampled_2), axis = 1)

	pickle.dump(data_X_1, open(PATH_TO_FEATURES + "SIFT_1.p", "wb"))
	pickle.dump(data_X_2, open(PATH_TO_FEATURES + "SIFT_2.p", "wb"))
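utils.sample_matrix is not shown on this page; a plausible stand-in (an assumption, the real helper may differ) is simple row subsampling, so that the kinematic matrix W and the visual matrix Z end up with the same number of rows before concatenation:

import numpy as np

def sample_matrix(X, sampling_rate=1):
    """Keep every sampling_rate-th row of X."""
    return np.asarray(X)[::sampling_rate]

W = np.zeros((100, 7))                                   # e.g. 100 frames of 7-D kinematics
assert sample_matrix(W, sampling_rate=10).shape == (10, 7)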
Example #6
def featurize_sift(list_of_demonstrations, kinematics, sr):
	"""
	Extracts SIFT features for all frames in list_of_demonstrations.
	"""

	data_X_xy = {}
	data_X_x = {}

	for demonstration in list_of_demonstrations:
		print "SIFT for ", demonstration
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"

		Z = []
		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
		for frm in range(start, end + 1):
			PATH_TO_IMAGE = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/"
			Z.append(sift.run_surf_frame(utils.get_full_image_path(PATH_TO_IMAGE, frm)))

		Z = np.array(Z)
		Z = Z.reshape(Z.shape[0],1)

		W = kinematics[demonstration]
		W_onlyx = utils.only_X(W)

		X = np.concatenate((W, Z), axis = 1)
		X_onlyx = np.concatenate((W_onlyx, Z), axis = 1)

		data_X_xy[demonstration] = X
		data_X_x[demonstration] = X_onlyx

	pickle.dump(data_X_xy, open(PATH_TO_FEATURES + "SIFT_xy.p", "wb"))
	pickle.dump(data_X_x, open(PATH_TO_FEATURES + "SIFT_x.p", "wb"))
Example #7
def featurize_sift(list_of_demonstrations, kinematics, sr):
    """
	Extracts SIFT features for all frames in list_of_demonstrations.
	"""

    data_X_xy = {}
    data_X_x = {}

    for demonstration in list_of_demonstrations:
        print "SIFT for ", demonstration
        PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(
            constants.CAMERA) + ".p"

        Z = []
        start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
        for frm in range(start, end + 1):
            PATH_TO_IMAGE = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/"
            Z.append(
                sift.run_surf_frame(
                    utils.get_full_image_path(PATH_TO_IMAGE, frm)))

        Z = np.array(Z)
        Z = Z.reshape(Z.shape[0], 1)

        W = kinematics[demonstration]
        W_onlyx = utils.only_X(W)

        X = np.concatenate((W, Z), axis=1)
        X_onlyx = np.concatenate((W_onlyx, Z), axis=1)

        data_X_xy[demonstration] = X
        data_X_x[demonstration] = X_onlyx

    pickle.dump(data_X_xy, open(PATH_TO_FEATURES + "SIFT_xy.p", "wb"))
    pickle.dump(data_X_x, open(PATH_TO_FEATURES + "SIFT_x.p", "wb"))
Example #8
def preprocess(list_of_demonstrations):
	camera = constants.CAMERA

	for demonstration in list_of_demonstrations:
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + camera + ".p"
		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

		OLD_FRM_PATH = constants.PATH_TO_DATA + "frames_unprocessed/" + demonstration + "_" + camera + "/"
		NEW_FRM_PATH = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + camera + "/"

		command = "mkdir " + NEW_FRM_PATH
		print command
		os.mkdir(NEW_FRM_PATH)

		for frm in range(start, end + 1):
			OLD_FRM_NAME = utils.get_full_image_path(OLD_FRM_PATH, frm)

			NEW_FRM_NAME = utils.get_full_image_path(NEW_FRM_PATH, frm)
			NEW_FRM_NAME_UNSCALED = utils.get_full_image_path(NEW_FRM_PATH + "unscaled_", frm)

			command = "ffmpeg -i " + OLD_FRM_NAME + " -filter:v " + constants.CROP_PARAMS[camera] + " " + NEW_FRM_NAME_UNSCALED
			print command
			os.system(command)

			command = "ffmpeg -i " + NEW_FRM_NAME_UNSCALED + " -vf scale=640:480 " + NEW_FRM_NAME
			print command
			os.system(command)

			command = "rm " + NEW_FRM_NAME_UNSCALED
			print command
			os.system(command)	
Example #9
def plot_broken_barh_from_pickle(demonstration, output_fname, labels_manual,
                                 colors_manual, labels_automatic_W,
                                 colors_automatic_W, labels_automatic_Z,
                                 colors_automatic_Z, labels_automatic_ZW,
                                 colors_automatic_ZW):

    PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + constants.CAMERA + ".p"
    start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
    length = end - start

    fig, ax = plt.subplots()

    # Plot 1) Manual 2) Time clusters
    ax.broken_barh(labels_manual, (17, 2), facecolors=colors_manual)
    ax.broken_barh(preprocess_labels(labels_automatic_W), (13, 2),
                   facecolors=colors_automatic_W)
    ax.broken_barh(preprocess_labels(labels_automatic_Z), (9, 2),
                   facecolors=colors_automatic_Z)
    ax.broken_barh(preprocess_labels(labels_automatic_ZW), (5, 2),
                   facecolors=colors_automatic_ZW)

    TASK = constants.TASK_NAME
    if (TASK in ["lego", "plane"]):
        end = end + 20
    elif (TASK in ["000", "010", "011", "100"]):
        end = end + 10
    else:
        end = end + 50

    ticks = get_ticks(labels_manual)
    ax.set_ylim(3, 21)
    ax.set_xlim(0, end)
    ax.set_xlabel('Frame number')
    ax.set_yticks([6, 10, 14, 18])
    ax.set_yticklabels(
        ['Both (k + z)', 'Visual (z)', 'Kinematics (k)', 'Manual'])

    if output_fname:
        plt.savefig(output_fname)
    else:
        plt.show()
    pass
Example #10
def run_sift_images():
	list_of_demonstrations = ["Suturing_E001",]
	for demonstration in list_of_demonstrations:
		print "SIFT for ", demonstration
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"

		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
		for frm in range(start, end + 1):
			if ((frm % 3) == 0):
				PATH_TO_IMAGE = utils.get_full_image_path(constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/", frm)

				print PATH_TO_IMAGE
				img = cv2.imread(PATH_TO_IMAGE)
				sift = cv2.SIFT(nfeatures = 50)
				kp, des = sift.detectAndCompute(img, None)
				img = cv2.drawKeypoints(img, kp)
				cv2.imshow('sift',img)
				cv2.imwrite('../sift_images/' + str(frm) +".jpg",img)
Example #11
def plot_interactive():
	results = pickle.load(open("pickle_files/Suturing_E001_changepoints_Z2.p", "rb"))
	jackknife_index = 3
	demonstration = "Suturing_E001"

	changepoints = results[demonstration]['changepoints'][jackknife_index]
	labels = results[demonstration]['plot_labels_automatic']

	PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"
	start_d, end_d = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

	changepoints_processed = []
	for elem in changepoints:
		changepoints_processed.append((elem[0] - start_d)/12)

	labels_processed = []
	for elem in labels:
		labels_processed.append((elem[0] - start_d)/12)

	total_frames = [int(elem[0]) for elem in changepoints]
	total_frames += [int(elem[0]) for elem in labels]
	total_frames.sort()

	for end_frame in total_frames:

		# Frame
		ax1 = plt.subplot(121)
		PATH_TO_FIGURE = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + str(constants.CAMERA) + "/"
		im = mpimg.imread(utils.get_full_image_path(PATH_TO_FIGURE, end_frame))
		ax1.set_title(str(end_frame))
		ax1.xaxis.set_visible(False)
		ax1.yaxis.set_visible(False)
		ax1.imshow(im)

		# AlexNet
		ax2 = plt.subplot(122)
		data = pickle.load(open("pickle_files/Suturing_E001_AlexNet_dimred.p", "rb"))
		ax2.set_title('AlexNet')
		ax2.set_ylim([-0.1, 1.1])
		ax2.set_xlim([-0.1, 1.1])
		ax2.xaxis.set_visible(False)
		ax2.yaxis.set_visible(False)
		plot_AlexNet(data, demonstration, changepoints = changepoints_processed, plotter = ax2, labels = labels_processed, plot_tsne = True, end_frame = (end_frame - start_d)/12, interactive_mode = True)
Example #12
def plot_all():
	# Suturing_E001
	# changepoints = pickle.load(open("pickle_files/Suturing_E001_changepoints_Z1.p", "rb"))
	# jackknife_index = 1
	demonstration = "Suturing_E001"

	PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"
	start_d, end_d = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

	# changepoints = changepoints[jackknife_index]
	changepoints_processed = []

	results = pickle.load(open("pickle_files/Suturing_E001_changepoints_Z2.p", "rb"))
	jackknife_index = 3
	demonstration = "Suturing_E001"

	changepoints = results[demonstration]['changepoints'][jackknife_index]
	labels = results[demonstration]['plot_labels_automatic']

	for elem in changepoints:
		changepoints_processed.append((elem[0] - start_d)/6)

	labels_processed = []
	for elem in labels:
		labels_processed.append((elem[0] - start_d)/6)

	# VGG
	data = pickle.load(open("pickle_files/Suturing_E001_VGG_dimred.p", "rb"))
	plot_VGG(data, demonstration, changepoints = None, labels = None, plot_tsne = True)

	# AlexNet
	data = pickle.load(open("pickle_files/Suturing_E001_AlexNet_dimred.p", "rb"))
	plot_AlexNet(data, demonstration, changepoints = None, labels = None, plot_tsne = True)

	# Raw Pixels
	data = pickle.load(open("pickle_files/Suturing_E001_raw_pixel_dimred.p", "rb"))
	plot_raw_image_pixels(data, demonstration, changepoints = None, labels = None, plot_tsne = True)

	# SIFT
	data = pickle.load(open("pickle_files/Suturing_E001_SIFT_dimred.p", "rb"))
	plot_SIFT(data, demonstration, changepoints = None, labels = None, plot_tsne = True)
Example #13
	def generate_change_points(self):

		cp_index = 0

		for demonstration in self.list_of_demonstrations:

			# print "Changepoints for " + demonstration
			N = self.data_N[demonstration]

			gmm = mixture.GMM(n_components = 10, covariance_type='full')
			gmm.fit(N)
			Y = gmm.predict(N)
	
			self.save_cluster_metrics(N, Y, gmm.means_, 'cpts_' + demonstration, gmm)

			start, end = utils.get_start_end_annotations(constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER
				+ demonstration + "_capture2.p")

			size_of_X = self.data_X_size[demonstration]

			for i in range(len(Y) - 1):

				if Y[i] != Y[i + 1]:

					change_pt = N[i][size_of_X:]
					# print N.shape, change_pt.shape
					self.append_cp_array(change_pt)
					self.map_cp2frm[cp_index] = start + i * self.sr
					self.map_cp2demonstrations[cp_index] = demonstration
					self.list_of_cp.append(cp_index)

					cp_index += 1
Example #14
def generate_sift_features():
	list_of_demonstrations = ["plane_9",]
	for demonstration in list_of_demonstrations:
		print "SIFT for ", demonstration
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"

		X1 = None
		X2 = None
		n_features = 20
		sift = cv2.SIFT(nfeatures = n_features)

		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
		for frm in range(start, end + 1):
			# if ((frm % 3) == 0):
				PATH_TO_IMAGE = utils.get_full_image_path(constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/", frm)

				print PATH_TO_IMAGE
				img = cv2.imread(PATH_TO_IMAGE)
				kp, des = sift.detectAndCompute(img, None)
				img = cv2.drawKeypoints(img, kp)
				cv2.imshow('sift',img)
				cv2.imwrite('../sift_images/' + demonstration + "/" + str(frm) +".jpg",img)

				vector1 = []
				vector2 = []
				kp.sort(key = lambda x: x.response, reverse = True)
				for kp_elem in kp:
					vector1 += [kp_elem.response, kp_elem.pt[0], kp_elem.pt[1], kp_elem.size, kp_elem.angle]
					vector2 += [kp_elem.pt[0], kp_elem.pt[1]]
				try:
					X1 = utils.safe_concatenate(X1, utils.reshape(np.array(vector1[:n_features * 5])))
					X2 = utils.safe_concatenate(X2, utils.reshape(np.array(vector2[:n_features * 2])))
				except ValueError as e:
					IPython.embed()

		pickle.dump(X1, open("sift_features/SIFT_" + demonstration + "_1.p", "wb"))
		pickle.dump(X2, open("sift_features/SIFT_" + demonstration + "_2.p", "wb"))
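cv2.SIFT(nfeatures=...) is the OpenCV 2.4 constructor; on current OpenCV builds the equivalent detector is created with cv2.SIFT_create (or cv2.xfeatures2d.SIFT_create on some 3.x builds). A hedged sketch of the same keypoint-to-vector step, with "frame.jpg" as a placeholder path:

import cv2
import numpy as np

sift = cv2.SIFT_create(nfeatures=20)
img = cv2.imread("frame.jpg")                             # placeholder path, replace with a real frame
kp, des = sift.detectAndCompute(img, None)
kp = sorted(kp, key=lambda k: k.response, reverse=True)   # strongest keypoints first
vector = []
for k in kp:
    vector += [k.response, k.pt[0], k.pt[1], k.size, k.angle]
vector = np.array(vector)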
Example #15
def featurize_1(list_of_demonstrations, kinematics, sr):
    """
	Given .p (pickle) files of SIFT features, this function concatenates
	the Z (visual) vectors with the W (kinematic) vectors to produce X vectors, dumped as pickle files.
	"""
    print "FEATURIZATION 1"

    data_X_1 = {}
    data_X_2 = {}
    for demonstration in list_of_demonstrations:
        print "SIFT for ", demonstration
        start, end = utils.get_start_end_annotations(
            constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER +
            demonstration + "_" + constants.CAMERA + ".p")

        W = kinematics[demonstration]
        W_sampled = utils.sample_matrix(W, sampling_rate=sr)

        PATH_TO_SIFT = constants.PATH_TO_DATA + "sift_FCED/SIFT_" + demonstration
        Z = pickle.load(open(PATH_TO_SIFT + "_1.p", "rb"))
        Z = Z[start:end + 1]
        Z_sampled_1 = utils.sample_matrix(Z, sampling_rate=sr)

        Z = pickle.load(open(PATH_TO_SIFT + "_2.p", "rb"))
        Z = Z[start:end + 1]
        Z_sampled_2 = utils.sample_matrix(Z, sampling_rate=sr)

        assert Z_sampled_1.shape[0] == W_sampled.shape[0]
        assert Z_sampled_2.shape[0] == W_sampled.shape[0]

        data_X_1[demonstration] = np.concatenate((W_sampled, Z_sampled_1),
                                                 axis=1)
        data_X_2[demonstration] = np.concatenate((W_sampled, Z_sampled_2),
                                                 axis=1)

    pickle.dump(data_X_1, open(PATH_TO_FEATURES + "SIFT_1.p", "wb"))
    pickle.dump(data_X_2, open(PATH_TO_FEATURES + "SIFT_2.p", "wb"))
Example #16
def generate_raw_image_pixels(list_of_demonstrations):
	"""
	PCA and t-SNE on raw image pixels
    """

	# Design matrix of raw image pixels
	X = None

	for demonstration in list_of_demonstrations:
		print "Raw image pixels ", demonstration
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"

		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
		for frm in range(start, end + 1):
			if ((frm % 6) == 0):
				PATH_TO_IMAGE = utils.get_full_image_path(constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/", frm)
				print demonstration, str(frm)
				img = utils.reshape(cv2.imread(PATH_TO_IMAGE).flatten())
				X = utils.safe_concatenate(X, img)

	X_pca = utils.pca(X, PC = 2)
	X_tsne = utils.tsne(X)
	data_dimred = [X_pca, X_tsne]
	pickle.dump(X_tsne, open("raw_pixel_" + demonstration + "_dimred.p", "wb"))
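utils.pca and utils.tsne are repo helpers; a minimal stand-in (an assumption) built directly on scikit-learn shows the same dimensionality-reduction step on a design matrix of flattened frames:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

def dimred(X, n_components=2, perplexity=30.0):
    """Project a design matrix to 2-D with PCA and with t-SNE."""
    X_pca = PCA(n_components=n_components).fit_transform(X)
    X_tsne = TSNE(n_components=n_components, perplexity=perplexity).fit_transform(X)
    return X_pca, X_tsne

X = np.random.RandomState(0).rand(200, 50)    # e.g. 200 flattened (downsampled) frames
X_pca, X_tsne = dimred(X)
print(X_pca.shape, X_tsne.shape)              # (200, 2) (200, 2)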
Example #17
    def generate_change_points_1(self):
        """
		Generates changepoints by clustering within a demonstration.
		"""

        cp_index = 0

        for demonstration in self.list_of_demonstrations:

            N = self.data_N[demonstration]

            gmm = mixture.GMM(n_components=self.n_components_cp,
                              covariance_type='full',
                              n_iter=10000,
                              thresh=5e-5)
            gmm.fit(N)
            Y = gmm.predict(N)

            self.save_cluster_metrics(N, Y, 'cpts_' + demonstration)

            start, end = utils.get_start_end_annotations(
                constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER +
                demonstration + "_" + constants.CAMERA + ".p")

            size_of_X = self.data_X_size[demonstration]

            for i in range(len(Y) - 1):

                if Y[i] != Y[i + 1]:
                    change_pt = N[i][size_of_X:]
                    self.append_cp_array(change_pt)
                    self.map_cp2frm[cp_index] = start + i * self.sr
                    self.map_cp2demonstrations[cp_index] = demonstration
                    self.list_of_cp.append(cp_index)

                    cp_index += 1
Example #18
def weighted_score(list_of_demonstrations, list_of_frm_demonstrations):
    """
	Implements weighted pruning for given demonstrations represented in list_of_frm_demonstrations.
	Returns weighted score.
	"""

    if constants.TASK_NAME not in ["suturing", "needle_passing"]:
        return None

    if not constants.WEIGHTED_PRUNING_MODE:
        return None

    N = float(len(list_of_demonstrations))
    uniform_weight = 1 / N

    # Weights for skill-weighted pruning; every demonstration starts from the same uniform base weight.
    map_demonstration2weight = {}

    # Weight is inversely proportional to completion time.
    map_demonstration2weight_t = {}

    for demonstration in list_of_demonstrations:

        # Base weight
        weight = uniform_weight

        PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + constants.CAMERA + ".p"
        start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
        weight_t = 1.0 / (end - start)

        # Strip the task prefix (Suturing_, Needle_passing_, etc.) from the demonstration name
        demonstration_name = demonstration.split("_")[-1]

        if demonstration_name in experts:
            weight *= constants.WEIGHT_EXPERT
        elif demonstration_name in intermediates:
            weight *= constants.WEIGHT_INTERMEDIATE
        else:
            if demonstration_name not in novices:
                print "ERROR: Unidentified Demonstration"
                IPython.embed()
        map_demonstration2weight[demonstration] = weight
        map_demonstration2weight_t[demonstration] = weight_t

    normalization_factor = sum(map_demonstration2weight.values())
    normalization_factor_t = sum(map_demonstration2weight_t.values())

    #Weight normalization
    for demonstration in list_of_demonstrations:
        weight = map_demonstration2weight[demonstration]
        map_demonstration2weight[demonstration] = weight / float(
            normalization_factor)

        weight_t = map_demonstration2weight_t[demonstration]
        map_demonstration2weight_t[demonstration] = weight_t / float(
            normalization_factor_t)

    score = 0.0
    for demonstration in set(list_of_frm_demonstrations):
        score += map_demonstration2weight[demonstration]

    score_t = 0.0
    for demonstration in set(list_of_frm_demonstrations):
        score_t += map_demonstration2weight_t[demonstration]

    return score_t
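A toy numeric illustration of the normalization above (the multipliers are assumptions, not the repo's constants): one expert demonstration weighted 2x against two novices, then normalized so the weights sum to 1; the score of a frame cluster is the total weight of the demonstrations that contributed to it.

raw = {"E001": (1 / 3.0) * 2.0,          # expert, WEIGHT_EXPERT assumed to be 2.0
       "N001": (1 / 3.0) * 1.0,          # novices keep the uniform base weight
       "N002": (1 / 3.0) * 1.0}
total = sum(raw.values())
weights = {d: w / total for d, w in raw.items()}          # {E001: 0.5, N001: 0.25, N002: 0.25}

cluster_demos = {"E001", "N001"}                          # demos that contributed frames
print(sum(weights[d] for d in cluster_demos))             # 0.75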
Example #19
    "plane_3_js.p", "plane_4_js.p", "plane_5_js.p", "plane_6_js.p",
    "plane_7_js.p", "plane_8_js.p", "plane_9_js.p", "plane_10_js.p"
]

list_of_trajectories = [
    "plane_3.p", "plane_4.p", "plane_5.p", "plane_6.p", "plane_7.p",
    "plane_8.p", "plane_9.p", "plane_10.p"
]

list_of_annotations = [
    "plane_3_capture2.p", "plane_4_capture2.p", "plane_5_capture2.p",
    "plane_6_capture2.p", "plane_7_capture2.p", "plane_8_capture2.p",
    "plane_9_capture2.p", "plane_10_capture2.p"
]

for i in range(len(list_of_annotations)):
    print list_of_annotations[i], list_of_joint_states[
        i], list_of_trajectories[i]
    start, end = utils.get_start_end_annotations(constants.PATH_TO_DATA +
                                                 "annotations/" +
                                                 list_of_annotations[i])
    X = None
    trajectory = pickle.load(
        open(constants.PATH_TO_KINEMATICS + list_of_joint_states[i], "rb"))
    for frm in range(start, end + 1):
        traj_point = trajectory[frm]
        print traj_point.velocity[16:-12]
        vector = list(traj_point.position[16:-12]) + list(
            traj_point.velocity[16:-12])
        X = utils.safe_concatenate(X, utils.reshape(np.array(vector)))
    # pickle.dump(X, open(constants.PATH_TO_KINEMATICS + list_of_trajectories[i],"wb"))
Example #20
def plot_broken_barh(demonstration, data, save_fname=None, T=10):
    """
	Parameters:
	-----------
	demonstration: String name of the demonstration without camera specification, e.g. "Suturing_E001"

	list_of_frms_[1,2,3,4]: List of changepoint frames for each of 4 different clustering experiments.
	Use this to compare manually vs. automatically generated transition points.

	* For now, the list_of_frms is constrained to 4 for the sake of visualization clarity.
	"""

    numDemos = min(5, len(data.keys()) + 1)
    sizeTestSet = numDemos - 1

    PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + constants.CAMERA + ".p"
    start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
    length = end - start
    segments = pickle.load(open(PATH_TO_ANNOTATION, "rb"))

    fig, ax = plt.subplots()
    # Generate labels for 1) Manual 2) Time clusters

    labels_automatic_0, colors_automatic_0, labels_automatic_1, colors_automatic_1, means, list_of_frms = get_time_clusters(
        data, T)

    labels_manual, colors_manual = setup_manual_labels(segments)

    # Plot 1) Manual 2) Time clusters
    ax.broken_barh(labels_manual, (25, 2), facecolors=colors_manual)
    ax.broken_barh(labels_automatic_0, (21, 2), facecolors=colors_automatic_0)

    list_of_plot_ranges = [(17, 2), (13, 2), (9, 2), (5, 2)]

    for i in range(min(sizeTestSet, 4)):
        labels_automatic, colors_automatic = setup_automatic_labels(
            list_of_frms[i], "k")
        ax.broken_barh(labels_automatic,
                       list_of_plot_ranges[i],
                       facecolors=colors_automatic)

    TASK = constants.TASK_NAME
    if (TASK in ["lego", "plane"]):
        end = end + 20
    elif (TASK in ["000", "010", "011", "100"]):
        end = end + 10
    else:
        end = end + 50

    ticks = get_ticks(labels_manual)
    ax.set_ylim(3, 29)
    ax.set_xlim(0, end)
    ax.set_xlabel('Frame number')
    # ax.set_xticks(ticks)
    ax.set_yticks([6, 10, 14, 18, 22, 26])
    ax.set_yticklabels([
        'Automatic4', 'Automatic3', 'Automatic2', 'Automatic1',
        'Time Clustering', 'Manual'
    ])

    if save_fname:
        plt.savefig(save_fname)
    else:
        plt.show()
    pass

    time_sequence_1 = [elem[0] + elem[1] for elem in labels_manual]
    time_sequence_2 = means

    dtw_score = compute_dtw(time_sequence_1, time_sequence_2)
    normalized_dtw_score = dtw_score / float(length) * 100

    return dtw_score, normalized_dtw_score, length, labels_manual, colors_manual, labels_automatic_0, colors_automatic_0
Example #21
def plot_broken_barh(demonstration, data, save_fname = None, T = 10):
	"""
	Parameters:
	-----------
	demonstration: String name of the demonstration without camera specification, e.g. "Suturing_E001"

	list_of_frms_[1,2,3,4]: List of changepoint frames for each of 4 different clustering experiments.
	Use this to compare manually vs. automatically generated transition points.

	* For now, the list_of_frms is constrained to 4 for the sake of visualization clarity.
	"""

	numDemos = min(5, len(data.keys()) + 1)
	sizeTestSet = numDemos - 1

	PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + constants.CAMERA + ".p"
	start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
	length = end - start
	segments = pickle.load(open(PATH_TO_ANNOTATION, "rb"))

	fig, ax = plt.subplots()
	# Generate labels for 1) Manual 2) Time clusters

	
	labels_automatic_0, colors_automatic_0, labels_automatic_1, colors_automatic_1, means, list_of_frms = get_time_clusters(data, T)

	labels_manual, colors_manual = setup_manual_labels(segments)

	# Plot 1) Manual 2) Time clusters
	ax.broken_barh(labels_manual, (25, 2), facecolors = colors_manual)
	ax.broken_barh(labels_automatic_0, (21, 2), facecolors = colors_automatic_0)

	list_of_plot_ranges = [(17, 2), (13, 2), (9, 2), (5, 2)]

	for i in range(min(sizeTestSet, 4)):
		labels_automatic, colors_automatic = setup_automatic_labels(list_of_frms[i], "k")
		ax.broken_barh(labels_automatic, list_of_plot_ranges[i], facecolors = colors_automatic)

	TASK = constants.TASK_NAME
	if (TASK in ["lego", "plane"]):
		end = end + 20
	elif (TASK in ["000", "010", "011", "100"]):
		end = end + 10
	else:
		end = end + 50

	ticks = get_ticks(labels_manual)
	ax.set_ylim(3,29)
	ax.set_xlim(0, end)
	ax.set_xlabel('Frame number')
	# ax.set_xticks(ticks)
	ax.set_yticks([6, 10, 14, 18, 22, 26])
	ax.set_yticklabels(['Automatic4','Automatic3','Automatic2', 'Automatic1','Time Clustering', 'Manual'])

	if save_fname:
		plt.savefig(save_fname)
	else:
		plt.show()
	pass

	time_sequence_1 = [elem[0] + elem[1] for elem in labels_manual ]
	time_sequence_2 = means

	dtw_score = compute_dtw(time_sequence_1, time_sequence_2)
	normalized_dtw_score = dtw_score/float(length) * 100

	return dtw_score, normalized_dtw_score, length, labels_manual, colors_manual, labels_automatic_0, colors_automatic_0
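compute_dtw is referenced but not defined on this page. A plain dynamic-time-warping distance over the two 1-D time sequences, sketched below, is one reasonable reading of it; the repo's implementation may use a different local cost or normalization.

import numpy as np

def compute_dtw_distance(seq1, seq2):
    """O(len(seq1) * len(seq2)) DTW distance with absolute-difference local cost."""
    n, m = len(seq1), len(seq2)
    D = np.full((n + 1, m + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = abs(seq1[i - 1] - seq2[j - 1])
            D[i, j] = cost + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return D[n, m]

print(compute_dtw_distance([0, 100, 200, 300], [0, 110, 190, 310]))   # 30.0 for near-identical sequences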
Example #22
def plot_broken_barh_all(demonstration,
                         data_W,
                         data_Z,
                         data_ZW,
                         save_fname=None,
                         save_fname2=None):
    """
	Plots time-clusters for W, K, KW.
	"""

    PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + constants.CAMERA + ".p"
    start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
    length = end - start
    segments = pickle.load(open(PATH_TO_ANNOTATION, "rb"))

    TASK = constants.TASK_NAME
    if (TASK in ["lego", "plane"]):
        end = end + 20
    elif (TASK in ["000", "010", "011", "100"]):
        end = end + 2
    else:
        end = end + 50

    labels_manual, colors_manual = setup_manual_labels(segments)
    labels_automatic_W, colors_automatic_W, labels_automatic_W_0, colors_automatic_W_0, means_W, list_of_frms_W = get_time_clusters(
        data_W, constants.N_COMPONENTS_TIME_W)
    labels_automatic_Z, colors_automatic_Z, labels_automatic_Z_0, colors_automatic_Z_0, means_Z, list_of_frms_Z = get_time_clusters(
        data_Z, constants.N_COMPONENTS_TIME_Z)
    labels_automatic_ZW, colors_automatic_ZW, labels_automatic_ZW_0, colors_automatic_ZW_0, means_ZW, list_of_frms_ZW = get_time_clusters(
        data_ZW, constants.N_COMPONENTS_TIME_ZW)

    fig, ax = plt.subplots()
    ax.broken_barh(labels_manual, (17, 2), facecolors=colors_manual)
    ax.broken_barh(labels_automatic_W, (13, 2), facecolors=colors_automatic_W)
    ax.broken_barh(labels_automatic_Z, (9, 2), facecolors=colors_automatic_Z)
    ax.broken_barh(labels_automatic_ZW, (5, 2), facecolors=colors_automatic_ZW)

    ax.set_ylim(3, 21)
    ax.set_xlim(0, end)
    ax.set_xlabel('Frame number')
    ax.set_yticks([6, 10, 14, 18])
    ax.set_yticklabels(['ZW', 'Z', 'W', 'Manual'])

    if save_fname:
        plt.savefig(save_fname)
    else:
        plt.show()
    pass

    fig, ax = plt.subplots()
    ax.broken_barh(labels_manual, (17, 2), facecolors=colors_manual)
    ax.broken_barh(labels_automatic_W_0, (13, 2),
                   facecolors=colors_automatic_W_0)
    ax.broken_barh(labels_automatic_Z_0, (9, 2),
                   facecolors=colors_automatic_Z_0)
    ax.broken_barh(labels_automatic_ZW_0, (5, 2),
                   facecolors=colors_automatic_ZW_0)

    ax.set_ylim(3, 21)
    ax.set_xlim(0, end)
    ax.set_xlabel('Frame number')
    ax.set_yticks([6, 10, 14, 18])
    ax.set_yticklabels(['ZW_0', 'Z_0', 'W_0', 'Manual'])

    if save_fname2:
        plt.savefig(save_fname2)
    else:
        plt.show()
    pass

    time_sequence_1 = [elem[0] + elem[1] for elem in labels_manual]

    time_sequence_2 = means_W
    dtw_score_W = compute_dtw(time_sequence_1, time_sequence_2)

    time_sequence_2 = means_Z
    dtw_score_Z = compute_dtw(time_sequence_1, time_sequence_2)

    time_sequence_2 = means_ZW
    dtw_score_ZW = compute_dtw(time_sequence_1, time_sequence_2)

    dtw_score_W_normalized = dtw_score_W / float(length) * 100
    dtw_score_Z_normalized = dtw_score_Z / float(length) * 100
    dtw_score_ZW_normalized = dtw_score_ZW / float(length) * 100

    return dtw_score_W, dtw_score_Z, dtw_score_ZW, dtw_score_W_normalized, dtw_score_Z_normalized, dtw_score_ZW_normalized, length
Example #23
    def generate_change_points_2(self):
        """
		Generates changepoints by clustering across demonstrations.
		"""
        cp_index = 0
        i = 0
        big_N = None
        map_index2demonstration = {}
        map_index2frm = {}
        size_of_X = self.data_X_size[self.list_of_demonstrations[0]]

        for demonstration in self.list_of_demonstrations:
            print demonstration
            N = self.data_N[demonstration]

            start, end = utils.get_start_end_annotations(
                constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER +
                demonstration + "_" + constants.CAMERA + ".p")

            for j in range(N.shape[0]):
                map_index2demonstration[i] = demonstration
                map_index2frm[i] = start + j * self.sr
                i += 1

            big_N = utils.safe_concatenate(big_N, N)

        print "Generated big_N"

        if constants.REMOTE == 1:
            if self.fit_GMM:
                gmm = mixture.GMM(n_components=self.n_components_cp,
                                  covariance_type='full',
                                  thresh=0.01)

            if self.fit_DPGMM:
                # dpgmm = mixture.DPGMM(n_components = 100, covariance_type='diag', n_iter = 10000, alpha = 100, thresh= 2e-4)

                #DO NOT FIDDLE WITH PARAMS WITHOUT CONSENT :)
                avg_len = int(big_N.shape[0] /
                              len(self.list_of_demonstrations))
                DP_GMM_COMPONENTS = int(
                    avg_len / constants.DPGMM_DIVISOR
                )  #tuned with suturing experts only for kinematics
                print "L0 ", DP_GMM_COMPONENTS, "ALPHA: ", constants.ALPHA_ZW_CP
                dpgmm = mixture.DPGMM(n_components=DP_GMM_COMPONENTS,
                                      covariance_type='diag',
                                      n_iter=1000,
                                      alpha=constants.ALPHA_ZW_CP,
                                      thresh=1e-7)

        elif constants.REMOTE == 2:
            gmm = mixture.GMM(n_components=self.n_components_cp,
                              covariance_type='full')
        else:
            gmm = mixture.GMM(n_components=self.n_components_cp,
                              covariance_type='full')

        if self.fit_GMM:
            start = time.time()
            gmm.fit(big_N)
            end = time.time()
            "GMM time taken: ", str(end - start)
            Y_gmm = gmm.predict(big_N)

            print "L0: Clusters in GMM", len(set(Y_gmm))
            Y = Y_gmm

        if self.fit_DPGMM:
            start = time.time()
            dpgmm.fit(big_N)
            end = time.time()
            "DP-GMM time taken: ", str(end - start)
            Y_dpgmm = dpgmm.predict(big_N)

            Y = Y_dpgmm
            print "L0: Clusters in DP-GMM", len(set(Y_dpgmm))

        for w in range(len(Y) - 1):

            if Y[w] != Y[w + 1]:
                change_pt = big_N[w][:size_of_X]
                self.append_cp_array(change_pt)
                self.map_cp2frm[cp_index] = map_index2frm[w]
                self.map_cp2demonstrations[cp_index] = map_index2demonstration[
                    w]
                self.list_of_cp.append(cp_index)

                cp_index += 1

        print "Done with generating change points", len(self.list_of_cp)
Example #24
def featurize_LCD_VLAD(list_of_demonstrations, kinematics, layer, net_name, folder, dimensions, batch_size, fname, config = [True, True, True]):
	M = dimensions[0]
	a = dimensions[1]

	print "Featurizing LCD + VLAD: ", layer, net_name, folder, M, a, batch_size

	BATCH_SIZE = batch_size

	if constants.SIMULATION:
		BATCH_SIZE = 5

	data_X_PCA = {}
	data_X_CCA = {}
	data_X_GRP = {}

	size_sampled_matrices = [utils.sample_matrix(kinematics[demo], sampling_rate = BATCH_SIZE).shape[0] for demo in list_of_demonstrations]
	PC = min(100, min(size_sampled_matrices))
	print "PC: ", PC

	for demonstration in list_of_demonstrations:
		print demonstration
		W = kinematics[demonstration]
		Z = load_cnn_features(demonstration, layer, folder, net_name)
		W_new = utils.sample_matrix(W, sampling_rate = BATCH_SIZE)

		Z_batch = None
		W_batch = None
		j = 1

		Z_new = None

		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"
		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

		# Iterate over the CNN features loaded above. (The original snippet re-assigned
		# Z = [], called range(Z) directly, and left IPython.embed() breakpoints in place,
		# none of which runs as written.)
		for i in range(len(Z)):

			vector_W = W[i]
			W_batch = utils.safe_concatenate(W_batch, vector_W)

			vector_Z = Z[i]
			vector_Z = vector_Z.reshape(M, a, a)
			vector_Z = lcd.LCD(vector_Z)
			Z_batch = utils.safe_concatenate(Z_batch, vector_Z)

			if (j == BATCH_SIZE):
				print "NEW BATCH", str(i)
				Z_batch_VLAD = encoding.encode_VLAD(Z_batch)
				Z_new = utils.safe_concatenate(Z_new, Z_batch_VLAD)

				# Re-initialize batch variables
				j = 0
				Z_batch = None
				W_batch = None

			j += 1

		# tail case
		if Z_batch is not None:
			print "TAIL CASE"
			print "NEW BATCH", str(i)
			Z_batch_VLAD = encoding.encode_VLAD(Z_batch)
			Z_new = utils.safe_concatenate(Z_new, Z_batch_VLAD)

		if config[0]:
			Z_new_pca = utils.pca_incremental(Z_new, PC = PC)
			print Z_new_pca.shape
			assert W_new.shape[0] == Z_new_pca.shape[0]
			X_PCA = np.concatenate((W_new, Z_new_pca), axis = 1)
			data_X_PCA[demonstration] = X_PCA

		if config[1]:
			Z_new_cca = utils.cca(W_new, Z_new)
			print Z_new_cca.shape
			assert W_new.shape[0] == Z_new_cca.shape[0]
			X_CCA = np.concatenate((W_new, Z_new_cca), axis = 1)
			data_X_CCA[demonstration] = X_CCA

		if config[2]:
			Z_new_grp = utils.grp(Z_new)
			print Z_new_grp.shape
			assert W_new.shape[0] == Z_new_grp.shape[0]
			X_GRP = np.concatenate((W_new, Z_new_grp), axis = 1)
			data_X_GRP[demonstration] = X_GRP

	if config[0]:
		pickle.dump(data_X_PCA, open(PATH_TO_FEATURES + fname + "_PCA" + ".p", "wb"))
	if config[1]:
		pickle.dump(data_X_CCA, open(PATH_TO_FEATURES + fname + "_CCA" + ".p", "wb"))
	if config[2]:
		pickle.dump(data_X_GRP, open(PATH_TO_FEATURES + fname + "_GRP" + ".p", "wb"))
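encoding.encode_VLAD is not shown on this page. A plain VLAD encoder (per-cluster sum of residuals to the nearest visual word, L2-normalized) is sketched below as an assumption; the repo's version may add power-law or intra-cluster normalization.

import numpy as np
from sklearn.cluster import KMeans

def encode_vlad(descriptors, centers):
    """VLAD: per-cluster sum of residuals (descriptor - nearest center), flattened and L2-normalized."""
    k, d = centers.shape
    nearest = np.argmin(((descriptors[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2), axis=1)
    vlad = np.zeros((k, d))
    for c in range(k):
        members = descriptors[nearest == c]
        if len(members):
            vlad[c] = (members - centers[c]).sum(axis=0)
    vlad = vlad.ravel()
    norm = np.linalg.norm(vlad)
    return vlad / norm if norm > 0 else vlad

rng = np.random.RandomState(0)
local_descriptors = rng.rand(200, 64)                 # e.g. one batch of LCD descriptors
centers = KMeans(n_clusters=8, n_init=10).fit(local_descriptors).cluster_centers_
print(encode_vlad(local_descriptors, centers).shape)  # (512,)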
Example #25
	def generate_change_points_2(self):
		"""
		Generates changepoints by clustering across demonstrations.
		"""
		cp_index = 0
		i = 0
		big_N = None
		map_index2demonstration = {}
		map_index2frm = {}

		for demonstration in self.list_of_demonstrations:
			print demonstration
			N = self.data_N[demonstration]

			start, end = utils.get_start_end_annotations(constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER
				+ demonstration + "_" + constants.CAMERA + ".p")

			for j in range(N.shape[0]):
				map_index2demonstration[i] = demonstration
				map_index2frm[i] = start + j * self.sr
				i += 1

			big_N = utils.safe_concatenate(big_N, N)

		print "Generating Changepoints. Fitting GMM/DP-GMM ..."

		if constants.REMOTE == 1:
			if self.fit_DPGMM:
				print "Init DPGMM"
				avg_len = int(big_N.shape[0]/len(self.list_of_demonstrations))
				DP_GMM_COMPONENTS = int(avg_len/constants.DPGMM_DIVISOR)
				print "L0", DP_GMM_COMPONENTS, "ALPHA: ", self.ALPHA_CP
				dpgmm = mixture.DPGMM(n_components = DP_GMM_COMPONENTS, covariance_type='diag', n_iter = 10000, alpha = self.ALPHA_CP, thresh= 1e-7)

			if self.fit_GMM:
				print "Init GMM"
				gmm = mixture.GMM(n_components = self.n_components_cp, covariance_type='full', n_iter=5000, thresh = 5e-5)

		if constants.REMOTE == 2:
			gmm = mixture.GMM(n_components = self.n_components_cp, covariance_type='full', thresh = 0.01)

		else:
			gmm = mixture.GMM(n_components = self.n_components_cp, covariance_type='full')

		if self.fit_GMM:
			print "Fitting GMM"
			start = time.time()
			gmm.fit(big_N)
			end = time.time()
			print "GMM Time:", end - start

			Y_gmm = gmm.predict(big_N)
			print "L0: Clusters in GMM", len(set(Y_gmm))
			Y = Y_gmm

		if self.fit_DPGMM:
			print "Fitting DPGMM"
			start = time.time()
			dpgmm.fit(big_N)
			end = time.time()
			print "DPGMM Time:", end - start

			Y_dpgmm = dpgmm.predict(big_N)
			print "L0: Clusters in DP-GMM", len(set(Y_dpgmm))
			Y = Y_dpgmm

		for w in range(len(Y) - 1):

			if Y[w] != Y[w + 1]:
				change_pt = big_N[w][:self.X_dimension]
				self.append_cp_array(utils.reshape(change_pt))
				self.map_cp2frm[cp_index] = map_index2frm[w]
				self.map_cp2demonstrations[cp_index] = map_index2demonstration[w]
				self.list_of_cp.append(cp_index)

				cp_index += 1

		print "Done with generating change points, " + str(cp_index)
    def generate_change_points_2(self):
        """
		Generates changepoints by clustering across demonstrations.
		"""
        cp_index = 0
        i = 0
        big_N = None
        map_index2demonstration = {}
        map_index2frm = {}

        for demonstration in self.list_of_demonstrations:
            print demonstration
            N = self.data_N[demonstration]

            start, end = utils.get_start_end_annotations(
                constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + constants.CAMERA + ".p"
            )

            for j in range(N.shape[0]):
                map_index2demonstration[i] = demonstration
                map_index2frm[i] = start + j * self.sr
                i += 1

            big_N = utils.safe_concatenate(big_N, N)

        print "Generating Changepoints. Fitting GMM/DP-GMM ..."

        if constants.REMOTE == 1:
            if self.fit_DPGMM:
                print "Init DPGMM"
                avg_len = int(big_N.shape[0] / len(self.list_of_demonstrations))
                DP_GMM_COMPONENTS = int(avg_len / constants.DPGMM_DIVISOR)
                print "L0", DP_GMM_COMPONENTS, "ALPHA: ", self.ALPHA_CP
                dpgmm = mixture.DPGMM(
                    n_components=DP_GMM_COMPONENTS,
                    covariance_type="diag",
                    n_iter=10000,
                    alpha=self.ALPHA_CP,
                    thresh=1e-7,
                )

            if self.fit_GMM:
                print "Init GMM"
                gmm = mixture.GMM(n_components=self.n_components_cp, covariance_type="full", n_iter=5000, thresh=5e-5)

        if constants.REMOTE == 2:
            gmm = mixture.GMM(n_components=self.n_components_cp, covariance_type="full", thresh=0.01)

        else:
            gmm = mixture.GMM(n_components=self.n_components_cp, covariance_type="full")

        if self.fit_GMM:
            print "Fitting GMM"
            start = time.time()
            gmm.fit(big_N)
            end = time.time()
            print "GMM Time:", end - start

            Y_gmm = gmm.predict(big_N)
            print "L0: Clusters in GMM", len(set(Y_gmm))
            Y = Y_gmm

        if self.fit_DPGMM:
            print "Fitting DPGMM"
            start = time.time()
            dpgmm.fit(big_N)
            end = time.time()
            print "DPGMM Time:", end - start

            Y_dpgmm = dpgmm.predict(big_N)
            print "L0: Clusters in DP-GMM", len(set(Y_dpgmm))
            Y = Y_dpgmm

        for w in range(len(Y) - 1):

            if Y[w] != Y[w + 1]:
                change_pt = big_N[w][: self.X_dimension]
                self.append_cp_array(utils.reshape(change_pt))
                self.map_cp2frm[cp_index] = map_index2frm[w]
                self.map_cp2demonstrations[cp_index] = map_index2demonstration[w]
                self.list_of_cp.append(cp_index)

                cp_index += 1

        print "Done with generating change points, " + str(cp_index)
Example #27
def plot_broken_barh_all(demonstration, data_W, data_Z, data_ZW, save_fname = None, save_fname2 = None):
	"""
	Plots time-clusters for W, K, KW.
	"""

	PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + constants.CAMERA + ".p"
	start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
	length = end - start
	segments = pickle.load(open(PATH_TO_ANNOTATION, "rb"))

	TASK = constants.TASK_NAME
	if (TASK in ["lego", "plane"]):
		end = end + 20
	elif (TASK in ["000", "010", "011", "100"]):
		end = end + 2
	else:
		end = end + 50

	labels_manual, colors_manual = setup_manual_labels(segments)
	labels_automatic_W, colors_automatic_W, labels_automatic_W_0, colors_automatic_W_0, means_W, list_of_frms_W = get_time_clusters(data_W, constants.N_COMPONENTS_TIME_W)
	labels_automatic_Z, colors_automatic_Z, labels_automatic_Z_0, colors_automatic_Z_0, means_Z, list_of_frms_Z = get_time_clusters(data_Z, constants.N_COMPONENTS_TIME_Z)
	labels_automatic_ZW, colors_automatic_ZW, labels_automatic_ZW_0, colors_automatic_ZW_0, means_ZW, list_of_frms_ZW = get_time_clusters(data_ZW, constants.N_COMPONENTS_TIME_ZW)

	fig, ax = plt.subplots()
	ax.broken_barh(labels_manual, (17, 2), facecolors = colors_manual)
	ax.broken_barh(labels_automatic_W, (13, 2), facecolors = colors_automatic_W)
	ax.broken_barh(labels_automatic_Z, (9, 2), facecolors = colors_automatic_Z)
	ax.broken_barh(labels_automatic_ZW, (5, 2), facecolors = colors_automatic_ZW)

	ax.set_ylim(3,21)
	ax.set_xlim(0, end)
	ax.set_xlabel('Frame number')
	ax.set_yticks([6, 10, 14, 18])
	ax.set_yticklabels(['ZW','Z','W', 'Manual'])

	if save_fname:
		plt.savefig(save_fname)
	else:
		plt.show()
	pass

	fig, ax = plt.subplots()
	ax.broken_barh(labels_manual, (17, 2), facecolors = colors_manual)
	ax.broken_barh(labels_automatic_W_0, (13, 2), facecolors = colors_automatic_W_0)
	ax.broken_barh(labels_automatic_Z_0, (9, 2), facecolors = colors_automatic_Z_0)
	ax.broken_barh(labels_automatic_ZW_0, (5, 2), facecolors = colors_automatic_ZW_0)

	ax.set_ylim(3,21)
	ax.set_xlim(0, end)
	ax.set_xlabel('Frame number')
	ax.set_yticks([6, 10, 14, 18])
	ax.set_yticklabels(['ZW_0','Z_0','W_0', 'Manual'])

	if save_fname2:
		plt.savefig(save_fname2)
	else:
		plt.show()
	pass

	time_sequence_1 = [elem[0] + elem[1] for elem in labels_manual]

	time_sequence_2 = means_W
	dtw_score_W = compute_dtw(time_sequence_1, time_sequence_2)

	time_sequence_2 = means_Z
	dtw_score_Z = compute_dtw(time_sequence_1, time_sequence_2)

	time_sequence_2 = means_ZW
	dtw_score_ZW = compute_dtw(time_sequence_1, time_sequence_2)

	dtw_score_W_normalized = dtw_score_W/float(length) * 100
	dtw_score_Z_normalized = dtw_score_Z/float(length) * 100
	dtw_score_ZW_normalized = dtw_score_ZW/float(length) * 100

	return dtw_score_W, dtw_score_Z, dtw_score_ZW, dtw_score_W_normalized, dtw_score_Z_normalized, dtw_score_ZW_normalized, length
Example #28
def weighted_score(list_of_demonstrations, list_of_frm_demonstrations):
	"""
	Implements weighted pruning for given demonstrations represented in list_of_frm_demonstrations.
	Returns weighted score.
	"""

	if constants.TASK_NAME not in ["suturing", "needle_passing"]:
		return None

	if not constants.WEIGHTED_PRUNING_MODE:
		return None

	N = float(len(list_of_demonstrations))
	uniform_weight = 1/N

	# Weights for skill-weighted pruning; every demonstration starts from the same uniform base weight.
	map_demonstration2weight = {}

	# Weight is inversely proportional to completion time.
	map_demonstration2weight_t = {}

	for demonstration in list_of_demonstrations:

		# Base weight
		weight = uniform_weight

		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + constants.CAMERA + ".p"
		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
		weight_t = 1.0/(end - start)

		# Strip the task prefix (Suturing_, Needle_passing_, etc.) from the demonstration name
		demonstration_name = demonstration.split("_")[-1]

		if demonstration_name in experts:
			weight *= constants.WEIGHT_EXPERT
		elif demonstration_name in intermediates:
			weight *= constants.WEIGHT_INTERMEDIATE
		else:
			if demonstration_name not in novices:
				print "ERROR: Unidentified Demonstration"
				IPython.embed()
		map_demonstration2weight[demonstration] = weight
		map_demonstration2weight_t[demonstration] = weight_t

	normalization_factor = sum(map_demonstration2weight.values())
	normalization_factor_t = sum(map_demonstration2weight_t.values())

	#Weight normalization
	for demonstration in list_of_demonstrations:
		weight = map_demonstration2weight[demonstration]
		map_demonstration2weight[demonstration] = weight/float(normalization_factor)

		weight_t = map_demonstration2weight_t[demonstration]
		map_demonstration2weight_t[demonstration] = weight_t/float(normalization_factor_t)

	score = 0.0
	for demonstration in set(list_of_frm_demonstrations):
		score += map_demonstration2weight[demonstration]

	score_t = 0.0
	for demonstration in set(list_of_frm_demonstrations):
		score_t += map_demonstration2weight_t[demonstration]

	return score_t
Example #29
import numpy as np
import pickle

import constants
import utils
import parser

list_of_joint_states = ["plane_3_js.p", "plane_4_js.p", "plane_5_js.p",
		"plane_6_js.p", "plane_7_js.p", "plane_8_js.p", "plane_9_js.p", "plane_10_js.p"]

list_of_trajectories = ["plane_3.p", "plane_4.p", "plane_5.p",
		"plane_6.p", "plane_7.p", "plane_8.p", "plane_9.p", "plane_10.p"]

list_of_annotations = ["plane_3_capture2.p", "plane_4_capture2.p", "plane_5_capture2.p",
		"plane_6_capture2.p", "plane_7_capture2.p", "plane_8_capture2.p", "plane_9_capture2.p", "plane_10_capture2.p"]

for i in range(len(list_of_annotations)):
	print list_of_annotations[i], list_of_joint_states[i], list_of_trajectories[i]
	start, end = utils.get_start_end_annotations(constants.PATH_TO_DATA + "annotations/" + list_of_annotations[i])
	X = None
	trajectory = pickle.load(open(constants.PATH_TO_KINEMATICS + list_of_joint_states[i], "rb"))
	for frm in range(start, end + 1):
		traj_point = trajectory[frm]
		print traj_point.velocity[16:-12]
		vector = list(traj_point.position[16:-12]) + list(traj_point.velocity[16:-12])
		X = utils.safe_concatenate(X, utils.reshape(np.array(vector)))
	# pickle.dump(X, open(constants.PATH_TO_KINEMATICS + list_of_trajectories[i],"wb"))
Example #30
	def generate_change_points_2(self):
		"""
		Generates changepoints by clustering across demonstrations.
		"""
		cp_index = 0
		i = 0
		big_N = None
		map_index2demonstration = {}
		map_index2frm = {}
		size_of_X = self.data_X_size[self.list_of_demonstrations[0]]

		for demonstration in self.list_of_demonstrations:
			print demonstration
			N = self.data_N[demonstration]

			start, end = utils.get_start_end_annotations(constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER
				+ demonstration + "_" + constants.CAMERA + ".p")

			for j in range(N.shape[0]):
				map_index2demonstration[i] = demonstration
				map_index2frm[i] = start + j * self.sr
				i += 1

			big_N = utils.safe_concatenate(big_N, N)

		print "Generated big_N"

		if constants.REMOTE == 1:
			if self.fit_GMM:
				gmm = mixture.GMM(n_components = self.n_components_cp, covariance_type='full', thresh = 0.01)

			if self.fit_DPGMM:
				# dpgmm = mixture.DPGMM(n_components = 100, covariance_type='diag', n_iter = 10000, alpha = 100, thresh= 2e-4)

				#DO NOT FIDDLE WITH PARAMS WITHOUT CONSENT :)
				avg_len = int(big_N.shape[0]/len(self.list_of_demonstrations))
				DP_GMM_COMPONENTS = int(avg_len/constants.DPGMM_DIVISOR) #tuned with suturing experts only for kinematics
				print "L0 ", DP_GMM_COMPONENTS, "ALPHA: ", constants.ALPHA_ZW_CP
				dpgmm = mixture.DPGMM(n_components = DP_GMM_COMPONENTS, covariance_type='diag', n_iter = 1000, alpha = constants.ALPHA_ZW_CP, thresh= 1e-7)

		elif constants.REMOTE == 2:
			gmm = mixture.GMM(n_components = self.n_components_cp, covariance_type='full')
		else:
			gmm = mixture.GMM(n_components = self.n_components_cp, covariance_type='full')

		if self.fit_GMM:
			start = time.time()
			gmm.fit(big_N)
			end = time.time()
			"GMM time taken: ", str(end - start)
			Y_gmm = gmm.predict(big_N)

			print "L0: Clusters in GMM", len(set(Y_gmm))
			Y = Y_gmm

		if self.fit_DPGMM:
			start = time.time()
			dpgmm.fit(big_N)
			end = time.time()
			"DP-GMM time taken: ", str(end - start)
			Y_dpgmm = dpgmm.predict(big_N)

			Y = Y_dpgmm
			print "L0: Clusters in DP-GMM", len(set(Y_dpgmm))

		for w in range(len(Y) - 1):

			if Y[w] != Y[w + 1]:
				change_pt = big_N[w][:size_of_X]
				self.append_cp_array(change_pt)
				self.map_cp2frm[cp_index] = map_index2frm[w]
				self.map_cp2demonstrations[cp_index] = map_index2demonstration[w]
				self.list_of_cp.append(cp_index)

				cp_index += 1

		print "Done with generating change points", len(self.list_of_cp)
Example #31
def featurize_LCD_VLAD(list_of_demonstrations,
                       kinematics,
                       layer,
                       net_name,
                       folder,
                       dimensions,
                       batch_size,
                       fname,
                       config=[True, True, True]):
    M = dimensions[0]
    a = dimensions[1]

    print "Featurizing LCD + VLAD: ", layer, net_name, folder, M, a, batch_size

    BATCH_SIZE = batch_size

    if constants.SIMULATION:
        BATCH_SIZE = 5

    data_X_PCA = {}
    data_X_CCA = {}
    data_X_GRP = {}

    size_sampled_matrices = [
        utils.sample_matrix(kinematics[demo],
                            sampling_rate=BATCH_SIZE).shape[0]
        for demo in list_of_demonstrations
    ]
    PC = min(100, min(size_sampled_matrices))
    print "PC: ", PC

    for demonstration in list_of_demonstrations:
        print demonstration
        W = kinematics[demonstration]
        Z = load_cnn_features(demonstration, layer, folder, net_name)
        W_new = utils.sample_matrix(W, sampling_rate=BATCH_SIZE)

        Z_batch = None
        W_batch = None
        j = 1

        Z_new = None

        PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(
            constants.CAMERA) + ".p"
        start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

        # Iterate over the CNN features loaded above. (The original snippet re-assigned
        # Z = [], called range(Z) directly, and left IPython.embed() breakpoints in place,
        # none of which runs as written.)
        for i in range(len(Z)):

            vector_W = W[i]
            W_batch = utils.safe_concatenate(W_batch, vector_W)

            vector_Z = Z[i]
            vector_Z = vector_Z.reshape(M, a, a)
            vector_Z = lcd.LCD(vector_Z)
            Z_batch = utils.safe_concatenate(Z_batch, vector_Z)

            if (j == BATCH_SIZE):
                print "NEW BATCH", str(i)
                Z_batch_VLAD = encoding.encode_VLAD(Z_batch)
                Z_new = utils.safe_concatenate(Z_new, Z_batch_VLAD)

                # Re-initialize batch variables
                j = 0
                Z_batch = None
                W_batch = None

            j += 1

        # tail case
        if Z_batch is not None:
            print "TAIL CASE"
            print "NEW BATCH", str(i)
            Z_batch_VLAD = encoding.encode_VLAD(Z_batch)
            Z_new = utils.safe_concatenate(Z_new, Z_batch_VLAD)

        if config[0]:
            Z_new_pca = utils.pca_incremental(Z_new, PC=PC)
            print Z_new_pca.shape
            assert W_new.shape[0] == Z_new_pca.shape[0]
            X_PCA = np.concatenate((W_new, Z_new_pca), axis=1)
            data_X_PCA[demonstration] = X_PCA

        if config[1]:
            Z_new_cca = utils.cca(W_new, Z_new)
            print Z_new_cca.shape
            assert W_new.shape[0] == Z_new_cca.shape[0]
            X_CCA = np.concatenate((W_new, Z_new_cca), axis=1)
            data_X_CCA[demonstration] = X_CCA

        if config[2]:
            Z_new_grp = utils.grp(Z_new)
            print Z_new_grp.shape
            assert W_new.shape[0] == Z_new_grp.shape[0]
            X_GRP = np.concatenate((W_new, Z_new_grp), axis=1)
            data_X_GRP[demonstration] = X_GRP

    if config[0]:
        pickle.dump(data_X_PCA,
                    open(PATH_TO_FEATURES + fname + "_PCA" + ".p", "wb"))
    if config[1]:
        pickle.dump(data_X_CCA,
                    open(PATH_TO_FEATURES + fname + "_CCA" + ".p", "wb"))
    if config[2]:
        pickle.dump(data_X_GRP,
                    open(PATH_TO_FEATURES + fname + "_GRP" + ".p", "wb"))