def preprocess(list_of_demonstrations):
	camera = constants.CAMERA

	for demonstration in list_of_demonstrations:
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + camera + ".p"
		start, end = parser.get_start_end_annotations(PATH_TO_ANNOTATION)

		OLD_FRM_PATH = constants.PATH_TO_DATA + "frames_unprocessed/" + demonstration + "_" + camera + "/"
		NEW_FRM_PATH = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + camera + "/"

		command = "mkdir " + NEW_FRM_PATH
		print command
		os.mkdir(NEW_FRM_PATH)

		for frm in range(start, end + 1):
			OLD_FRM_NAME = utils.get_full_image_path(OLD_FRM_PATH, frm)

			NEW_FRM_NAME = utils.get_full_image_path(NEW_FRM_PATH, frm)
			NEW_FRM_NAME_UNSCALED = utils.get_full_image_path(NEW_FRM_PATH + "unscaled_", frm)

			command = "ffmpeg -i " + OLD_FRM_NAME + " -filter:v " + constants.CROP_PARAMS[camera] + " " + NEW_FRM_NAME_UNSCALED
			print command
			os.system(command)

			command = "ffmpeg -i " + NEW_FRM_NAME_UNSCALED + " -vf scale=640:480 " + NEW_FRM_NAME
			print command
			os.system(command)

			command = "rm " + NEW_FRM_NAME_UNSCALED
			print command
			os.system(command)	
Example #2
0
def preprocess(list_of_demonstrations):
    """
    Crop and rescale every annotated frame of each demonstration.

    For each demonstration this reads the start/end frame numbers from its
    annotation pickle, then for every frame in [start, end]:
      1. crops the unprocessed frame with ffmpeg (camera-specific crop params),
      2. rescales the cropped frame to 640x480,
      3. removes the intermediate unscaled file.

    NOTE(review): a "mkdir" command string is printed but the directory is
    actually created via os.mkdir; also, paths are interpolated unquoted into
    shell commands, so space-containing paths would break — confirm paths are
    space-free.
    """
    camera = constants.CAMERA

    for demonstration in list_of_demonstrations:
        PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + camera + ".p"
        start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

        OLD_FRM_PATH = constants.PATH_TO_DATA + "frames_unprocessed/" + demonstration + "_" + camera + "/"
        NEW_FRM_PATH = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + camera + "/"

        # Log line only; the directory is created by os.mkdir below.
        command = "mkdir " + NEW_FRM_PATH
        print command
        os.mkdir(NEW_FRM_PATH)

        for frm in range(start, end + 1):
            OLD_FRM_NAME = utils.get_full_image_path(OLD_FRM_PATH, frm)

            NEW_FRM_NAME = utils.get_full_image_path(NEW_FRM_PATH, frm)
            NEW_FRM_NAME_UNSCALED = utils.get_full_image_path(
                NEW_FRM_PATH + "unscaled_", frm)

            # Crop with the camera-specific ffmpeg filter.
            command = "ffmpeg -i " + OLD_FRM_NAME + " -filter:v " + constants.CROP_PARAMS[
                camera] + " " + NEW_FRM_NAME_UNSCALED
            print command
            os.system(command)

            # Rescale the cropped frame to the canonical 640x480 size.
            command = "ffmpeg -i " + NEW_FRM_NAME_UNSCALED + " -vf scale=640:480 " + NEW_FRM_NAME
            print command
            os.system(command)

            # Drop the intermediate unscaled frame.
            command = "rm " + NEW_FRM_NAME_UNSCALED
            print command
            os.system(command)
	def process_individual_frames(self, PATH_TO_DATA, annotations, list_of_layers, sampling_rate, LCD):
		i = 0
		label_map = {}
		frm_map = {}
		X = {}
		map_index_data = pickle.load(open(annotations, "rb"))

		for index in map_index_data:
			segments = map_index_data[index]
			print "Processing images for label " + str(index)
			for seg in segments:
				print str(seg)
				frm_num = seg[0]
				while frm_num <= seg[1]:
					print frm_num
					frm_map[i] = frm_num
					label_map[i] = index
					im = caffe.io.load_image(utils.get_full_image_path(PATH_TO_DATA, frm_num))
					self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)
					out = self.net.forward()
					for layer in list_of_layers:
						if layer == 'input':
							data = cv2.imread(full_image_path)
						else:
							data = self.net.blobs[layer].data[0]
						data = utils.flatten(data)
						utils.dict_insert(layer, data, X)
					frm_num += sampling_rate
					i += 1
		return X, label_map, frm_map
Example #4
0
    def process_individual_frames(self, PATH_TO_DATA, annotations,
                                  list_of_layers, sampling_rate):
        X = {}
        map_index_data = pickle.load(open(annotations, "rb"))

        segments = utils.get_chronological_sequences(map_index_data)
        for seg in segments:
            print str(seg)
            frm_num = seg[0]
            while frm_num <= seg[1]:
                print frm_num, annotations
                im = caffe.io.load_image(
                    utils.get_full_image_path(PATH_TO_DATA, frm_num))
                self.net.blobs['data'].data[...] = self.transformer.preprocess(
                    'data', im)
                out = self.net.forward()
                for layer in list_of_layers:
                    if layer == 'input':
                        data = cv2.imread(full_image_path)
                    else:
                        data = self.net.blobs[layer].data[0]
                    data = utils.flatten(data)
                    utils.dict_insert(layer, data, X)
                frm_num += sampling_rate
        return X
Example #5
0
def featurize_sift(list_of_demonstrations, kinematics, sr):
    """
	Extracts SIFT features for all frames in list_of_demonstrations.
	"""

    data_X_xy = {}
    data_X_x = {}

    for demonstration in list_of_demonstrations:
        print "SIFT for ", demonstration
        PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(
            constants.CAMERA) + ".p"

        Z = []
        start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
        for frm in range(start, end + 1):
            PATH_TO_IMAGE = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/"
            Z.append(
                sift.run_surf_frame(
                    utils.get_full_image_path(PATH_TO_IMAGE, frm)))

        Z = np.array(Z)
        Z = Z.reshape(Z.shape[0], 1)

        W = kinematics[demonstration]
        W_onlyx = utils.only_X(W)

        X = np.concatenate((W, Z), axis=1)
        X_onlyx = np.concatenate((W_onlyx, Z), axis=1)

        data_X_xy[demonstration] = X
        data_X_x[demonstration] = X_onlyx

    pickle.dump(data_X_xy, open(PATH_TO_FEATURES + "SIFT_xy.p", "wb"))
    pickle.dump(data_X_x, open(PATH_TO_FEATURES + "SIFT_x.p", "wb"))
Example #6
0
def featurize_sift(list_of_demonstrations, kinematics, sr):
	"""
	Extract a per-frame visual feature for every demonstration and concatenate
	it with the kinematics, dumping two pickles: one with full kinematics
	("SIFT_xy.p") and one with the only-X kinematics subset ("SIFT_x.p").

	NOTE(review): despite the name, this calls sift.run_surf_frame (SURF);
	`sr` is unused; PATH_TO_FEATURES is presumably a module-level constant —
	confirm all three.
	"""

	data_X_xy = {}
	data_X_x = {}

	for demonstration in list_of_demonstrations:
		print "SIFT for ", demonstration
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"

		Z = []
		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
		for frm in range(start, end + 1):
			# NOTE(review): PATH_TO_IMAGE is loop-invariant; it is rebuilt
			# every frame here.
			PATH_TO_IMAGE = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/"
			Z.append(sift.run_surf_frame(utils.get_full_image_path(PATH_TO_IMAGE, frm)))

		# Reshape to a column vector (assumes run_surf_frame returns one
		# scalar per frame — confirm).
		Z = np.array(Z)
		Z = Z.reshape(Z.shape[0],1)

		W = kinematics[demonstration]
		W_onlyx = utils.only_X(W)

		X = np.concatenate((W, Z), axis = 1)
		X_onlyx = np.concatenate((W_onlyx, Z), axis = 1)

		data_X_xy[demonstration] = X
		data_X_x[demonstration] = X_onlyx

	pickle.dump(data_X_xy, open(PATH_TO_FEATURES + "SIFT_xy.p", "wb"))
	pickle.dump(data_X_x, open(PATH_TO_FEATURES + "SIFT_x.p", "wb"))
	def process_batch(self, PATH_TO_DATA, annotations, list_of_layers, sampling_rate, batch_size, LCD):
		"""
		Run annotated frames through the CNN in fixed-size batches and encode
		each completed batch with VLAD.

		PATH_TO_DATA: directory containing the frame images.
		annotations: pickle mapping label index -> list of (start, end) segments.
		list_of_layers: layers to extract ('input' = raw pixels; invalid with LCD).
		sampling_rate: frame stride within a segment.
		batch_size: number of frames folded into one VLAD-encoded feature row.
		LCD: if truthy, apply lcd.LCD to each layer activation before batching.

		Returns (X, label_map, frm_map): X maps layer name -> encoded features;
		label_map/frm_map record the label and starting frame of each batch.

		NOTE(review): the while condition runs up to batch_size frames past
		seg[1], presumably so the final partial batch is completed — confirm
		frames beyond the segment end actually exist on disk.
		"""
		i = 0
		label_map = {}
		frm_map = {}
		X = {}
		map_index_data = pickle.load(open(annotations, "rb"))

		for index in map_index_data:
			segments = map_index_data[index]
			# print "Processing images for label " + str(index)
			for seg in segments:
				# print str(seg)
				frm_num = seg[0]
				b = 1 #Running count of num frames in batch
				batch_data = {}
				while frm_num <= (seg[1] + batch_size):
					# Initialize Batch
					if b == 1:
						label_map[i] = index
						frm_map[i] = frm_num
					# Process frames and build up features in batches
					# BUG FIX: full_image_path was previously undefined when
					# layer == 'input'; compute it once per frame and reuse it.
					full_image_path = utils.get_full_image_path(PATH_TO_DATA, frm_num)
					im = caffe.io.load_image(full_image_path)
					self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)
					out = self.net.forward()
					for layer in list_of_layers:
						if LCD:
							if layer == 'input':
								print "ERROR: Cannot do LCD on input layer"
								sys.exit()
							data = self.net.blobs[layer].data[0]
							data = lcd.LCD(data)
							utils.dict_insert(layer, data, batch_data, axis = 0)
						else:
							if layer == 'input':
								data = cv2.imread(full_image_path)
							else:
								data = self.net.blobs[layer].data[0]
							data = utils.flatten(data)
							utils.dict_insert(layer, data, batch_data, axis = 1)
					if b == batch_size:
						print("Batch %3d" % i)
						b = 0
						i += 1
						# Concatenate with main data dictionary
						for layer in list_of_layers:
							data = encoding.encode_VLAD(batch_data[layer] , 5)
							utils.dict_insert(layer, data, X)
						batch_data = {}

					b += 1
					frm_num += sampling_rate
		return X, label_map, frm_map
Example #8
0
def run_sift_images():
	list_of_demonstrations = ["Suturing_E001",]
	for demonstration in list_of_demonstrations:
		print "SIFT for ", demonstration
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"

		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
		for frm in range(start, end + 1):
			if ((frm % 3) == 0):
				PATH_TO_IMAGE = utils.get_full_image_path(constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/", frm)

				print PATH_TO_IMAGE
				img = cv2.imread(PATH_TO_IMAGE)
				sift = cv2.SIFT(nfeatures = 50)
				kp, des = sift.detectAndCompute(img, None)
				img = cv2.drawKeypoints(img, kp)
				cv2.imshow('sift',img)
				cv2.imwrite('../sift_images/' + str(frm) +".jpg",img)
Example #9
0
def plot_interactive():
	"""
	Step through the changepoint/label frames of Suturing_E001, showing each
	video frame (left subplot) alongside the AlexNet embedding with
	changepoints and labels overlaid (right subplot).
	"""
	results = pickle.load(open("pickle_files/Suturing_E001_changepoints_Z2.p", "rb"))
	jackknife_index = 3
	demonstration = "Suturing_E001"

	changepoints = results[demonstration]['changepoints'][jackknife_index]
	labels = results[demonstration]['plot_labels_automatic']

	PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"
	start_d, end_d = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

	# Map absolute frame numbers to plot indices; /12 is Python 2 integer
	# division (presumably the frame sampling stride — confirm).
	changepoints_processed = []
	for elem in changepoints:
		changepoints_processed.append((elem[0] - start_d)/12)

	labels_processed = []
	for elem in labels:
		labels_processed.append((elem[0] - start_d)/12)

	total_frames = [int(elem[0]) for elem in changepoints]
	total_frames += [int(elem[0]) for elem in labels]
	total_frames.sort()

	# Hoisted out of the loop: this pickle is identical on every iteration,
	# so load it once instead of once per frame.
	data = pickle.load(open("pickle_files/Suturing_E001_AlexNet_dimred.p", "rb"))

	for end_frame in total_frames:

		# Frame
		ax1 = plt.subplot(121)
		PATH_TO_FIGURE = constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + str(constants.CAMERA) + "/"
		im = mpimg.imread(utils.get_full_image_path(PATH_TO_FIGURE, end_frame))
		ax1.set_title(str(end_frame))
		ax1.xaxis.set_visible(False)
		ax1.yaxis.set_visible(False)
		ax1.imshow(im)

		# AlexNet
		ax2 = plt.subplot(122)
		ax2.set_title('AlexNet')
		ax2.set_ylim([-0.1, 1.1])
		ax2.set_xlim([-0.1, 1.1])
		ax2.xaxis.set_visible(False)
		ax2.yaxis.set_visible(False)
		plot_AlexNet(data, demonstration, changepoints = changepoints_processed, plotter = ax2, labels = labels_processed, plot_tsne = True, end_frame = (end_frame - start_d)/12, interactive_mode = True)
	def process_individual_frames_2(self, PATH_TO_DATA, annotations, list_of_layers, sampling_rate):
		X = {}
		map_index_data = pickle.load(open(annotations, "rb"))

		segments = utils.get_chronological_sequences(map_index_data)
		for seg in segments:
			print str(seg)
			frm_num = seg[0]
			while frm_num <= seg[1]:
				print frm_num, annotations
				im = caffe.io.load_image(utils.get_full_image_path(PATH_TO_DATA, frm_num))
				self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)
				out = self.net.forward()
				for layer in list_of_layers:
					if layer == 'input':
						data = cv2.imread(full_image_path)
					else:
						data = self.net.blobs[layer].data[0]
					data = utils.flatten(data)
					utils.dict_insert(layer, data, X)
				frm_num += sampling_rate
		return X
Example #11
0
def generate_sift_features():
	"""
	Detect the top-20 SIFT keypoints on every annotated frame of plane_9,
	write keypoint-annotated images to ../sift_images/<demonstration>/, and
	dump two feature matrices per demonstration:
	  X1: rows of [response, x, y, size, angle] per keypoint (top 20);
	  X2: rows of [x, y] per keypoint (top 20).

	NOTE(review): the except branch drops into IPython.embed() — a debugging
	leftover; cv2.imshow is called without cv2.waitKey, so nothing renders.
	"""
	list_of_demonstrations = ["plane_9",]
	for demonstration in list_of_demonstrations:
		print "SIFT for ", demonstration
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"

		X1 = None
		X2 = None
		n_features = 20
		sift = cv2.SIFT(nfeatures = n_features)

		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
		for frm in range(start, end + 1):
			# if ((frm % 3) == 0):
				PATH_TO_IMAGE = utils.get_full_image_path(constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/", frm)

				print PATH_TO_IMAGE
				img = cv2.imread(PATH_TO_IMAGE)
				kp, des = sift.detectAndCompute(img, None)
				img = cv2.drawKeypoints(img, kp)
				cv2.imshow('sift',img)
				cv2.imwrite('../sift_images/' + demonstration + "/" + str(frm) +".jpg",img)

				# Keep the strongest keypoints first, then truncate each
				# feature vector to exactly n_features keypoints' worth.
				vector1 = []
				vector2 = []
				kp.sort(key = lambda x: x.response, reverse = True)
				for kp_elem in kp:
					vector1 += [kp_elem.response, kp_elem.pt[0], kp_elem.pt[1], kp_elem.size, kp_elem.angle]
					vector2 += [kp_elem.pt[0], kp_elem.pt[1]]
				try:
					X1 = utils.safe_concatenate(X1, utils.reshape(np.array(vector1[:n_features * 5])))
					X2 = utils.safe_concatenate(X2, utils.reshape(np.array(vector2[:n_features * 2])))
				except ValueError as e:
					IPython.embed()

		pickle.dump(X1, open("sift_features/SIFT_" + demonstration + "_1.p", "wb"))
		pickle.dump(X2, open("sift_features/SIFT_" + demonstration + "_2.p", "wb"))
Example #12
0
def generate_raw_image_pixels(list_of_demonstrations):
	"""
	Build a design matrix of raw image pixels (every 6th annotated frame of
	each demonstration), then reduce it with PCA and t-SNE.

	NOTE(review): only X_tsne is pickled — data_dimred (and hence X_pca) is
	computed but never used — and the output filename uses the *last*
	demonstration in the list; confirm both are intentional.
	"""

	# Design matrix of raw image pixels
	X = None

	for demonstration in list_of_demonstrations:
		print "Raw image pixels ", demonstration
		PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(constants.CAMERA) + ".p"

		start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)
		for frm in range(start, end + 1):
			if ((frm % 6) == 0):
				PATH_TO_IMAGE = utils.get_full_image_path(constants.PATH_TO_DATA + constants.NEW_FRAMES_FOLDER + demonstration + "_" + constants.CAMERA + "/", frm)
				print demonstration, str(frm)
				# Each image becomes one flattened row of X.
				img = utils.reshape(cv2.imread(PATH_TO_IMAGE).flatten())
				X = utils.safe_concatenate(X, img)

	X_pca = utils.pca(X, PC = 2)
	X_tsne = utils.tsne(X)
	data_dimred = [X_pca, X_tsne]
	pickle.dump(X_tsne, open("raw_pixel_" + demonstration + "_dimred.p", "wb"))