Example #1
def vlad_experiment(X,
                    list_of_K,
                    list_of_PC,
                    label_map,
                    frm_map,
                    figure_name,
                    list_of_layers=constants.alex_net_layers):
    """Sweep the VLAD hyperparameters (K clusters, PC principal components)
    over each CNN layer, then visualize the encodings with PCA and t-SNE."""
    assert len(list_of_K) != 0
    assert len(list_of_PC) != 0

    for K in list_of_K:
        for PC in list_of_PC:
            for layer in list_of_layers:
                print("[VLAD] K = %3d  PC = %3d  layer = %s " % (K, PC, layer))

                X_layer = X[layer]
                X_vlad = encoding.encode_VLAD(X_layer, K, PC)

                X_vlad_pca = pca(X_vlad)
                X_vlad_tsne = tsne(X_vlad)

                name = figure_name + '_VLAD_' + 'K' + str(K) + '_PC' + str(
                    PC) + '_' + layer
                plot_annotated_embedding(X_vlad_pca,
                                         label_map,
                                         frm_map,
                                         name + '_pca',
                                         title="PCA - " + name)
                plot_annotated_embedding(X_vlad_tsne,
                                         label_map,
                                         frm_map,
                                         name + '_tsne',
                                         title="t-SNE - " + name)
Example #2
	def process_batch(self, PATH_TO_DATA, annotations, list_of_layers, sampling_rate, batch_size, LCD):
		"""Run every annotated frame segment through the net, sampling frames at
		sampling_rate and accumulating per-layer features in batches of batch_size.
		Returns (X, label_map, frm_map)."""
		i = 0
		label_map = {}
		frm_map = {}
		X = {}
		map_index_data = pickle.load(open(annotations, "rb"))

		for index in map_index_data:
			segments = map_index_data[index]
			# print "Processing images for label " + str(index)
			for seg in segments:
				# print str(seg)
				frm_num = seg[0]
				b = 1 #Running count of num frames in batch
				batch_data = {}
				while frm_num <= (seg[1] + batch_size):
					# Initialize Batch
					if b == 1:
						label_map[i] = index
						frm_map[i] = frm_num
					# Process frames and build up features in batches
					im = caffe.io.load_image(utils.get_full_image_path(PATH_TO_DATA, frm_num))
					self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)
					out = self.net.forward()
					for layer in list_of_layers:
						if LCD:
							if layer == 'input':
								print "ERROR: Cannot do LCD on input layer"
								sys.exit()
							data = self.net.blobs[layer].data[0]
							data = lcd.LCD(data)
							utils.dict_insert(layer, data, batch_data, axis = 0)
						else:
							if layer == 'input':
								data = cv2.imread(utils.get_full_image_path(PATH_TO_DATA, frm_num))
							else:
								data = self.net.blobs[layer].data[0]
							data = utils.flatten(data)
							utils.dict_insert(layer, data, batch_data, axis = 1)
					if b == batch_size:
						print("Batch %3d" % i)
						b = 0
						i += 1
						# Concatenate with main data dictionary
						for layer in list_of_layers:
							data = encoding.encode_VLAD(batch_data[layer] , 5)
							utils.dict_insert(layer, data, X)
						batch_data = {}

					b += 1
					frm_num += sampling_rate
		return X, label_map, frm_map
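process_batch leans on a utils.dict_insert helper that is not shown. A plausible reading of the call sites (create the per-layer entry on first use, otherwise concatenate along the given axis) is sketched below; this is an assumption about an unshown utility, not the project's actual helper.

import numpy as np

def dict_insert_sketch(key, value, store, axis=0):
    # Hypothetical stand-in for utils.dict_insert: accumulate `value` under
    # `key`, creating the entry on first insertion.
    value = np.asarray(value)
    if key not in store:
        store[key] = value
    else:
        store[key] = np.concatenate((store[key], value), axis=axis)

With such a helper, batch_data[layer] grows one frame at a time until b reaches batch_size, at which point the batch is VLAD-encoded and merged into X.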
Example #3
def featurize_LCD_VLAD(list_of_demonstrations,
                       kinematics,
                       layer,
                       net_name,
                       folder,
                       dimensions,
                       batch_size,
                       fname,
                       config=[True, True, True]):
    """Featurize each demonstration by applying LCD + VLAD to batches of CNN
    features, reducing with PCA / CCA / GRP, and fusing with kinematics."""
    M = dimensions[0]
    a = dimensions[1]

    print "Featurizing LCD + VLAD: ", layer, net_name, folder, M, a, batch_size

    BATCH_SIZE = batch_size

    if constants.SIMULATION:
        BATCH_SIZE = 5

    data_X_PCA = {}
    data_X_CCA = {}
    data_X_GRP = {}

    size_sampled_matrices = [
        utils.sample_matrix(kinematics[demo],
                            sampling_rate=BATCH_SIZE).shape[0]
        for demo in list_of_demonstrations
    ]
    PC = min(100, min(size_sampled_matrices))
    print "PC: ", PC

    for demonstration in list_of_demonstrations:
        print demonstration
        W = kinematics[demonstration]
        Z = load_cnn_features(demonstration, layer, folder, net_name)
        W_new = utils.sample_matrix(W, sampling_rate=BATCH_SIZE)

        Z_batch = None
        W_batch = None
        j = 1

        Z_new = None

        PATH_TO_ANNOTATION = constants.PATH_TO_DATA + constants.ANNOTATIONS_FOLDER + demonstration + "_" + str(
            constants.CAMERA) + ".p"
        start, end = utils.get_start_end_annotations(PATH_TO_ANNOTATION)

        # One row of Z per sampled frame; iterate over them and assemble batches.
        for i in range(len(Z)):

            vector_W = W[i]
            W_batch = utils.safe_concatenate(W_batch, vector_W)

            vector_Z = Z[i]
            vector_Z = vector_Z.reshape(M, a, a)
            vector_Z = lcd.LCD(vector_Z)
            Z_batch = utils.safe_concatenate(Z_batch, vector_Z)

            if (j == BATCH_SIZE):
                print "NEW BATCH", str(i)
                Z_batch_VLAD = encoding.encode_VLAD(Z_batch)
                Z_new = utils.safe_concatenate(Z_new, Z_batch_VLAD)

                # Re-initialize batch variables
                j = 0
                Z_batch = None
                W_batch = None

            j += 1

        # tail case
        if Z_batch is not None:
            print "TAIL CASE"
            print "NEW BATCH", str(i)
            Z_batch_VLAD = encoding.encode_VLAD(Z_batch)
            Z_new = utils.safe_concatenate(Z_new, Z_batch_VLAD)

        if config[0]:
            Z_new_pca = utils.pca_incremental(Z_new, PC=PC)
            print Z_new_pca.shape
            assert W_new.shape[0] == Z_new_pca.shape[0]
            X_PCA = np.concatenate((W_new, Z_new_pca), axis=1)
            data_X_PCA[demonstration] = X_PCA

        if config[1]:
            Z_new_cca = utils.cca(W_new, Z_new)
            print Z_new_cca.shape
            assert W_new.shape[0] == Z_new_cca.shape[0]
            X_CCA = np.concatenate((W_new, Z_new_cca), axis=1)
            data_X_CCA[demonstration] = X_CCA

        if config[2]:
            Z_new_grp = utils.grp(Z_new)
            print Z_new_grp.shape
            assert W_new.shape[0] == Z_new_grp.shape[0]
            X_GRP = np.concatenate((W_new, Z_new_grp), axis=1)
            data_X_GRP[demonstration] = X_GRP

    if config[0]:
        pickle.dump(data_X_PCA,
                    open(PATH_TO_FEATURES + fname + "_PCA" + ".p", "wb"))
    if config[1]:
        pickle.dump(data_X_CCA,
                    open(PATH_TO_FEATURES + fname + "_CCA" + ".p", "wb"))
    if config[2]:
        pickle.dump(data_X_GRP,
                    open(PATH_TO_FEATURES + fname + "_GRP" + ".p", "wb"))
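featurize_LCD_VLAD builds its batches with utils.safe_concatenate, initializing the accumulators to None. That helper is also not shown; given how it is called, it presumably behaves like the sketch below (again an assumption about an unshown utility, not the project's code).

import numpy as np

def safe_concatenate_sketch(acc, block, axis=0):
    # Hypothetical stand-in for utils.safe_concatenate: treat None as an
    # empty accumulator, otherwise stack the new block onto it.
    block = np.atleast_2d(block)
    if acc is None:
        return block
    return np.concatenate((acc, block), axis=axis)

This matches the tail-case handling above: after the loop, any partially filled Z_batch is still an ordinary array and can be VLAD-encoded as one final batch.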