def __init__(self, img1, img2):
    """
    Load both images, build grayscale copies, and set up dlib face detection.

    :param img1: Path of the source-face image.
    :param img2: Path of the destination image.
    """
    self.img1, self.img2 = self.init_images(img1, img2)
    self.img1_gray = cv2.cvtColor(self.img1, cv2.COLOR_BGR2GRAY)
    self.img2_gray = cv2.cvtColor(self.img2, cv2.COLOR_BGR2GRAY)
    # dlib face detector plus the 68-point landmark predictor model.
    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor('utils/shape_predictor_68_face_landmarks.dat')
    self.imgStack = ImageUtilities()
    self.landmarks_points = ()       # filled later with detected landmark points
    self.ordered_triangles = {}      # triangle index -> landmark-index triple
    self.img2_copy = self.img2.copy()
def classify(self, image: Image) -> ImageClassification:
    """
    Classify a single image with the online network and return the result.

    The image is saved to a temporary file, uploaded to the classification
    API, and the JSON response is converted into an ImageClassification.
    Service outages (503) and rate limiting (429) are handled by waiting
    and then retrying recursively; errors are printed to the terminal.

    :param image: The input image.
    :return: The classification result.
    :raises ConnectionRefusedError: On HTTP bad request or unauthorized
        request (e.g. a wrong API key).
    """
    # Start a new rate-limit window on the first request of a batch.
    if self._counter == 0:
        self._time_start = time.time()
    self._counter += 1

    file = ImageUtilities.save_image_to_tempfile(image)
    data = {'key': OnlineClassifierConfiguration.API_KEY}
    try:
        # Context manager closes the upload handle deterministically
        # (the original code leaked the open file object).
        with open(file, 'rb') as image_file:
            resp = requests.post(OnlineClassifierConfiguration.API_URL,
                                 data=data,
                                 files={'image': image_file})
    finally:
        # The temp file is removed even if the request itself fails.
        os.remove(file)

    if resp.status_code == OnlineClassifier.__API_RESPONSE_CODE_BAD_REQUEST:
        print_error("Online Classifier: Bad Request")
        print_debug(resp)
        raise ConnectionRefusedError("Bad Request")
    elif resp.status_code == OnlineClassifier.__API_RESPONSE_CODE_UNAUTHORIZED:
        print_error("Online Classifier: Unauthorized Request")
        print_debug("Used API key: " + OnlineClassifierConfiguration.API_KEY)
        raise ConnectionRefusedError("Unauthorized Request")
    elif resp.status_code == OnlineClassifier.__API_RESPONSE_CODE_SERVICE_UNAVAILABLE:
        print_error("Online Classifier: Service Unavailable")
        print_debug(resp)
        print_countdown(wait_time=10, prefix_text="Trying again in")
        return self.classify(image)
    elif resp.status_code == OnlineClassifier.__API_RESPONSE_CODE_TOO_MANY_REQUESTS:
        # Wait out the remainder of the rate-limit interval, then retry.
        elapsed = time.time() - self._time_start
        wait_time = int(max(OnlineClassifier.__API_RATE_LIMIT_INTERVAL - elapsed, 0)) + 1
        self._counter = 0
        print_countdown(wait_time=wait_time, prefix_text="Too many requests. Trying again in")
        return self.classify(image)

    try:
        data = resp.json()
    except JSONDecodeError:
        print_error("Online Classifier: Could not decode JSON")
        print_debug(resp)
        # BUG FIX: the original retried with `self.classify(file)` — `file`
        # is the already-deleted temp-file *path*, not an image. Retry with
        # the original image instead.
        return self.classify(image)

    classes = [Class(cl["class"], cl["confidence"]) for cl in data]
    return ImageClassification(classes)
def __iter__(self):
    """
    This function is called when the population generator is iterated.
    It yields a set of image individuals sampled from the directory.

    :return: Yields image individuals
    """
    ppm_files = [
        candidate for candidate in os.listdir(self._directory)
        if os.fsdecode(candidate).endswith(".ppm")
    ]
    # Draw `size` distinct files at random.
    for chosen in rd.sample(ppm_files, self.size):
        individual_img = Image.open(os.path.join(self._directory, chosen))
        individual_img = ImageOps.fit(individual_img,
                                      ClassifierConfiguration.DESIRED_IMAGE_DIMENSIONS,
                                      Image.ANTIALIAS)
        ImageUtilities.rearrange_image(individual_img)
        yield ImageIndividual(image=individual_img)
        self._progress_bar_step()
def _generate_noise(self) -> Image:
    """
    Generates a random noise image. Every pixel value is produced by the
    get-pixel-value helper.

    :return: The random noise image.
    """
    img, pixel_count = ImageUtilities.get_empty_image()
    # One random value per pixel, filled in a single pass.
    img.putdata([self._get_pixel_value() for _ in range(pixel_count)])
    return img
def __iter__(self):
    """
    This function is called when the population generator is iterated.
    It yields a set of geometric individuals: each non-black pixel of the
    template image is replaced with a random RGB color, black pixels
    ((0, 0, 0)) are kept black.

    :return: Yields geometric individuals
    """
    # Inclusive bounds for a random 8-bit channel value.
    # (Renamed from `min`/`max`, which shadowed the builtins.)
    low, high = 0, 255
    for _ in range(self.size):
        template = self._img.getdata()
        data = [
            (rd.randint(low, high), rd.randint(low, high), rd.randint(low, high))
            if pixel != (0, 0, 0) else (0, 0, 0)
            for pixel in template
        ]
        img, _pixel_count = ImageUtilities.get_empty_image()
        img.putdata(data)
        yield ImageIndividual(image=img)
        self._progress_bar_step()
def _generate_noise(self) -> Image:
    """
    Generates an image with the desired color distribution.

    :return: The image.
    """
    img, pixel_count = ImageUtilities.get_empty_image()
    # Sample one color index per pixel according to the configured probabilities.
    sampled = list(np.random.choice(list(range(len(self._colors))),
                                    p=self._probabilities,
                                    size=pixel_count))
    frequency = Counter(sampled)
    # Place the most frequent colors first in the pixel sequence.
    ordered = sorted(sampled, key=frequency.get, reverse=True)
    img.putdata([self._colors[idx] for idx in ordered])
    return img
class SWAP:
    """
    Swap a face from one image onto another using dlib's 68-point facial
    landmarks: the source face is split into triangles, each triangle is
    affine-warped onto the matching triangle of the target face, and the
    result is blended with cv2.seamlessClone.
    """

    visualize = False
    img_path = '/home/viraj-uk/HUSTLE/FACE_SWAPPING/images'  # replace image path accordingly

    def __init__(self, img1, img2):
        """
        Load both images, build grayscale copies, and set up dlib detection.

        :param img1: Path of the source-face image.
        :param img2: Path of the destination image.
        """
        self.img1, self.img2 = self.init_images(img1, img2)
        self.img1_gray = cv2.cvtColor(self.img1, cv2.COLOR_BGR2GRAY)
        self.img2_gray = cv2.cvtColor(self.img2, cv2.COLOR_BGR2GRAY)
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor('utils/shape_predictor_68_face_landmarks.dat')
        self.imgStack = ImageUtilities()
        self.landmarks_points = ()       # set in swap() as a LandmarkPoints namedtuple
        self.ordered_triangles = {}      # triangle index -> landmark-index triple
        self.img2_copy = self.img2.copy()

    @staticmethod
    def init_images(img1, img2):
        """
        Read the two images from disk.

        :param img1: Path of the source-face image.
        :param img2: Path of the destination image.
        :return: The two loaded BGR images.
        :raises FileNotFoundError: If a path is None or an image fails to load.
        """
        if img1 is None or img2 is None:
            raise FileNotFoundError
        loaded1 = cv2.imread(img1)
        loaded2 = cv2.imread(img2)
        # BUG FIX: cv2.imread signals failure by returning None rather than
        # raising; the original returned the Nones unchecked and crashed
        # later in cvtColor with a confusing error.
        if loaded1 is None or loaded2 is None:
            raise FileNotFoundError
        return loaded1, loaded2

    def show_images(self):
        """Show and save a side-by-side view of source, result, and target."""
        row_1 = [self.img1, self.img2_copy, self.img2]
        stacked_image = self.imgStack.stack_images(0.7, row_1)
        cv2.imshow('Stacked Image', stacked_image)
        cv2.imwrite(os.path.join(self.img_path, 'result.jpg'), stacked_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def face_landmarks(self, gray):
        """
        Detect faces in a grayscale image and return their landmark points.

        :param gray: Grayscale input image.
        :return: List of (x, y) landmark tuples, 68 per detected face.
        """
        faces = self.detector(gray)
        landmarks_points = []
        for face in faces:
            landmarks = self.predictor(gray, face)
            for n in range(0, 68):
                x = landmarks.part(n).x
                y = landmarks.part(n).y
                landmarks_points.append((x, y))
        return landmarks_points

    def get_triangles(self, which):
        """
        Compute the Delaunay triangulation of one face's landmark points.

        :param which: 'first' or 'second' — field of the landmarks namedtuple.
        :return: int32 array of triangles, each row (x1, y1, x2, y2, x3, y3).
        """
        points = getattr(self.landmarks_points, which)
        rect = cv2.boundingRect(np.array(points, np.int32))
        subdivision = cv2.Subdiv2D(rect)
        subdivision.insert(points)
        return np.array(subdivision.getTriangleList(), np.int32)

    def first_order_triangles(self):
        """
        Express image 1's Delaunay triangles as triples of landmark indices
        (stored in self.ordered_triangles) so the same triangles can be
        rebuilt on image 2's landmarks.
        """
        # Get Delaunay triangles for both faces. (Hoisted: the original
        # recomputed the first triangulation a second time inside min().)
        first_triangles = self.get_triangles('first')
        second_triangles = self.get_triangles('second')
        # Compare Delaunay triangle counts of both images.
        minimum_triangles = min(len(first_triangles), len(second_triangles))
        landmark_array = np.array(self.landmarks_points.first, np.int32)
        for i in range(0, minimum_triangles):
            pt1 = (first_triangles[i][0], first_triangles[i][1])
            pt2 = (first_triangles[i][2], first_triangles[i][3])
            pt3 = (first_triangles[i][4], first_triangles[i][5])
            # Map each vertex back to its landmark index.
            triangle_pt1 = np.where((landmark_array == pt1).all(axis=1))
            triangle_pt2 = np.where((landmark_array == pt2).all(axis=1))
            triangle_pt3 = np.where((landmark_array == pt3).all(axis=1))
            self.ordered_triangles[i] = [triangle_pt1[0][0],
                                         triangle_pt2[0][0],
                                         triangle_pt3[0][0]]

    def swap(self):
        """Warp each source triangle onto the target face and blend seamlessly."""
        # Landmark points for both faces.
        LandmarkPoints = namedtuple('LandmarkPoints', ['first', 'second'])
        self.landmarks_points = LandmarkPoints(self.face_landmarks(self.img1_gray),
                                               self.face_landmarks(self.img2_gray))
        # Order triangles on image 1, based on detected landmark points.
        self.first_order_triangles()
        accumulated_mask = np.zeros((self.img2.shape[0], self.img2.shape[1]), np.uint8)
        for key in self.ordered_triangles:
            pt1, pt2, pt3 = self.ordered_triangles[key]
            points = np.array([[self.landmarks_points.first[pt1],
                                self.landmarks_points.first[pt2],
                                self.landmarks_points.first[pt3]]], np.int32)
            points2 = np.array([[self.landmarks_points.second[pt1],
                                 self.landmarks_points.second[pt2],
                                 self.landmarks_points.second[pt3]]], np.int32)
            # Affine transform mapping the source triangle onto the target one.
            M = cv2.getAffineTransform(np.float32(points), np.float32(points2))
            step_warped_image = cv2.warpAffine(self.img1, M,
                                               (self.img1.shape[1], self.img1.shape[0]))
            step_warped_mask_triangle = np.zeros((step_warped_image.shape[0],
                                                  step_warped_image.shape[1]), np.uint8)
            step_warped_mask_triangle = cv2.fillPoly(step_warped_mask_triangle, points2, (255))
            # Black out the triangle area in the target, then paste the warped patch.
            self.img2 = cv2.subtract(self.img2,
                                     cv2.cvtColor(step_warped_mask_triangle, cv2.COLOR_GRAY2BGR))
            step_warped_triangle = cv2.bitwise_and(step_warped_image, step_warped_image,
                                                   mask=step_warped_mask_triangle)
            self.img2 = cv2.add(self.img2, step_warped_triangle)
            accumulated_mask = cv2.add(accumulated_mask, step_warped_mask_triangle)
        # Blend the swapped face region into the untouched copy of image 2.
        (x, y, w, h) = cv2.boundingRect(accumulated_mask)
        center = (int(x + w / 2), int(y + h / 2))
        self.img2 = cv2.seamlessClone(self.img2, self.img2_copy, accumulated_mask,
                                      center, cv2.NORMAL_CLONE)
from utils.image_utilities import ImageUtilities
from utils.road_sign_class_mapper_utilities import RoadSignClassMapper

image_path = '../GTSRB/Final_Training/Images'
data = []
classifier = OnlineClassifier()
df = None
mapper = RoadSignClassMapper()

# Sample 20 random images from each of the 43 GTSRB classes that the
# mapper has no name for, classify them online, and record the top class.
for i in range(43):
    # Idiom fix: was `if not mapper.get_name_by_class(i) is None`.
    if mapper.get_name_by_class(i) is not None:
        continue
    dir_name = os.path.join(image_path, str(i).zfill(5))
    for k in range(20):
        file = random.choice(os.listdir(dir_name))
        image = Image.open(os.path.join(dir_name, file))
        image = ImageOps.fit(image,
                             ClassifierConfiguration.DESIRED_IMAGE_DIMENSIONS,
                             Image.ANTIALIAS)
        file = ImageUtilities.save_image_to_tempfile(image)
        classification = classifier.classify(file)
        entry = {'class_id': i}
        # Highest-confidence class (equivalent to sorted(..., reverse=True)[0]).
        c = max(classification.classes, key=lambda x: x.confidence)
        entry['class'] = c.name
        entry['confidence'] = c.confidence
        data.append(entry)
        # Rebuild and print the frame each step as a progress display.
        df = pd.DataFrame(data)
        print(df.tail(20))

df.to_csv('class_names.csv')