def process(self):
    import numpy
    from calculators.image import ImageData
    audio = self.get(0)
    if isinstance(audio, AudioData):
        hop_length = 256      # number of samples per time-step in spectrogram
        n_mels = 128          # number of bins in spectrogram (height of image)
        time_steps = 384      # number of time-steps (width of image)
        start_sample = 0      # starting at beginning
        length_samples = time_steps * hop_length
        sr = 16000
        # Convert the raw 16-bit PCM buffer to floats in [-1.0, 1.0]
        # (numpy.fromstring is deprecated; frombuffer is the replacement)
        y = numpy.frombuffer(audio.audio, dtype=numpy.int16) / 32768.0
        # Accumulate incoming samples until a full window is available
        if self.audio is None:
            self.audio = y
        else:
            self.audio = numpy.append(self.audio, y)
        # Keep only the most recent length_samples samples
        if len(self.audio) > length_samples:
            self.audio = self.audio[len(self.audio) - length_samples:]
        y = self.audio
        window = y[start_sample:start_sample + length_samples]
        img = spectrogram_image(window, sr=sr, hop_length=hop_length, n_mels=n_mels)
        self.set_output(0, ImageData(img, audio.timestamp))
        return True
    return False
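# process() above relies on a spectrogram_image() helper that is not shown in
# this snippet. A minimal sketch of what such a helper could look like,
# assuming the audio window is turned into a mel spectrogram with librosa
# (librosa and the exact 8-bit scaling are assumptions, not part of the
# original code):
def spectrogram_image(y, sr, hop_length, n_mels):
    import numpy
    import librosa
    # Mel-scaled power spectrogram of the audio window
    mels = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels,
                                          hop_length=hop_length)
    # Convert power to decibels and rescale to an 8-bit grayscale image
    db = librosa.power_to_db(mels, ref=numpy.max)
    img = 255 * (db - db.min()) / (db.max() - db.min() + 1e-9)
    return img.astype(numpy.uint8)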
def process(self):
    image = self.get(0)
    if isinstance(image, ImageData):
        nf = image.image.copy()
        nf = self._process_image(nf)
        self.set_output(0, ImageData(nf, image.timestamp))
        return True
    return False
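# The node above delegates the per-frame work to self._process_image(), so a
# concrete node only has to override that hook. A hypothetical subclass (the
# base class name ImageProcessor and the grayscale operation are assumptions
# for illustration, not part of the original code):
class GrayscaleNode(ImageProcessor):
    def _process_image(self, image):
        import cv2
        # Convert to grayscale, then back to 3 channels so downstream
        # nodes keep receiving BGR frames
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)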
def process(self):
    # Wait until both inputs (the image and the hand detection result) arrive
    if self.input_data[0] is not None and self.input_data[1] is not None:
        image = self.get(0)
        (kp, box) = self.get(1)
        if isinstance(image, ImageData):
            nf = image.image.copy()
            # Only draw when the detector actually found a hand
            if kp is not None:
                handtracker.hand_tracker.draw_hand(nf, kp)
                handtracker.hand_tracker.draw_box(nf, box)
            self.set_output(0, ImageData(nf, image.timestamp))
            return True
    return False
def process(self):
    image = self.get(0)
    if isinstance(image, ImageData):
        nf = image.image.copy()
        # The detector expects RGB; OpenCV frames are BGR, so flip the channels
        img = nf[:, :, ::-1]
        kp, box = self.detector(img)
        # Draw the landmarks and bounding box when a hand was detected
        if kp is not None:
            handtracker.hand_tracker.draw_hand(nf, kp)
            handtracker.hand_tracker.draw_box(nf, box)
        self.set_output(0, ImageData(nf, image.timestamp))
        self.set_output(1, (kp, box))
        return True
    return False
def process(self):
    image = self.get(0)
    if isinstance(image, ImageData):
        frame = image.image.copy()
        # Scale image down for faster processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # face_recognition expects RGB images, while OpenCV frames are BGR
        rgb_small_frame = small_frame[:, :, ::-1]
        face_locations = face_recognition.face_locations(rgb_small_frame)
        # Loop through each face found in the frame
        for (top, right, bottom, left) in face_locations:
            # Scale coordinates back to original image size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 3)
        self.set_output(0, ImageData(frame, image.timestamp))
        return True
    return False