def get_all_images(self):
    """Drain the consumer and return every decoded frame as a list.

    Each consumed message is expected to carry its serialized frame
    under the ``'data'`` key.
    """
    return [frame_from_bytes_str(message['data']) for message in self.consumer]
def load(self, input_data=None, load_last_saved=False):
    """Load training data and persist it for later reuse.

    Three sources, in priority order:
    1. ``load_last_saved=True`` — reload the pickle written by the
       previous call (no new data is saved in this mode).
    2. ``input_data`` — data supplied directly by the caller, either a
       dict mapping sequence name -> list of frames, or a flat sequence.
    3. Neither — consume messages from the Kafka consumer, grouping
       frames by their ``sequence_name`` field.

    Raises:
        DataNotFoundException: if no training data is available.
    """
    if load_last_saved:
        # Re-open the pickle written by the previous load() call.
        last_saved_training_count = self.training_count - 1
        self.train_name = 'training_data_' + str(last_saved_training_count)
        path = os.path.join(config[profile]['directory'], project_name,
                            "training_data", self.train_name)
        with open(os.path.join(path, "data.pkl"), "rb") as handle:
            self.data = pickle.load(handle)
    else:
        if input_data is None:
            # Acquire from the consumer, grouping frames per sequence.
            sequences_data = {}
            for msg in self.consumer.consumer:
                sequence_name = msg.value.get('sequence_name')
                if sequence_name is None:
                    # Messages without an explicit name share one bucket.
                    sequence_name = 'sequence'
                if sequences_data.get(sequence_name) is None:
                    sequences_data[sequence_name] = []
                sequences_data[sequence_name].append(
                    frame_from_bytes_str(msg.value['data']))
        else:
            sequences_data = input_data

        if isinstance(sequences_data, dict):
            self.data = []
            number_training_examples = 0
            for seq, value in sequences_data.items():
                self.data.append(value)
                number_training_examples += len(sequences_data[seq])
            num_training_sequences = len(self.data)
        else:
            # Flat (non-dict) input counts as a single sequence — but only
            # if it actually contains data.  Previously this was hard-coded
            # to 1, so an empty input slipped past the guard below.
            self.data = sequences_data
            number_training_examples = len(self.data)
            num_training_sequences = 1 if number_training_examples > 0 else 0

        if num_training_sequences == 0:
            raise DataNotFoundException(
                "There is not data to proceed with the training")
        else:
            print("Number of training examples:", number_training_examples)
            # Save the training data so it can be reloaded via
            # load_last_saved=True on a later run.
            self.train_name = 'training_data_' + str(self.training_count)
            path = os.path.join(config[profile]['directory'], project_name,
                                "training_data", self.train_name)
            os.makedirs(path, exist_ok=True)
            with open(os.path.join(path, "data.pkl"), "wb") as handle:
                pickle.dump(sequences_data, handle,
                            protocol=pickle.HIGHEST_PROTOCOL)
            self.training_count += 1
def test_oneclass_svm_with_pca(self):
    """Fit a OneClassSVM on the acquired training data and run
    predictions over the inference stream in 5-frame windows."""
    model = OneClassSVM()
    model.fit(self.training_data_acquirer.data)
    window_size = 5
    window = []
    for msg in self.inference_data_acquisition.consumer.consumer:
        window.append(frame_from_bytes_str(msg.value['data']))
        if len(window) == window_size:
            # TODO send to prediction topic?
            prediction = model.predict(window)
            print(prediction)
            window = []
def test_cnn(self):
    """Train a CNN autoencoder and run predictions over the inference
    stream in 12-frame windows."""
    model = CNNAutoEncoder(model_name='cnn_1', epochs=4, batch_size=1)
    model.fit(self.training_data_acquirer.data)
    # model.load_last_model()
    window_size = 12
    window = []
    for msg in self.inference_data_acquisition.consumer.consumer:
        window.append(frame_from_bytes_str(msg.value['data']))
        if len(window) == window_size:
            # TODO send to prediction topic?
            prediction = model.predict(window)
            print(prediction)
            window = []
def test_lstm(self):
    """Train an LSTM autoencoder, save it, and run predictions over the
    inference stream in 10-frame windows."""
    model = LSTMAutoEncoder(model_name='lstm_1', epochs=1)
    model.fit(self.training_data_acquirer.data)
    model.save_model()
    # model.load_last_model()
    window_size = 10
    window = []
    for msg in self.inference_data_acquisition.consumer.consumer:
        window.append(frame_from_bytes_str(msg.value['data']))
        if len(window) == window_size:
            # TODO send to prediction topic?
            prediction = model.predict(window)
            print(prediction)
            window = []
def test_gaussian_with_pca(self):
    """Fit a PCA-backed Gaussian model, save it, and run predictions over
    the inference stream in 5-frame windows."""
    model = Gaussian(model_name='gaussian_1', pca=True, pca_n_components=.95)
    model.fit(self.training_data_acquirer.data)
    model.save_model()
    # model.load_last_model()
    window_size = 5
    window = []
    for msg in self.inference_data_acquisition.consumer.consumer:
        window.append(frame_from_bytes_str(msg.value['data']))
        if len(window) == window_size:
            # TODO send to prediction topic?
            prediction = model.predict(window)
            print(prediction)
            window = []
def infer(self, model, transform_function=None, **kwargs):
    """Consume a single message, optionally transform its frame, and
    return the model's prediction for it.

    Args:
        model: any object exposing ``predict(data)``.
        transform_function: optional callable applied to the decoded
            frame before prediction; ``**kwargs`` are forwarded to it.

    Returns:
        The model prediction for the first consumed message, or None if
        the consumer yields nothing.
    """
    for msg in self.consumer.consumer:
        data = frame_from_bytes_str(msg.value['data'])
        # Bug fix: transform_function was called unconditionally, so the
        # documented default of None raised TypeError.
        if transform_function is not None:
            data = transform_function(data, **kwargs)
        return model.predict(data)
def send_frames(self):
    """Consume raw frames, batch them into cubes of ``self.cube_depth``
    frames, extract features per cube, and publish each feature vector
    (plus the JPEG-encoded original frames and frame/timestamp metadata)
    to the producer.

    NOTE(review): the very first consumed frame (counter == 0) is only
    used to seed the start frame number/timestamp — it is appended to
    original_frame_sequence but never to frame_sequence; confirm this is
    intentional.
    """
    counter = 0
    frame_sequence = []           # preprocessed (gray, scaled) frames for feature extraction
    original_frame_sequence = []  # untouched frames, forwarded for downstream display/debug
    for msg in self.consumer.get_consumer():
        frame = frame_from_bytes_str(msg.value['data'])
        original_frame_sequence.append(frame)
        # A cube is complete every cube_depth frames (excluding the seed frame).
        if counter%self.cube_depth == 0 and counter!=0:
            # To grayscale
            frame = frame_to_gray(frame)
            # Divide pixels by 255
            reduced_frame = reduce_frame(frame)
            frame_sequence.append(reduced_frame)
            # Flatten the per-cube features into a single row vector.
            features = generate_features(frame_sequence, self.cube_depth, self.tile_size)
            result = features.reshape(1, features.shape[0]*features.shape[1])
            ##Keep original frames
            #jpeg encode
            jpeg_frames = np.array([cv2.imencode('.jpg', x)[1] for x in original_frame_sequence])
            origin_frames = frame_to_bytes_str(jpeg_frames)
            end_frame_number = self.frame_counter
            source_end_timestamp = msg.value['timestamp']
            # start_frame_number / source_start_timestamp were set either in the
            # counter == 0 branch below or at the end of the previous cube.
            extra_fields = {'origin_frames':origin_frames, 'start_frame_number': start_frame_number, 'end_frame_number': end_frame_number, 'source_end_timestamp': source_end_timestamp, 'source_start_timestamp': source_start_timestamp}
            self.producer.send_frame(result, extra_fields=extra_fields)
            # The end of this cube becomes the start of the next one.
            start_frame_number = end_frame_number
            source_start_timestamp = source_end_timestamp
            # Reset frame sequences
            original_frame_sequence = []
            frame_sequence = []
            counter+=1
            self.frame_counter+=1
        else:
            if counter == 0:
                # First frame ever: record where this stream starts, then skip
                # preprocessing for it.
                start_frame_number = self.frame_counter
                source_start_timestamp = msg.value['timestamp']
                counter+=1
                self.frame_counter+=1
                continue
            # To grayscale
            frame = frame_to_gray(frame)
            # Divide pixels by 255
            reduced_frame = reduce_frame(frame)
            frame_sequence.append(reduced_frame)
            counter+=1
            self.frame_counter+=1
def send_frames(self):
    """Consume raw frames, batch them into clips of ``self.sequence_size``
    frames (resized, optionally grayscaled, scaled to [0,1]), and publish
    each clip (plus JPEG-encoded originals and frame/timestamp metadata)
    to the producer.

    NOTE(review): as in the cube-based variant, the first consumed frame
    (counter == 0) only seeds the start frame number/timestamp and is
    never preprocessed into frame_sequence — confirm intentional.
    """
    counter = 0
    frame_sequence = []           # preprocessed frames accumulated into the current clip
    original_frame_sequence = []  # untouched frames, forwarded for downstream display/debug
    frame_height, frame_width = self.output_frame_size
    for msg in self.consumer.get_consumer():
        frame = frame_from_bytes_str(msg.value['data'])
        original_frame_sequence.append(frame)
        # A clip is complete every sequence_size frames (excluding the seed frame).
        if counter % self.sequence_size == 0 and counter != 0:
            # Resize
            frame = cv2.resize(frame, self.output_frame_size)
            # To grayscale
            if self.to_grayscale:
                frame = frame_to_gray(frame)
            # Divide pixels by 255
            reduced_frame = reduce_frame(frame)
            frame_sequence.append(reduced_frame)
            #Todo: Think when image is RGB
            #Todo: Maybe possible with resize, reshape?
            # Stack the clip as (sequence, H, W, 1) — single channel only.
            clip = np.zeros(shape=(self.sequence_size, frame_height, frame_width, 1))
            clip[:, :, :, 0] = frame_sequence
            #Keep original frames
            #jpeg encode
            jpeg_frames = np.array([
                cv2.imencode('.jpg', x)[1] for x in original_frame_sequence
            ])
            origin_frames = frame_to_bytes_str(jpeg_frames)
            end_frame_number = self.frame_counter
            source_end_timestamp = msg.value['timestamp']
            # start_frame_number / source_start_timestamp were set either in the
            # counter == 0 branch below or at the end of the previous clip.
            extra_fields = {
                'origin_frames': origin_frames,
                'start_frame_number': start_frame_number,
                'end_frame_number': end_frame_number,
                'source_end_timestamp': source_end_timestamp,
                'source_start_timestamp': source_start_timestamp
            }
            #send to producer
            self.producer.send_frame(clip, extra_fields=extra_fields)
            # The end of this clip becomes the start of the next one.
            start_frame_number = end_frame_number
            source_start_timestamp = source_end_timestamp
            # Reset frame sequences
            original_frame_sequence = []
            frame_sequence = []
            counter += 1
            self.frame_counter += 1
        else:
            if counter == 0:
                # First frame ever: record where this stream starts, then skip
                # preprocessing for it.
                start_frame_number = self.frame_counter
                source_start_timestamp = msg.value['timestamp']
                counter += 1
                self.frame_counter += 1
                continue
            # Resize
            frame = cv2.resize(frame, self.output_frame_size)
            # To grayscale
            if self.to_grayscale:
                frame = frame_to_gray(frame)
            # Divide pixels by 255
            reduced_frame = reduce_frame(frame)
            frame_sequence.append(reduced_frame)
            counter += 1
            self.frame_counter += 1
def get_one_image(self):
    """Return the first frame available on the consumer, or None if the
    consumer yields no messages."""
    for message in self.consumer:
        # Only the first message matters: decode its payload and stop.
        return frame_from_bytes_str(message['data'])