def predict(self, video_file_path):
    """Classify a video and return per-frame data alongside the prediction.

    Extracts VGG16 features from the video, truncates or zero-pads every
    frame-indexed array to ``self.expected_frames`` frames, then runs the
    sequence model on the feature array.

    Parameters
    ----------
    video_file_path : str
        Path to the video file to classify.

    Returns
    -------
    tuple
        ``(images, preprocessed_images, x, predicted_label, confidence,
        scores)`` where ``confidence`` is the maximum activation and
        ``scores`` is the full activation vector of the final layer.
    """
    def _fit_frames(arr):
        # Truncate or zero-pad the leading (frame) axis to expected_frames.
        # Preserves dtype (the original padded with float64 zeros, silently
        # upcasting e.g. uint8 images) and works for any array rank instead
        # of assuming 2-D.
        n = arr.shape[0]
        if n > self.expected_frames:
            return arr[:self.expected_frames]
        if n < self.expected_frames:
            padded = np.zeros((self.expected_frames,) + arr.shape[1:],
                              dtype=arr.dtype)
            padded[:n] = arr
            return padded
        return arr

    images, preprocessed_images, x = extract_vgg16_features_live(
        self.vgg16_model, video_file_path)

    # Normalize all three frame-indexed arrays to the same length so the
    # returned frames line up one-to-one with the model input sequence.
    x = _fit_frames(x)
    images = _fit_frames(images)
    preprocessed_images = _fit_frames(preprocessed_images)

    last_activation = self.model.predict(np.array([x]))
    predicted_class = np.argmax(last_activation[0])
    predicted_label = self.labels_idx2word[predicted_class]
    return (images, preprocessed_images, x, predicted_label,
            np.max(last_activation[0]), last_activation[0])
def predict(self, video_file_path):
    """Predict the class label for a single video file.

    Runs VGG16 feature extraction on the video, truncates or zero-pads
    the per-frame feature sequence to ``self.expected_frames`` rows, and
    feeds the result to the sequence classifier.

    Parameters
    ----------
    video_file_path : str
        Path to the video file to classify.

    Returns
    -------
    The label from ``self.labels_idx2word`` with the highest activation.
    """
    # Only the feature array is needed here; raw and preprocessed frames
    # returned by the extractor are discarded.
    _, _, features = extract_vgg16_features_live(self.vgg16_model,
                                                 video_file_path)

    num_frames = features.shape[0]
    if num_frames > self.expected_frames:
        features = features[:self.expected_frames, :]
    elif num_frames < self.expected_frames:
        # Pad short clips with zero-rows up to the expected length.
        padded = np.zeros(shape=(self.expected_frames, features.shape[1]))
        padded[:num_frames, :] = features
        features = padded

    scores = self.model.predict(np.array([features]))[0]
    return self.labels_idx2word[np.argmax(scores)]