from tqdm import tqdm

# frame_to_gray, reduce_frame, generate_features and divide_chunks are project
# utilities that are not included in this snippet (sketches of what they are
# assumed to do appear further down).


def generate_features_frames_old(frames, cube_depth=5, tile_size=10):
    counter = 0
    frame_sequence = []
    original_frame_sequence = []
    results = []

    for frame in tqdm(frames):
        original_frame_sequence.append(frame)
        if counter % cube_depth == 0 and counter != 0:
            # To grayscale
            frame = frame_to_gray(frame)
            # Divide pixels by 255
            reduced_frame = reduce_frame(frame)
            frame_sequence.append(reduced_frame)

            features = generate_features(frame_sequence, cube_depth, tile_size)
            result = features.reshape(1, features.shape[0] * features.shape[1])
            results.append(result)

            # Keeping the original frames (JPEG-encoded) is disabled in this function:
            # jpeg_frames = np.array([cv2.imencode('.jpg', x)[1] for x in original_frame_sequence])
            # origin_frames = frame_to_bytes_str(jpeg_frames)

            # Reset frame sequences
            original_frame_sequence = []
            frame_sequence = []
            counter += 1
        else:
            if counter == 0:
                # The very first frame is only counted, not added to the sequence.
                counter += 1
                continue

            # To grayscale
            frame = frame_to_gray(frame)
            # Divide pixels by 255
            reduced_frame = reduce_frame(frame)
            frame_sequence.append(reduced_frame)
            counter += 1

    return results


def generate_features_frames(frames,
                             cube_depth=5,
                             tile_size=10,
                             description=''):
    results = []
    sequences = list(divide_chunks(frames, cube_depth))

    # If the last sequence does not contain enough frames, leave it out.
    if len(sequences[-1]) != cube_depth:
        sequences = sequences[0:-1]

    # Progress bar removed for now: for sequence in tqdm(sequences, desc=description)
    for sequence in sequences:
        sequence = [reduce_frame(frame_to_gray(frame)) for frame in sequence]
        features = generate_features(sequence, cube_depth, tile_size)
        result = features.reshape(1, features.shape[0] * features.shape[1])
        results.append(result)

    return results
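
# divide_chunks is not defined in this snippet; a minimal sketch of what it is
# assumed to do (yield successive cube_depth-sized slices of the frame list) is
# given below. The real project helper may differ.
def divide_chunks(items, chunk_size):
    # Yield successive chunk_size-sized slices of items.
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]

# Example: list(divide_chunks([1, 2, 3, 4, 5, 6, 7], 3)) -> [[1, 2, 3], [4, 5, 6], [7]]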

    def send_frames(self):
        counter = 0
        frame_sequence = []
        original_frame_sequence = []

        for msg in self.consumer.get_consumer():
            frame = frame_from_bytes_str(msg.value['data'])
            original_frame_sequence.append(frame)

            if counter % self.cube_depth == 0 and counter != 0:
                # To grayscale
                frame = frame_to_gray(frame)
                # Divide pixels by 255
                reduced_frame = reduce_frame(frame)
                frame_sequence.append(reduced_frame)

                features = generate_features(frame_sequence, self.cube_depth,
                                             self.tile_size)
                result = features.reshape(1, features.shape[0] * features.shape[1])

                # Keep the original frames (JPEG-encoded)
                jpeg_frames = np.array(
                    [cv2.imencode('.jpg', x)[1] for x in original_frame_sequence])
                origin_frames = frame_to_bytes_str(jpeg_frames)

                end_frame_number = self.frame_counter
                source_end_timestamp = msg.value['timestamp']

                extra_fields = {
                    'origin_frames': origin_frames,
                    'start_frame_number': start_frame_number,
                    'end_frame_number': end_frame_number,
                    'source_end_timestamp': source_end_timestamp,
                    'source_start_timestamp': source_start_timestamp
                }

                self.producer.send_frame(result, extra_fields=extra_fields)

                start_frame_number = end_frame_number
                source_start_timestamp = source_end_timestamp
                # Reset frame sequences
                original_frame_sequence = []
                frame_sequence = []
                counter += 1
                self.frame_counter += 1
            else:
                if counter == 0:
                    # Very first frame: just record where the first clip starts.
                    start_frame_number = self.frame_counter
                    source_start_timestamp = msg.value['timestamp']
                    counter += 1
                    self.frame_counter += 1
                    continue

                # To grayscale
                frame = frame_to_gray(frame)
                # Divide pixels by 255
                reduced_frame = reduce_frame(frame)
                frame_sequence.append(reduced_frame)
                counter += 1
                self.frame_counter += 1
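
# The origin_frames payload above is built from per-frame JPEG buffers. A small
# self-contained round-trip of the cv2.imencode / cv2.imdecode pair (dummy frame,
# not the project's actual consumer code):
import cv2
import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)   # dummy BGR frame
ok, buf = cv2.imencode('.jpg', frame)             # encode, as in send_frames
restored = cv2.imdecode(buf, cv2.IMREAD_COLOR)    # decode on the consumer side
assert ok and restored.shape == frame.shape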
Example #4
    def gaussian_model_frame(self, frame):
        # Convert to grayscale and divide pixel values by 255.
        frame = frame_to_gray(frame)
        reduced_frame = reduce_frame(frame)
        return reduced_frame
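
# frame_to_gray and reduce_frame are project utilities that are not shown in
# these snippets. Based on the inline comments ("To grayscale", "Divide pixels
# by 255"), a plausible sketch is the following; the real implementations may differ.
import cv2
import numpy as np

def frame_to_gray(frame):
    # Assumed behaviour: convert a BGR frame to a single-channel grayscale image.
    return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

def reduce_frame(frame):
    # Assumed behaviour: scale 8-bit pixel values into [0, 1].
    return frame.astype(np.float32) / 255.0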
Example #5
    def send_frames(self):
        counter = 0
        frame_sequence = []
        original_frame_sequence = []
        frame_height, frame_width = self.output_frame_size

        for msg in self.consumer.get_consumer():
            frame = frame_from_bytes_str(msg.value['data'])
            original_frame_sequence.append(frame)

            if counter % self.sequence_size == 0 and counter != 0:
                # Resize
                frame = cv2.resize(frame, self.output_frame_size)
                # To grayscale
                if self.to_grayscale:
                    frame = frame_to_gray(frame)
                # Divide pixels by 255
                reduced_frame = reduce_frame(frame)

                frame_sequence.append(reduced_frame)

                # TODO: handle the case where the image is RGB
                # TODO: maybe possible with resize/reshape?
                clip = np.zeros(shape=(self.sequence_size, frame_height,
                                       frame_width, 1))
                clip[:, :, :, 0] = frame_sequence

                # Keep the original frames (JPEG-encoded)
                jpeg_frames = np.array([
                    cv2.imencode('.jpg', x)[1] for x in original_frame_sequence
                ])
                origin_frames = frame_to_bytes_str(jpeg_frames)

                end_frame_number = self.frame_counter
                source_end_timestamp = msg.value['timestamp']

                extra_fields = {
                    'origin_frames': origin_frames,
                    'start_frame_number': start_frame_number,
                    'end_frame_number': end_frame_number,
                    'source_end_timestamp': source_end_timestamp,
                    'source_start_timestamp': source_start_timestamp
                }

                # Send the clip to the producer
                self.producer.send_frame(clip, extra_fields=extra_fields)

                start_frame_number = end_frame_number
                source_start_timestamp = source_end_timestamp
                # Reset frame sequences
                original_frame_sequence = []
                frame_sequence = []
                counter += 1
                self.frame_counter += 1

            else:
                if counter == 0:
                    start_frame_number = self.frame_counter
                    source_start_timestamp = msg.value['timestamp']
                    counter += 1
                    self.frame_counter += 1
                    continue

                # Resize
                frame = cv2.resize(frame, self.output_frame_size)
                # To grayscale
                if self.to_grayscale:
                    frame = frame_to_gray(frame)
                # Divide pixels by 255
                reduced_frame = reduce_frame(frame)

                frame_sequence.append(reduced_frame)
                counter += 1
                self.frame_counter += 1
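
# The clip assignment above relies on NumPy converting the list of 2-D grayscale
# frames into a (sequence_size, height, width) array. A self-contained shape check
# with assumed sizes:
import numpy as np

sequence_size, frame_height, frame_width = 10, 256, 256    # assumed settings
frame_sequence = [np.zeros((frame_height, frame_width)) for _ in range(sequence_size)]

clip = np.zeros(shape=(sequence_size, frame_height, frame_width, 1))
clip[:, :, :, 0] = frame_sequence                          # the list of 2-D frames broadcasts in
assert clip.shape == (sequence_size, frame_height, frame_width, 1)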
Example #6
    def cnn_autoencoder_frame(self, frame, input_size):
        # Resize, convert to grayscale and divide pixel values by 255.
        frame = cv2.resize(frame, input_size)
        frame = frame_to_gray(frame)
        reduced_frame = reduce_frame(frame)
        return reduced_frame
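
# A hedged usage sketch for the preprocessing above: the 2-D array returned by
# cnn_autoencoder_frame would typically be expanded to a (1, height, width, 1)
# batch before being fed to a convolutional autoencoder. input_size, the dummy
# frame and the model call are assumptions, not part of the original code.
import cv2
import numpy as np

input_size = (256, 256)                                  # assumed (width, height)
frame = np.zeros((480, 640, 3), dtype=np.uint8)          # dummy BGR frame
resized = cv2.resize(frame, input_size)
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)         # stand-in for frame_to_gray
reduced_frame = gray.astype(np.float32) / 255.0          # stand-in for reduce_frame
batch = reduced_frame.reshape(1, input_size[1], input_size[0], 1)
# reconstruction = autoencoder.predict(batch)            # hypothetical Keras-style model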