def trim_by_points(file: str,
                   start_time: int,
                   end_time: int,
                   factor: str = 's') -> str:
  """Trim the video between two points.

  Trims the video between a start and an end point expressed either in
  seconds ('s'), minutes (any other factor, e.g. 'm') or as a
  percentage of the total duration ('p').

  Args:
    file: File to be used for trimming.
    start_time: Point (in `factor` units) where the trim starts.
    end_time: Point (in `factor` units) where the trim ends.
    factor: Trimming factor (default: 's') — 's', 'm' or 'p'.

  Returns:
    Name of the trimmed file.

  Raises:
    ValueError: If the ending point lies before the starting point.
  """
  idx = 1
  start_time = int(start_time)
  end_time = int(end_time)

  # Work in seconds for factor 's'; otherwise in minutes.
  _factor = 1 if factor == 's' else 60
  total_limit = int(duration(file) / _factor)

  if factor == 'p':
    # Convert percentages into `_factor` units. The previous code reset
    # total_limit to a flat 100 *after* this conversion, which made the
    # bound check below meaningless for videos whose length (in
    # `_factor` units) differs from 100 — keep the real limit instead.
    start_time = int((start_time / 100) * total_limit)
    end_time = int((end_time / 100) * total_limit)

  if end_time < start_time:
    raise ValueError('Ending time is less than starting time.')
  # Clamp both bounds independently; the previous `elif` skipped the
  # start-time check whenever the end time had already maxed out.
  if end_time >= total_limit:
    if factor == 'p':
      print('Video doesn\'t have frame to process.')
    else:
      print('Video doesn\'t have frames to process and will max out.')
    end_time = total_limit
  if start_time < 0:
    print('Start should be greater than 0.')
    start_time = 0
  trim_video(file, filename(file, idx), start_time * _factor,
             end_time * _factor)
  return filename(file, idx)
def trim_sub_sample(file: str,
                    start_time: str,
                    end_time: str,
                    sample_start_time: str,
                    sample_end_time: str,
                    timestamp_format: str = '%H:%M:%S') -> str:
  """Trims sample of the video based on provided timestamp.

  Args:
    file: File to be used for trimming.
    start_time: Timestamp at which the source video starts.
    end_time: Timestamp at which the source video ends.
    sample_start_time: Timestamp where the sample should start.
    sample_end_time: Timestamp where the sample should end.
    timestamp_format: Format (default: '%H:%M:%S') of the timestamps.

  Returns:
    Name of the trimmed file.
  """
  # strptime() substitutes 1900-01-01 when the format carries no date,
  # so this reference turns every parsed stamp into comparable seconds.
  _reference = datetime(1900, 1, 1)

  def _to_secs(stamp: str) -> int:
    # The previous code used datetime.strftime('%s'), which is an
    # undocumented, glibc-only passthrough (it fails on Windows). A
    # plain timedelta from a fixed reference is portable and preserves
    # the differences and comparisons this function relies on.
    parsed = datetime.strptime(stamp, timestamp_format)
    return int((parsed - _reference).total_seconds())

  trim_duration = calculate_duration(sample_start_time, sample_end_time)
  _start_time = _to_secs(start_time)
  _sample_start_time = _to_secs(sample_start_time)
  _end_time = _to_secs(end_time)
  _sample_end_time = _to_secs(sample_end_time)
  idx = 1
  # The sample can never be longer than the video itself.
  if duration(file) < trim_duration:
    trim_duration = duration(file)
  if _sample_start_time < _start_time:
    start = 0
  else:
    start = int(_sample_start_time - _start_time)
  if _sample_end_time < _end_time:
    end = int(start + trim_duration)
  else:
    end = duration(file)
  trim_video(file, filename(file, idx), start, end)
  return filename(file, idx)
def trim_num_parts(file: str,
                   num_parts: int,
                   equal_distribution: bool = False,
                   clip_length: Union[float, int, str] = 30,
                   random_start: bool = True,
                   random_sequence: bool = True) -> Optional[List]:
  """Trim video in number of equal parts.

  Trims the video as per the number of clips required.

  Args:
    file: File to be used for trimming.
    num_parts: Number of videos to be trimmed into.
    equal_distribution: Boolean (default: False) value to re-trim every
                        part down to `clip_length` seconds.
    clip_length: Length (default: 30) of each clip when
                 `equal_distribution` is enabled.
    random_start: Boolean (default: True) value to pick a random start
                  point within each part for the re-trim.
    random_sequence: Boolean (default: True) value to shuffle the order
                     of the returned clips.

  Returns:
    List of the trimmed files (shuffled when `random_sequence` is True).
  """
  num_parts = int(num_parts)
  clip_length = int(clip_length)
  split_part = duration(file) / num_parts
  start = 0
  # Start splitting the videos into 'num_parts' equal parts.
  video_list = []
  for idx in range(1, num_parts + 1):
    start, end = start, start + split_part
    trim_video(file, filename(file, idx), start, end)
    start += split_part
    video_list.append(filename(file, idx))
  if equal_distribution:
    # Use a dedicated name here; the previous loop shadowed and
    # clobbered the `file` argument.
    for clip in video_list:
      if clip_length <= split_part:
        start, end = 0, clip_length
        if random_start:
          # NOTE(review): this start can land past duration - clip_length
          # and overshoot the clip's end — confirm whether intended.
          start = random.randint(1, int(duration(clip)))
          end = start + clip_length
        clip, temp = quick_rename(clip)
        trim_video(temp, clip, start, end)
        time.sleep(2.0)
  if random_sequence:
    # random.shuffle() works in place and returns None; the previous
    # `return random.shuffle(...)` always handed None to the caller.
    random.shuffle(video_list)
  return video_list
def trim_by_factor(file: str,
                   factor: str = 's',
                   clip_length: Union[float, int, str] = 30,
                   last_clip: bool = True) -> List:
  """Trims the video by deciding factor.

  Trims the video as per the deciding factor i.e. trim by mins OR trim
  by secs.

  Args:
    file: File to be used for trimming.
    factor: Trimming factor (default: secs -> 's') to consider; 'm'
            trims in minutes.
    clip_length: Length (default: 30) of each video clip, in `factor`
                 units.
    last_clip: Boolean (default: True) value to consider the remaining
               portion of the trimmed video.

  Returns:
    List of the trimmed files.
  """
  clip_length = int(clip_length)
  total_length = duration(file)
  video_list = []
  idx = 1
  # Internally everything is tracked in seconds.
  if factor == 'm':
    start, end, clip_length = 0, clip_length * 60, clip_length * 60
  else:
    start, end = 0, clip_length
  while clip_length < total_length:
    trim_video(file, filename(file, idx), start, end)
    video_list.append(filename(file, idx))
    start, end, idx = end, end + clip_length, idx + 1
    total_length -= clip_length
  # The loop contains no `break`, so the previous `while ... else`
  # always ran its else-suite; a plain post-loop block is equivalent.
  if last_clip:
    # `total_length` now holds the leftover tail (< clip_length).
    start, end = (duration(file) - total_length), duration(file)
    trim_video(file, filename(file, idx), start, end)
    video_list.append(filename(file, idx))
  return video_list
def start_live_recording(bucket_name: str,
                         order_name: str,
                         start_time: str,
                         end_time: str,
                         camera_address: str,
                         camera_username: str = 'xames3',
                         camera_password: str = 'iamironman',
                         camera_port: Union[int, str] = 554,
                         camera_timeout: Union[float, int] = 30.0,
                         timestamp_format: str = '%H:%M:%S',
                         log: logging.Logger = None) -> Optional[str]:
  """Saves videos based on time duration.

  Records the camera feed from `start_time` to `end_time` on today's
  date, retrying across network drops and concatenating the recorded
  pieces into a single output.

  Args:
    bucket_name: S3 bucket name used in the output file name.
    order_name: Order name used in the output file name.
    start_time: Time when to start recording the video.
    end_time: Time when to stop recording the video.
    camera_address: Camera's IP address.
    camera_username: Camera username.
    camera_password: Camera password.
    camera_port: Camera port number.
    camera_timeout: Maximum time to wait until disconnection occurs.
    timestamp_format: Timestamp format used for duration calculation.
    log: Logger object.

  Returns:
    Path of the concatenated recording, or None when nothing could be
    recorded (zero duration or an unexpected error).
  """
  log = _log(__file__) if log is None else log
  # Recording is anchored to today's date so the clock-only times can
  # be parsed as full datetimes below.
  run_date = datetime.now().strftime('%Y-%m-%d')
  start_time, end_time = f'{run_date} {start_time}', f'{run_date} {end_time}'
  duration = calculate_duration(start_time, end_time, timestamp_format, True)
  # Hard deadline (epoch seconds) after which recording must wrap up.
  force_close = datetime.strptime(
    end_time, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc).timestamp()
  vid_type = video_type(True, True, True)
  temp_path = os.path.join(live,
                           f'{bucket_name}{order_name}_{timestamp_dirname()}')
  if not os.path.isdir(temp_path):
    os.mkdir(temp_path)
  temp_file = os.path.join(temp_path,
                           f'{bucket_name}{order_name}{vid_type}.mp4')
  url = configure_camera_url(camera_address, camera_username,
                             camera_password, int(camera_port))
  slept_duration, idx = 0, 1
  if duration != 0:
    try:
      while True:
        if camera_live(camera_address, camera_port, camera_timeout, log):
          file = filename(temp_file, idx)
          log.info('Recording started for selected camera.')
          # Blocks until ffmpeg finishes recording `duration` seconds.
          os.system(ffmpeg_str(url, file, duration, camera_timeout))
          stop_utc = now().replace(tzinfo=timezone.utc).timestamp()
          stop_secs = now().second
          _old_file = file_size(file)
          # NOTE(review): '300.0 bytes' appears to be the sentinel size
          # of an empty/failed recording — confirm against file_size().
          old_duration = stop_secs if _old_file == '300.0 bytes' else drn(file)
          # Subtract what was already captured plus the time lost while
          # waiting for connectivity.
          duration = duration - old_duration - slept_duration
          slept_duration = 0
          idx += 1
          if (force_close <= stop_utc) or (duration <= 0):
            output = concate_videos(temp_path, delete_old_files=True)
            if output:
              return output
        else:
          log.warning('Unable to record because of poor network connectivity.')
          # Remember the lost time so the next recording window shrinks
          # accordingly.
          slept_duration += camera_timeout
          log.warning('Compensating lost time & attempting after 30 secs.')
          time.sleep(camera_timeout)
    except Exception as error:
      log.critical(f'Something went wrong because of {error}')
def save_num_video(bucket_name: str,
                   order_name: str,
                   duration: Union[float, int, str],
                   num_of_clips: int,
                   camera_address: str,
                   camera_username: str = 'admin',
                   camera_password: str = 'iamironman') -> None:
  """Saves "N" number of live video streams.

  Args:
    bucket_name: S3 bucket name used in the output file names.
    order_name: Order name used in the output file names.
    duration: Duration of each recorded clip.
    num_of_clips: Number of clips to record.
    camera_address: Camera's IP address.
    camera_username: Camera username.
    camera_password: Camera password.
  """
  vid_type = video_type(True, True, True)
  base_file = os.path.join(live, f'{bucket_name}{order_name}{vid_type}.mp4')
  # The camera URL never changes between clips; build it once instead of
  # once per iteration.
  url = configure_camera_url(camera_address, camera_username, camera_password)
  for idx in range(1, num_of_clips + 1):
    # Derive every clip name from the unchanged base path (matching
    # start_live_recording). The previous code reassigned `file` itself,
    # compounding the index suffix on each iteration.
    file = filename(base_file, idx)
    # NOTE(review): shell=True with an interpolated command string is
    # safe only if ffmpeg_str() escapes its arguments — verify upstream.
    subprocess.check_call(ffmpeg_str(url, file, duration), shell=True)
def redact_faces(file: str,
                 use_ml_model: bool = True,
                 smooth_blur: bool = True,
                 resize: bool = True,
                 resize_width: int = 640,
                 debug_mode: bool = True,
                 log: logging.Logger = None) -> Optional[str]:
    """Apply face redaction in video using CaffeModel.

    Detects faces frame by frame (ML detector or Haar cascade), blurs or
    pixelates them, logs per-second detection counts to a CSV file and
    re-encodes the result with H264.

    Args:
      file: Video file whose faces are to be redacted.
      use_ml_model: Boolean (default: True) value to use the ML-based
                    face detector; a Haar cascade is used otherwise.
      smooth_blur: Boolean (default: True) value to apply Gaussian blur;
                   pixelation is applied otherwise.
      resize: Boolean (default: True) value to downscale frames before
              processing.
      resize_width: Target width (default: 640) for resized frames.
      debug_mode: Boolean (default: True) value to draw bounding boxes
                  and display a preview window.
      log: Logger object.

    Returns:
      Path of the H264-encoded redacted video, or None on failure.
    """
    log = _log(__file__) if log is None else log

    x0, y0, x1, y1 = 0, 0, 0, 0
    boxes, temp_csv_entries = [], []
    # Maps a seconds-elapsed label -> running detection counts for
    # frames belonging to that second.
    face_count = {}

    # All artifacts (redacted video + CSV) live in a directory named
    # after the source file.
    directory = os.path.join(os.path.dirname(file), f'{Path(file).stem}')

    if not os.path.isdir(directory):
        os.mkdir(directory)

    temp_file = os.path.join(directory, f'{Path(file).stem}_redact.mp4')

    if debug_mode:
        log.info('Debug mode - Enabled.')

    log.info(f'Redacting faces from "{os.path.basename(file)}".')

    try:
        stream = cv2.VideoCapture(file)
        fps = stream.get(cv2.CAP_PROP_FPS)
        width, height = (int(stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
                         int(stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))

        if resize:
            # Preserve the aspect ratio while forcing the target width.
            width, height = resize_width, int(height *
                                              (resize_width / float(width)))

        save = cv2.VideoWriter(filename(temp_file, 1),
                               cv2.VideoWriter_fourcc(*'mp4v'), fps,
                               (width, height))

        while True:
            valid_frame, frame = stream.read()

            # End of stream (or read failure) — stop processing.
            if not valid_frame:
                break

            if frame is None:
                break

            if resize:
                frame = rescale(frame, resize_width)

            height, width = frame.shape[:2]

            if use_ml_model:
                # The ML detector expects RGB input; OpenCV frames are
                # BGR.
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                faces = face_detector.detect_faces(rgb)

                for face_idx in faces:
                    # Considering detections which have confidence score higher than the
                    # set threshold.
                    if face_idx['confidence'] > 0.75:
                        # 'box' is (x, y, width, height); convert it to
                        # opposite-corner coordinates.
                        x0, y0, x1, y1 = face_idx['box']
                        x0, y0 = abs(x0), abs(y0)
                        x1, y1 = x0 + x1, y0 + y1

                        face = frame[y0:y1, x0:x1]

                        if debug_mode:
                            draw_bounding_box(frame, (x0, y0), (x1, y1),
                                              color.red)
                        try:
                            if smooth_blur:
                                frame[y0:y1, x0:x1] = cv2.GaussianBlur(
                                    frame[y0:y1, x0:x1], (21, 21), 0)
                            else:
                                frame[y0:y1, x0:x1] = pixelate(face)
                        # Redacting a degenerate/empty ROI may fail;
                        # skip that face rather than abort the video.
                        except Exception:
                            pass

                    # NOTE(review): this append runs even for detections
                    # below the confidence threshold, re-counting stale
                    # box coordinates — confirm whether intended.
                    boxes.append([x1, y1])
                    face_occurence = s2d(
                        int(stream.get(cv2.CAP_PROP_POS_MSEC) / 1000))

                    if face_occurence not in face_count.keys():
                        face_count[face_occurence] = []

                    face_count[face_occurence].append(len(boxes))
            else:
                # Fallback: classic Haar-cascade frontal face detection.
                face_cascade = cv2.CascadeClassifier(frontal_haar)
                gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)

                # Here (x1, y1) are the box width/height, not corners.
                for (x0, y0, x1, y1) in faces:
                    if debug_mode:
                        draw_bounding_box(frame, (x0, y0), (x0 + x1, y0 + y1),
                                          color.red)
                    try:
                        if smooth_blur:
                            frame[y0:(y0 + y1),
                                  x0:(x0 + x1)] = cv2.GaussianBlur(
                                      frame[y0:(y0 + y1), x0:(x0 + x1)],
                                      (21, 21), 0)
                        else:
                            frame[y0:(y0 + y1),
                                  x0:(x0 + x1)] = pixelate(frame[y0:(y0 + y1),
                                                                 x0:(x0 + x1)])
                    # Skip faces whose ROI cannot be redacted.
                    except Exception:
                        pass
                    boxes.append([x1, y1])
                    face_occurence = s2d(
                        int(stream.get(cv2.CAP_PROP_POS_MSEC) / 1000))

                    if face_occurence not in face_count.keys():
                        face_count[face_occurence] = []

                    face_count[face_occurence].append(len(boxes))

            # Reset per-frame detections and persist the redacted frame.
            boxes = []
            save.write(frame)

            if debug_mode:
                cv2.imshow('Video Processing Engine - Redaction', frame)

            # 27 == ESC key: allow aborting the processing loop.
            if cv2.waitKey(1) & 0xFF == int(27):
                break

        stream.release()
        save.release()
        cv2.destroyAllWindows()

        with open(os.path.join(directory, f'{Path(file).stem}.csv'),
                  'a',
                  encoding=dev.DEF_CHARSET) as csv_file:
            log.info('Logging detections into a CSV file.')
            _file = csv.writer(csv_file, quoting=csv.QUOTE_MINIMAL)
            _file.writerow(['Max no. of detections per second', 'Time frame'])
            # One row per second: the peak detection count in that second.
            temp_csv_entries = [(max(v), k) for k, v in face_count.items()]
            _file.writerows(temp_csv_entries)

        log.info('Applying H264 encoding for bypassing browser issues.')
        os.system(
            f'ffmpeg -loglevel error -y -i {filename(temp_file, 1)} -vcodec '
            f'libx264 {temp_file}')

        return temp_file
    except Exception as error:
        log.critical(f'Something went wrong because of {error}')
# Example #8
# 0
def track_motion(file: str,
                 precision: int = 1500,
                 resize: bool = True,
                 resize_width: int = 640,
                 debug_mode: bool = True,
                 log: logging.Logger = None) -> Optional[str]:
    """Track motion in the video using Background Subtraction method.

    Compares every frame against the first captured frame; contours of
    the thresholded difference whose area exceeds `precision` count as
    motion and are buffered to clips via a KeyClipWriter, then the clips
    are concatenated and re-encoded with H264.

    Args:
      file: Video file to analyze for motion.
      precision: Minimum contour area (default: 1500) that counts as
                 motion.
      resize: Boolean (default: True) value to downscale frames before
              processing.
      resize_width: Target width (default: 640) for resized frames.
      debug_mode: Boolean (default: True) value to draw bounding boxes
                  and display a preview window.
      log: Logger object.

    Returns:
      Path of the motion-only video, the original file when no clips
      were produced, or None on failure.
    """
    log = _log(__file__) if log is None else log
    # Buffered writer keeps the last 32 frames so each clip includes a
    # short lead-in before the detected motion.
    kcw = KeyClipWriter(bufSize=32)
    consec_frames, x0, y0, x1, y1 = 0, 0, 0, 0, 0
    boxes, temp_csv_entries = [], []
    directory = os.path.join(os.path.dirname(file), f'{Path(file).stem}')
    if not os.path.isdir(directory):
        os.mkdir(directory)
    temp_file = os.path.join(directory, f'{Path(file).stem}_motion.mp4')
    idx = 1
    if debug_mode:
        log.info('Debug mode - Enabled.')
    log.info(f'Analyzing motion for "{os.path.basename(file)}".')
    try:
        stream = cv2.VideoCapture(file)
        fps = stream.get(cv2.CAP_PROP_FPS)
        first_frame = None
        while True:
            valid_frame, frame = stream.read()
            # End of stream (or read failure) — stop processing.
            if not valid_frame:
                break
            if frame is None:
                break
            if resize:
                frame = rescale(frame, resize_width)
            # NOTE(review): update_frame is never set to False, so
            # consec_frames advances on every frame — confirm intent.
            update_frame = True
            # Grayscale + blur stabilizes the frame diff against noise.
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray_frame = cv2.GaussianBlur(gray_frame, (21, 21), 0)
            if first_frame is None:
                # The very first frame acts as the static background.
                first_frame = gray_frame
                continue
            frame_delta = cv2.absdiff(first_frame, gray_frame)
            threshold = cv2.threshold(frame_delta, 25, 255,
                                      cv2.THRESH_BINARY)[1]
            threshold = cv2.dilate(threshold, None, iterations=2)
            contours = cv2.findContours(threshold.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
            contours = imutils.grab_contours(contours)
            for contour in contours:
                # Ignore contours smaller than the precision threshold.
                if cv2.contourArea(contour) < precision:
                    continue
                if debug_mode:
                    (x0, y0, x1, y1) = cv2.boundingRect(contour)
                    draw_bounding_box(frame, (x0, y0), (x0 + x1, y0 + y1))
                consec_frames = 0
                if not kcw.recording:
                    # Motion (re)started: open a new buffered clip.
                    kcw.start(filename(temp_file, idx),
                              cv2.VideoWriter_fourcc(*'mp4v'), fps)
                    idx += 1
                boxes.append([x1, y1])
                status = motion_meta(len(boxes),
                                     stream.get(cv2.CAP_PROP_POS_MSEC))
                # log.info(status)
                temp_csv_entries.append(status)
            boxes = []
            if update_frame:
                consec_frames += 1
            kcw.update(frame)
            # 32 consecutive motionless frames end the current clip.
            if kcw.recording and consec_frames == 32:
                log.info(
                    'Extracting buffered portion of video with detected motion.'
                )
                kcw.finish()
            if debug_mode:
                cv2.imshow('Video Processing Engine - Motion Detection', frame)
            # 27 == ESC key. NOTE(review): this disconnects the stream
            # but does not break out of the loop — confirm intent.
            if cv2.waitKey(1) & 0xFF == int(27):
                disconnect(stream)
        if kcw.recording:
            kcw.finish()
        # No clips were produced — fall back to the untouched source.
        if len(os.listdir(directory)) < 1:
            return file
        concate_temp = concate_videos(directory, delete_old_files=True)
        with open(os.path.join(directory, f'{Path(file).stem}.csv'),
                  'a',
                  encoding=dev.DEF_CHARSET) as csv_file:
            log.info('Logging detections into a CSV file.')
            _file = csv.writer(csv_file,
                               delimiter='\n',
                               quoting=csv.QUOTE_MINIMAL)
            _file.writerow(temp_csv_entries)
        if concate_temp:
            if os.path.isfile(concate_temp):
                log.info(
                    'Applying H264 encoding for bypassing browser issues.')
                os.system(
                    f'ffmpeg -loglevel error -y -i {concate_temp} -vcodec '
                    f'libx264 {temp_file}')
                log.info('Cleaning up archived files.')
                os.remove(concate_temp)
                return temp_file
    except Exception as error:
        log.critical(f'Something went wrong because of {error}')
# Example #9
# 0
def live(bucket_name: str,
         order_name: str,
         run_date: str,
         start_time: str,
         end_time: str,
         camera_address: str,
         camera_username: str = 'xames3',
         camera_password: str = 'iamironman',
         camera_port: Union[int, str] = 554,
         camera_timeout: Union[float, int, str] = 30.0,
         timestamp_format: str = '%H:%M:%S',
         log: logging.Logger = None) -> Optional[str]:
    """Record live videos based on time duration using FFMPEG.

    Args:
      bucket_name: S3 bucket name.
      order_name: Order name.
      run_date: Date when to record the video.
      start_time: Time when to start recording the video.
      end_time: Time when to stop recording the video.
      camera_address: Camera's IP address.
      camera_username: Camera username.
      camera_password: Camera password.
      camera_port: Camera port number.
      camera_timeout: Maximum time to wait until disconnection occurs.
      timestamp_format: Timestamp for checking the recording start time.
      log: Logger object.

    Returns:
      Path of the concatenated recording, or None when nothing could be
      recorded (zero duration or an unexpected error).
    """
    log = _log(__file__) if log is None else log

    camera_port = int(camera_port)
    camera_timeout = float(camera_timeout)

    # Combine date + clock times so they can parse as full datetimes.
    start_time, end_time = f'{run_date} {start_time}', f'{run_date} {end_time}'
    duration = calculate_duration(start_time, end_time, timestamp_format, True)
    # Hard deadline (epoch seconds) after which recording must wrap up.
    force_close = datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
    force_close = force_close.replace(tzinfo=timezone.utc).timestamp()

    vid_type = video_type(True, True, True)
    temp = os.path.join(_lr, f'{bucket_name}{order_name}')

    if not os.path.isdir(temp):
        os.mkdir(temp)
    temp_file = os.path.join(temp, f'{bucket_name}{order_name}{vid_type}.mp4')

    url = configure_camera_url(camera_address, camera_username,
                               camera_password, camera_port)
    slept_duration, idx = 0, 1

    if duration != 0:
        try:
            while True:
                if camera_live(camera_address, camera_port, camera_timeout,
                               log):
                    file = filename(temp_file, idx)
                    log.info('Recording started for selected camera.')
                    # Blocks until ffmpeg records `duration` seconds.
                    os.system(ffmpeg_str(url, file, duration, camera_timeout))

                    stop_utc = now().replace(tzinfo=timezone.utc).timestamp()
                    stop_secs = now().second

                    _old_file = file_size(file)
                    # NOTE(review): '300.0 bytes' appears to be the
                    # sentinel size of an empty/failed recording —
                    # confirm against file_size().
                    old_duration = stop_secs if _old_file == '300.0 bytes' else drn(
                        file)
                    # Subtract what was already captured plus the time
                    # lost while waiting for connectivity.
                    duration = duration - old_duration - slept_duration

                    slept_duration = 0
                    idx += 1
                    if (force_close <= stop_utc) or (duration <= 0):
                        output = concate_videos(temp, delete_old_files=True)
                        if output:
                            return output
                else:
                    log.warning(
                        'Unable to record because of poor network connectivity.'
                    )
                    # Remember the lost time so the next recording
                    # window shrinks accordingly.
                    slept_duration += camera_timeout
                    log.warning(
                        'Compensating lost time & attempting after 30 secs.')
                    time.sleep(camera_timeout)
        except Exception as error:
            log.critical(f'Something went wrong because of {error}')