# Stdlib imports assumed by the snippets below. Project-local helpers
# (_log, now, calculate_duration, datetime_to_utc, video_type, filename,
# ffmpeg_str, camera_live, file_size, drn, concate_videos, s2d, the
# download/upload helpers, and path constants such as live, _lr,
# downloads and reports) are expected to come from the surrounding
# package.
import csv
import json
import logging
import os
import shutil
import sys
import time
from datetime import datetime, timezone
from typing import Optional, Union


def start_live_recording(bucket_name: str,
                         order_name: str,
                         start_time: str,
                         end_time: str,
                         camera_address: str,
                         camera_username: str = 'xames3',
                         camera_password: str = 'iamironman',
                         camera_port: Union[int, str] = 554,
                         camera_timeout: Union[float, int] = 30.0,
                         timestamp_format: str = '%H:%M:%S',
                         log: Optional[logging.Logger] = None) -> Optional[str]:
  """Record live video between start and end times and return its path."""
  log = _log(__file__) if log is None else log
  run_date = datetime.now().strftime('%Y-%m-%d')
  start_time, end_time = f'{run_date} {start_time}', f'{run_date} {end_time}'
  duration = calculate_duration(start_time, end_time, timestamp_format, True)
  force_close = datetime.strptime(
    end_time, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc).timestamp()
  vid_type = video_type(True, True, True)
  temp_path = os.path.join(live,
                           f'{bucket_name}{order_name}_{timestamp_dirname()}')
  if not os.path.isdir(temp_path):
    os.mkdir(temp_path)
  temp_file = os.path.join(temp_path,
                           f'{bucket_name}{order_name}{vid_type}.mp4')
  url = configure_camera_url(camera_address, camera_username,
                             camera_password, int(camera_port))
  slept_duration, idx = 0, 1
  if duration != 0:
    try:
      while True:
        if camera_live(camera_address, camera_port, camera_timeout, log):
          file = filename(temp_file, idx)
          log.info('Recording started for selected camera.')
          os.system(ffmpeg_str(url, file, duration, camera_timeout))
          stop_utc = now().replace(tzinfo=timezone.utc).timestamp()
          stop_secs = now().second
          _old_file = file_size(file)
          old_duration = stop_secs if _old_file == '300.0 bytes' else drn(file)
          duration = duration - old_duration - slept_duration
          slept_duration = 0
          idx += 1
          if (force_close <= stop_utc) or (duration <= 0):
            output = concate_videos(temp_path, delete_old_files=True)
            if output:
              return output
        else:
          log.warning('Unable to record because of poor network connectivity.')
          slept_duration += camera_timeout
          log.warning('Compensating lost time & retrying after '
                      f'{camera_timeout} secs.')
          time.sleep(camera_timeout)
    except Exception as error:
      log.critical(f'Something went wrong: {error}')
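

# Illustrative invocation of start_live_recording(); the bucket, order,
# time window and camera address below are placeholder assumptions, not a
# real deployment.
def _demo_start_live_recording() -> None:
  recording = start_live_recording(bucket_name='xa0001',
                                   order_name='e01',
                                   start_time='09:30:00',
                                   end_time='10:00:00',
                                   camera_address='203.0.113.7')
  if recording:
    print(f'Concatenated recording saved at {recording}')
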
def trigger_utc_capture(bucket_name: str,
                        order_name: str,
                        start_time: str,
                        end_time: str,
                        camera_timezone: str,
                        camera_address: str,
                        camera_username: str = 'xames3',
                        camera_password: str = 'iamironman',
                        camera_port: Union[int, str] = 554,
                        camera_timeout: Union[float, int] = 30.0,
                        timestamp_format: str = '%H:%M:%S',
                        log: Optional[logging.Logger] = None) -> str:
  """Start video recording once the scheduled UTC trigger time is reached."""
  log = _log(__file__) if log is None else log
  run_date = datetime.now().strftime('%Y-%m-%d')
  _start_time = f'{run_date} {start_time}'
  _start_time = datetime_to_utc(_start_time,
                                camera_timezone,
                                '%Y-%m-%d %H:%M:%S')
  log.info('Video processing engine is scheduled to start '
           f'recording at {_start_time}.')
  while True:
    # Compare with >= so a skipped tick (sleep drift) can't miss the
    # start second and loop forever.
    if str(now()) >= str(_start_time):
      log.info('Video processing engine has started recording.')
      recorded_file = start_live_recording(bucket_name, order_name,
                                           start_time, end_time,
                                           camera_address, camera_username,
                                           camera_password, camera_port,
                                           camera_timeout, timestamp_format,
                                           log)
      log.info('Video processing engine has stopped recording.')
      if recorded_file is None:
        return 'RecordingError'
      else:
        return recorded_file
    time.sleep(1.0)
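

# A minimal stdlib sketch (an assumption, not the project's actual
# implementation) of the conversion datetime_to_utc() performs above:
# parse a naive local timestamp, attach the camera's timezone, and
# express it as naive UTC.
def _to_utc_sketch(timestamp: str, tz_name: str, fmt: str) -> datetime:
  from zoneinfo import ZoneInfo  # Python 3.9+
  local = datetime.strptime(timestamp, fmt).replace(tzinfo=ZoneInfo(tz_name))
  return local.astimezone(timezone.utc).replace(tzinfo=None)

# Example: _to_utc_sketch('2024-01-01 09:30:00', 'Asia/Kolkata',
#                         '%Y-%m-%d %H:%M:%S') -> datetime(2024, 1, 1, 4, 0)
# since IST is UTC+05:30.
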
def compute(json_obj: Union[bytes, str]):
    try:
        json_data = json.loads(json_obj)
        scheduled = json_data.get('schedule_download', False)
        if scheduled:
            scheduled_time = f'{json_data["start_date"]} {json_data["start_time"]}'
            sleep_interval = datetime_to_utc(scheduled_time,
                                             json_data["camera_timezone"],
                                             '%Y-%m-%d %H:%M:%S')
            # str() tolerates datetime_to_utc returning either a datetime
            # or an already formatted string.
            sleep_interval = datetime.strptime(str(sleep_interval),
                                               '%Y-%m-%d %H:%M:%S') - now()
            # total_seconds() goes negative for a schedule in the past;
            # timedelta.seconds never does.
            if sleep_interval.total_seconds() <= 0:
                log.error('Scheduled time has passed already.')
                return None
            log.info(
                'Video download is scheduled; the process will suspend '
                f'for {s2d(int(sleep_interval.total_seconds()))}.')
            time.sleep(1.0 + sleep_interval.total_seconds())
        log.info('Initiate video download.')
        if json_data.get('access_type', None) == 'GCP':
            log.info('Download file via Google Drive.')
            download_from_google_drive(json_data.get('g_url', None),
                                       json_data.get('stored_filename', None),
                                       log)
        elif json_data.get('access_type', None) == 'Microsoft':
            log.info('Download file via Microsoft Azure.')
            download_from_azure(json_data.get('azure_account_name', None),
                                json_data.get('azure_account_key', None),
                                json_data.get('azure_container_name', None),
                                json_data.get('azure_blob_name', None),
                                json_data.get('stored_filename', None), log)
        elif json_data.get('access_type', None) == 'FTP':
            log.info('Transfer file via FTP.')
            download_using_ftp(json_data.get('remote_username', None),
                               json_data.get('remote_password', None),
                               json_data.get('remote_public_address', None),
                               json_data.get('remote_file', None),
                               json_data.get('stored_filename', None), log)
        elif json_data.get('access_type', None) == 'S3':
            log.info('Download file via Amazon S3 storage.')
            access_file_update(json_data.get('s3_access_key', None),
                               json_data.get('s3_secret_key', None),
                               json_data.get('s3_url', None),
                               json_data.get('stored_filename', None), log,
                               json_data.get('s3_bucket_name', None))
        elif json_data.get('access_type', None) == 'FTP TOOL':
            log.info('Transfer file via TeamViewer (FTP Tool).')
            # The FTP tool transfers the file externally; this only
            # derives the expected destination path.
            os.path.join(downloads, json_data.get('stored_filename', None))
    except Exception as error:
        log.exception(error)
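
# Illustrative request for compute(); the keys mirror the ones the
# function reads and every value is a placeholder, not a real credential,
# address or file.
_sample_download_request = json.dumps({
    'schedule_download': False,
    'access_type': 'FTP',
    'remote_username': '<username>',
    'remote_password': '<password>',
    'remote_public_address': '203.0.113.10',
    'remote_file': '/var/videos/cam01.mp4',
    'stored_filename': 'order_video',
})
# compute(_sample_download_request)
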
def trigger_live_capture(bucket_name: str,
                         order_name: str,
                         start_time: str,
                         end_time: str,
                         camera_address: str,
                         camera_username: str = 'xames3',
                         camera_password: str = 'iamironman',
                         camera_port: Union[int, str] = 554,
                         camera_timeout: Union[float, int] = 30.0,
                         timestamp_format: str = '%H:%M:%S',
                         log: Optional[logging.Logger] = None) -> Optional[str]:
  """Start video recording once the local trigger time is reached."""
  log = _log(__file__) if log is None else log
  run_date = datetime.now().strftime('%Y-%m-%d')
  _start_time = f'{run_date} {start_time}'
  while True:
    if str(now()) >= _start_time:
      return start_live_recording(bucket_name, order_name, start_time,
                                  end_time, camera_address, camera_username,
                                  camera_password, camera_port, camera_timeout,
                                  timestamp_format, log)
    time.sleep(1.0)
def live(bucket_name: str,
         order_name: str,
         run_date: str,
         start_time: str,
         end_time: str,
         camera_address: str,
         camera_username: str = 'xames3',
         camera_password: str = 'iamironman',
         camera_port: Union[int, str] = 554,
         camera_timeout: Union[float, int, str] = 30.0,
         timestamp_format: str = '%H:%M:%S',
         log: Optional[logging.Logger] = None) -> Optional[str]:
    """Record live videos based on time duration using FFMPEG.

    Args:
      bucket_name: S3 bucket name.
      order_name: Order name.
      run_date: Date on which to record the video.
      start_time: Time at which to start recording.
      end_time: Time at which to stop recording.
      camera_address: Camera's IP address.
      camera_username: Camera username.
      camera_password: Camera password.
      camera_port: Camera port number.
      camera_timeout: Maximum time to wait before a disconnection occurs.
      timestamp_format: Timestamp format used to parse the recording window.
      log: Logger object.

    Returns:
      Path of the concatenated video file, or None if recording failed.
    """
    log = _log(__file__) if log is None else log

    camera_port = int(camera_port)
    camera_timeout = float(camera_timeout)

    start_time, end_time = f'{run_date} {start_time}', f'{run_date} {end_time}'
    duration = calculate_duration(start_time, end_time, timestamp_format, True)
    force_close = datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
    force_close = force_close.replace(tzinfo=timezone.utc).timestamp()

    vid_type = video_type(True, True, True)
    temp = os.path.join(_lr, f'{bucket_name}{order_name}')

    if not os.path.isdir(temp):
        os.mkdir(temp)
    temp_file = os.path.join(temp, f'{bucket_name}{order_name}{vid_type}.mp4')

    url = configure_camera_url(camera_address, camera_username,
                               camera_password, camera_port)
    slept_duration, idx = 0, 1

    if duration != 0:
        try:
            while True:
                if camera_live(camera_address, camera_port, camera_timeout,
                               log):
                    file = filename(temp_file, idx)
                    log.info('Recording started for selected camera.')
                    os.system(ffmpeg_str(url, file, duration, camera_timeout))

                    stop_utc = now().replace(tzinfo=timezone.utc).timestamp()
                    stop_secs = now().second

                    _old_file = file_size(file)
                    old_duration = (stop_secs if _old_file == '300.0 bytes'
                                    else drn(file))
                    duration = duration - old_duration - slept_duration

                    slept_duration = 0
                    idx += 1
                    if (force_close <= stop_utc) or (duration <= 0):
                        output = concate_videos(temp, delete_old_files=True)
                        if output:
                            return output
                else:
                    log.warning(
                        'Unable to record because of poor network connectivity.'
                    )
                    slept_duration += camera_timeout
                    log.warning('Compensating lost time & retrying after '
                                f'{camera_timeout} secs.')
                    time.sleep(camera_timeout)
        except Exception as error:
            log.critical(f'Something went wrong: {error}')
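
# Worked example (illustrative numbers) of the compensation arithmetic in
# the recording loop above: with a 600 s window, if the camera was
# unreachable for one 30 s timeout and the next ffmpeg pass then captured
# 250 s, the remaining window is
#   duration = 600 - 250 - 30 = 320 seconds
# and slept_duration resets to 0 for the following pass.
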
def phase_one(json_obj: str, run_date: str, curr: datetime,
              log: logging.Logger) -> None:
    """Just Phase One."""
    try:
        start = now()
        # upload, junk, trimmed, urls = [], [], [], []
        org_file = None
        # report = os.path.join(reports, '{}.csv')

        json_data = json.loads(json_obj)
        log.info('Parsed consumer JSON request.')

        country = json_data.get('country_code', 'xa')
        customer = json_data.get('customer_id', 0)
        contract = json_data.get('contract_id', 0)
        order = json_data.get('order_id', 0)
        store = json_data.get('store_id', 0)
        area = json_data.get('area_code', 'e')
        camera = json_data.get('camera_id', 0)
        use_stored = json_data.get('use_stored', False)
        start_time = json_data['start_time']
        end_time = json_data['end_time']
        address = json_data['camera_address']
        username = json_data.get('camera_username', 'admin')
        password = json_data['camera_password']
        port = json_data.get('camera_port', 554)
        timeout = json_data.get('camera_timeout', 30.0)
        timestamp = json_data.get('timestamp_format', '%H:%M:%S')
        sampling_rate = json_data['sampling_rate']
        motion = json_data.get('analyze_motion', False)
        face = json_data.get('analyze_face', False)
        compress = json_data.get('perform_compression', True)
        trim = json_data.get('perform_trimming', True)
        trimpress = json_data.get('trim_compressed', True)
        db_order = json_data.get('order_pk', 0)

        log.info(
            f'Video processing engine started spinning for camera #{camera}')

        bucket = bucket_name(country, customer, contract, order, log)
        order = order_name(store, area, camera, curr, log)

        if use_stored:
            dl_file = json_data['sub_json']['stored_filename']
            org_file = os.path.join(downloads, f'{dl_file}.mp4')
            log.info('Using downloaded video for this order.')

            if not os.path.isfile(org_file):
                log.error('File not selected for processing.')
                raise Exception('[e] File not selected for processing.')

        else:
            log.info(f'Recording from camera #{camera} for this order.')
            org_file = live(bucket, order, run_date, start_time, end_time,
                            address, username, password, port, timeout,
                            timestamp, log)

            # if org_file:
            #   cloned = rename_original_file(org_file, bucket, order)
            #   temp = cloned

            #   log.info('Created backup of the original video.')
            #   # TODO(xames3): Add code to move this file to AWS Glacier.
            #   archived = create_copy(cloned)

            #   log.info('Commencing core processes, estimated time of completion is '
            #            f'{ctc(cloned, sampling_rate)}.')

            #   if motion:
            #     cloned = track_motion(cloned, log=log, debug_mode=False)

            #     if not cloned:
            #       cloned = archived

            #     log.info('Fixing up the symbolic link of the motion detected video.')
            #     shutil.move(cloned, temp)
            #     log.info('Symbolic link has been restored for motion detected video.')
            #     cloned = temp
            #   else:
            #     log.info('Skipping motion analysis.')

            #   log.info(f'Randomly sampling {sampling_rate}% of the original video.')

            #   temp = trim_sample_section(temp, sampling_rate)
            #   junk.append(temp)

            #   if face:
            #     temp = cloned
            #     cloned = redact_faces(cloned, log=log, debug_mode=False)

            #     if not cloned:
            #       cloned = archived

            #     log.info('Fixing up the symbolic link of the redacted video.')
            #     shutil.move(cloned, temp)
            #     log.info('Symbolic link has been restored for the redacted video.')
            #     cloned = temp
            #   else:
            #     log.info('Skipping face redaction.')

            #   if not trim:
            #     trimpress = False

            #   log.info('Renaming original video as per internal nomenclature.')
            #   final = rename_aaaa_file(cloned, video_type(compress, trim, trimpress))
            #   upload.append(final)

            #   if compress:
            #     log.info('Compressing video as required.')
            #     final = compress_video(final, log)

            #     if trimpress:
            #       trimmed = trimming_callable(json_data, final, log)

            #   elif trim:
            #     trimmed = trimming_callable(json_data, final, log)

            #   if trimmed:
            #     upload.extend(trimmed)

            #   try:
            #     create_s3_bucket('AKIAR4DHCUP262T3WIUX',
            #                      'B2ii3+34AigsIx0wB1ZU01WLNY6DYRbZttyeTo+5',
            #                      bucket, log)
            #   except Exception:
            #     pass

            #   log.info('Uploading video to the S3 bucket.')
            #   for idx, file in enumerate(upload):
            #     url = upload_to_bucket('AKIAR4DHCUP262T3WIUX',
            #                            'B2ii3+34AigsIx0wB1ZU01WLNY6DYRbZttyeTo+5',
            #                            bucket, file, log)
            #     urls.append(url)
            #     log.info(f'Uploaded {idx + 1}/{len(upload)} > '
            #              f'{os.path.basename(file)} on to S3 bucket.')

            #   log.info('Exporting public URLs.')
            #   with open(report.format(bucket), 'a', encoding=dev.DEF_CHARSET) as _csv:
            #     writer(_csv, delimiter='\n', quoting=QUOTE_MINIMAL).writerow(urls)

            #   junk.extend(upload)

            #   # smash_db(db_order, upload, urls)
            #   log.info('Written values into the database.')

            #   log.info('Cleaning up the directory.')
            #   for idx, file in enumerate(junk):
            #     os.remove(file)
            #     log.warning(f'Removed file {idx + 1}/{len(junk)} > '
            #                 f'{os.path.basename(file)} from current machine.')

        log.info(f'Processing this order took around {now() - start}.')
    except KeyboardInterrupt:
        log.error('Spinner interrupted.')
    except Exception as error:
        log.exception(error)
        log.critical(
            'Something went wrong while video processing was running.')
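
# Hypothetical driver for phase_one(); use_stored=True sidesteps a live
# camera and every value below is a placeholder.
def _demo_phase_one(log: logging.Logger) -> None:
    request = json.dumps({
        'use_stored': True,
        'sub_json': {'stored_filename': 'order_video'},
        'start_time': '09:30:00',
        'end_time': '10:00:00',
        'camera_address': '203.0.113.7',
        'camera_password': '<password>',
        'sampling_rate': 5,
    })
    phase_one(request, datetime.now().strftime('%Y-%m-%d'), datetime.now(),
              log)
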
async def spin(json_obj: Union[bytes, str], log: logging.Logger) -> None:
    """Spin the Video processing engine."""
    try:
        start = now()
        upload_list, temp_list, trim_upload, urls = [], [], [], []
        original_file = None
        # log.info('Video processing engine started spinning.')
        json_data = json.loads(json_obj)
        log.info('Video processing engine started spinning for camera '
                 f"{json_data.get('camera_id', 0)}.")
        log.info('Parsed consumer JSON request.')
        bucket = bucket_name(json_data.get('country_code', 'xa'),
                             json_data.get('customer_id', 0),
                             json_data.get('contract_id', 0),
                             json_data.get('order_id', 0), log)
        order = order_name(json_data.get('store_id', 0),
                           json_data.get('area_code', 'e'),
                           json_data.get('camera_id', 0), start, log)
        use_stored = json_data.get('use_stored', False)
        if use_stored:
            stored_filename = json_data['sub_json']['stored_filename']
            original_file = os.path.join(downloads, f'{stored_filename}.mp4')
            log.info('Using downloaded video for this order.')
            if not os.path.isfile(original_file):
                log.error('File not selected for processing.')
                raise Exception('[e] File not selected for processing.')
        else:
            log.info('Recording from live camera for this order.')
            original_file = trigger_utc_capture(
                bucket, order, json_data['start_time'], json_data['end_time'],
                json_data.get('camera_timezone', 'UTC'),
                json_data['camera_address'],
                json_data.get('camera_username', 'admin'),
                json_data['camera_password'],
                int(json_data.get('camera_port', 554)),
                float(json_data.get('camera_timeout', 30.0)),
                json_data.get('timestamp_format', '%H:%M:%S'), log)
        # trigger_utc_capture() signals failure with the sentinel string
        # 'RecordingError'; stop before treating it as a file path.
        if original_file == 'RecordingError':
            raise Exception('[e] Live recording failed.')
        cloned_file = rename_original_file(original_file, bucket, order)
        temp_file = str(cloned_file)
        log.info('Created backup of the original video.')
        # TODO(xames3): Add code to move this file to AWS Glacier.
        archived_file = create_copy(cloned_file)
        sampling_rate = float(json_data['sampling_rate'])
        log.info('Commencing core processes, estimated time of completion is '
                 f'{completion_time_calculator(cloned_file, sampling_rate)}.')
        if json_data.get('analyze_motion', False):
            cloned_file = track_motion(cloned_file, log=log, debug_mode=False)
            # Fall back to the archived copy if motion tracking failed.
            if not cloned_file:
                cloned_file = archived_file
            log.info(
                'Fixing up the symbolic link of the motion detected video.')
            shutil.move(cloned_file, temp_file)
            log.info('Symbolic link has been restored for the motion '
                     'detected video.')
            cloned_file = temp_file
        else:
            log.info('Skipping motion analysis.')
        log.info(f'Randomly sampling {sampling_rate}% of the original video.')
        temp = trim_sample_section(temp_file, sampling_rate)
        temp_list.append(temp)
        if json_data.get('analyze_face', False):
            temp_file = str(cloned_file)
            cloned_file = redact_faces(cloned_file, log=log, debug_mode=False)
            # Fall back to the archived copy if redaction failed.
            if not cloned_file:
                cloned_file = archived_file
            log.info('Fixing up the symbolic link of the redacted video.')
            shutil.move(cloned_file, temp_file)
            log.info('Symbolic link has been restored for the redacted video.')
            cloned_file = temp_file
        else:
            log.info('Skipping face redaction.')
        perform_compression = json_data.get('perform_compression', True)
        perform_trimming = json_data.get('perform_trimming', True)
        if perform_trimming:
            trim_compressed = json_data.get('trim_compressed', True)
        else:
            trim_compressed = False
        log.info('Renaming original video as per internal nomenclature.')
        final_file = rename_aaaa_file(
            cloned_file,
            video_type(perform_compression, perform_trimming, trim_compressed))
        upload_list.append(final_file)
        if perform_compression:
            log.info('Compressing video as required.')
            final_file = compress_video(final_file, log)
            if trim_compressed:
                trim_upload = trimming_callable(json_data, final_file, log)
        elif perform_trimming:
            trim_upload = trimming_callable(json_data, final_file, log)
        upload_list.extend(trim_upload)
        try:
            create_s3_bucket('AKIAR4DHCUP262T3WIUX',
                             'B2ii3+34AigsIx0wB1ZU01WLNY6DYRbZttyeTo+5',
                             bucket, log)
            log.info('Created bucket on Amazon S3 for this order.')
        except Exception:
            pass
        log.info('Uploading video to the S3 bucket.')
        for idx, file in enumerate(upload_list):
            url = upload_to_bucket('AKIAR4DHCUP262T3WIUX',
                                   'B2ii3+34AigsIx0wB1ZU01WLNY6DYRbZttyeTo+5',
                                   bucket, file, log)
            urls.append(url)
            log.info(f'Uploaded {idx + 1}/{len(upload_list)} > '
                     f'{os.path.basename(file)} on to S3 bucket.')
        log.info('Exporting public URLs.')
        with open(os.path.join(reports, f'{bucket}.csv'),
                  'a',
                  encoding=dev.DEF_CHARSET) as csv_file:
            _file = csv.writer(csv_file,
                               delimiter='\n',
                               quoting=csv.QUOTE_MINIMAL)
            _file.writerow(urls)
        temp_list.extend(upload_list)
        smash_db(json_data.get('order_pk', 0), upload_list, urls)
        log.info('Written values into the database.')
        log.info('Cleaning up the directory.')
        for idx, file in enumerate(temp_list):
            os.remove(file)
            log.warning(f'Removed file {idx + 1}/{len(temp_list)} > '
                        f'{os.path.basename(file)} from current machine.')
        log.info('Total time taken for processing this order was '
                 f'{now() - start}.')
    except KeyboardInterrupt:
        log.error('Video processing engine interrupted.')
        sys.exit(0)
    except Exception as error:
        log.exception(error)
        log.critical(
            'Something went wrong while video processing was running.')
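
# Minimal sketch of driving the async spin() coroutine, e.g. from a queue
# consumer; the payload is a placeholder request that reuses an already
# downloaded file.
if __name__ == '__main__':
    import asyncio

    _payload = json.dumps({
        'use_stored': True,
        'sub_json': {'stored_filename': 'order_video'},
        'sampling_rate': 5,
    })
    asyncio.run(spin(_payload, _log(__file__)))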