Example #1
def runCapture(config, duration=None, video_file=None, nodetect=False, detect_end=False, \
    upload_manager=None, resume_capture=False):
    """ Run capture and compression for the given time.given
    
    Arguments:
        config: [config object] Configuration read from the .config file.

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.
        resume_capture: [bool] Resume capture in the last data directory in CapturedFiles. False by default.

    Return:
        night_archive_dir: [str] Path to the archive folder of the processed night.
    """

    global STOP_CAPTURE

    # Check if resuming capture to the last capture directory
    night_data_dir_name = None
    if resume_capture:

        log.info("Resuming capture in the last capture directory...")

        # Find the latest capture directory
        capturedfiles_path = os.path.join(os.path.abspath(config.data_dir),
                                          config.captured_dir)
        most_recent_dir_time = 0
        for dir_name in sorted(os.listdir(capturedfiles_path)):

            dir_path_check = os.path.join(capturedfiles_path, dir_name)

            # Check it's a directory
            if os.path.isdir(dir_path_check):

                # Check if it starts with the correct station code
                if dir_name.startswith(str(config.stationID)):

                    dir_mod_time = os.path.getmtime(dir_path_check)

                    # Check that it is the most recent directory
                    if (night_data_dir_name is None) or (dir_mod_time >
                                                         most_recent_dir_time):
                        night_data_dir_name = dir_name
                        night_data_dir = dir_path_check
                        most_recent_dir_time = dir_mod_time

        if night_data_dir_name is None:
            log.info(
                "Previous capture directory could not be found! Creating a new one..."
            )

        else:
            log.info("Previous capture directory found: {:s}".format(
                night_data_dir))

        # Resume run is finished now, reset resume flag
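        # (cml_args is assumed to be the module-level parsed command-line arguments object)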
        cml_args.resume = False

    # Make a name for the capture data directory
    if night_data_dir_name is None:

        # Create a directory for captured files
        night_data_dir_name = str(config.stationID) + '_' \
            + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')

        # Full path to the data directory
        night_data_dir = os.path.join(os.path.abspath(config.data_dir), config.captured_dir, \
            night_data_dir_name)

    # Wait before the capture starts if a time has been given
    if (not resume_capture) and (video_file is None):
        log.info("Waiting {:d} seconds before capture start...".format(
            int(config.capture_wait_seconds)))
        time.sleep(config.capture_wait_seconds)

    # Add a note about Patreon supporters
    print("################################################################")
    print("Thanks to our Patreon supporters in the 'Dinosaur Killer' class:")
    print("- Myron Valenta")
    print("https://www.patreon.com/globalmeteornetwork")
    print("\n\n\n" \
        + "       .:'       .:'        .:'       .:'  \n"\
        + "   _.::'     _.::'      _.::'     _.::'    \n"\
        + "  (_.'      (_.'       (_.'      (_.'      \n"\
        + "                         __                \n"\
        + "                        / _)               \n"\
        + "_\\/_          _/\\/\\/\\_/ /             _\\/_ \n"\
        + "/o\\         _|         /              //o\\ \n"\
        + " |         _|  (  | (  |                |  \n"\
        + "_|____    /__.-'|_|--|_|          ______|__\n")
    print("################################################################")

    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)

    # Copy the used config file to the capture directory
    if os.path.isfile(config.config_file_name):
        try:
            shutil.copy2(config.config_file_name,
                         os.path.join(night_data_dir, ".config"))
        except:
            log.error("Cannot copy the config file to the capture directory!")

    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config, night_data_dir)

    # If the platepar is not None, set the FOV from it
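    #   (the platepar's fov_h is the horizontal FOV, which maps to the image width)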
    if platepar is not None:
        config.fov_w = platepar.fov_h
        config.fov_h = platepar.fov_v

    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. We apply a dirty fix here: an extra image row and column are added whenever such
    ### a memory chunk would be created. The compression is performed on the padded image, which is
    ### cropped back to its original dimensions afterwards.
    array_pad = 0

    # Check if the frame buffer size is a multiple of the RPi3 L2 cache size and add padding if so
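    #   e.g. for a 1280x720 sensor, 256*1280*720 = 235,929,600 bytes = 450*(512*1024), an exact
    #   multiple of the 512 kB cache, so one extra row and column are added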
    if (256 * config.width * config.height) % (512 * 1024) == 0:
        array_pad = 1

    # Init arrays for parallel compression on 2 cores
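    #   Two 256-frame buffers are used so that capture can fill one block while the other is
    #   being compressed (double buffering)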
    sharedArrayBase = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad),
                                      (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)

    sharedArrayBase2 = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad),
                                        (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')

    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start (helps stability)
            delay_detection = 120

        # Add an additional postprocessing delay
        delay_detection += config.postprocess_delay

        # Set a flag file to indicate that previous files are being loaded (if any)
        capture_resume_file_path = os.path.join(
            config.data_dir, config.capture_resume_flag_file)
        with open(capture_resume_file_path, 'w') as f:
            pass

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors, cores=1, log=log, delay_start=delay_detection, \
            backup_dir=night_data_dir)
        detector.startPool()

        # If the capture is being resumed into the directory, load all previously saved FF files
        if resume_capture:

            # Load all previously processed FF files
            for i, ff_name in enumerate(sorted(os.listdir(night_data_dir))):

                # Every 50 files loaded, update the flag file
                if i % 50 == 0:
                    with open(capture_resume_file_path, 'a') as f:
                        f.write("{:d}\n".format(i))

                # Check if the file is a valid FF file
                ff_path = os.path.join(night_data_dir, ff_name)
                if os.path.isfile(ff_path) and (str(
                        config.stationID) in ff_name) and validFFName(ff_name):

                    # Add the FF file to the detector
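                    #   (the small wait_time paces job submissions to the detector queue)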
                    detector.addJob([night_data_dir, ff_name, config],
                                    wait_time=0.005)
                    log.info(
                        "Added existing FF file for detection: {:s}".format(
                            ff_name))

        # Remove the flag file
        if os.path.isfile(capture_resume_file_path):
            try:
                os.remove(capture_resume_file_path)
            except:
                log.error("There was an error during removing the capture resume flag file: " \
                    + capture_resume_file_path)

    # Initialize buffered capture
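    #   (BufferedCapture grabs frames from the camera or the given video file and writes them
    #   into the shared frame buffers)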
    bc = BufferedCapture(sharedArray,
                         startTime,
                         sharedArray2,
                         startTime2,
                         config,
                         video_file=video_file)

    # Initialize the live image viewer
    if config.live_maxpixel_enable:

        # Enable showing the live JPG
        config.live_jpg = True

        live_jpg_path = os.path.join(config.data_dir, 'live.jpg')

        live_view = LiveViewer(live_jpg_path,
                               image=True,
                               slideshow=False,
                               banner_text="Live")
        live_view.start()

    else:
        live_view = None

    # Initialize compression
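    #   (the Compressor turns each filled 256-frame block into an FF file on disk and hands it
    #   to the detector, if one is given)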
    compressor = Compressor(night_data_dir,
                            sharedArray,
                            startTime,
                            sharedArray2,
                            startTime2,
                            config,
                            detector=detector)

    # Start buffered capture
    bc.startCapture()

    # Init and start the compression
    compressor.start()

    # Capture until Ctrl+C is pressed
    wait(duration, compressor)

    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')

    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of late or dropped frames: ' + str(dropped_frames))

    # Stop the compressor
    log.debug('Stopping compression...')
    detector = compressor.stop()

    # Free shared memory after the compressor is done
    try:
        log.debug('Freeing frame buffers...')
        del sharedArrayBase
        del sharedArray
        del sharedArrayBase2
        del sharedArray2

    except Exception as e:
        log.debug('Freeing frame buffers failed with error:' + repr(e))
        log.debug(repr(traceback.format_exception(*sys.exc_info())))

    log.debug('Compression stopped')

    if live_view is not None:

        # Stop the live viewer
        log.debug('Stopping live viewer...')

        live_view.stop()
        live_view.join()
        del live_view
        live_view = None

        log.debug('Live view stopped')

    # If detection should be performed
    if not nodetect:

        try:
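            # multiprocessing.Queue.qsize() raises NotImplementedError on some platforms
            #   (e.g. macOS), hence the try/except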
            log.info('Finishing up the detection, ' + str(detector.input_queue.qsize()) \
                + ' files to process...')
        except:
            print(
                'Finishing up the detection... error when getting input queue size!'
            )

        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()

        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 2 free
                available_cores = multiprocessing.cpu_count() - 2

                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(
                        available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)

            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')

        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')

            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()

        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE

        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

    else:

        detection_results = []

    # Save detections to disk and archive the night
    night_archive_dir, archive_name, _ = processNight(night_data_dir, config, \
        detection_results=detection_results, nodetect=nodetect)

    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])
        log.info('File added...')

        # Delay the upload, if the delay is given
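        #   (config.upload_delay is presumably given in minutes, hence the conversion to seconds)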
        upload_manager.delayNextUpload(delay=60 * config.upload_delay)

    # Delete detector backup files
    if detector is not None:
        detector.deleteBackupFiles()

    # If the capture was run for a limited time, run the upload right away
    if (duration is not None) and (upload_manager is not None):
        log.info('Uploading data before exiting...')
        upload_manager.uploadData()

    # Run the external script
    runExternalScript(night_data_dir, night_archive_dir, config)

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()

    return night_archive_dir
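
A minimal usage sketch (hypothetical, not part of the shown sources), assuming config has already been read from the station's .config file:

# Hypothetical call: run a one-hour capture (duration is in seconds) with detection
# enabled and uploading disabled; returns the archive folder of the processed night
night_archive_dir = runCapture(config, duration=3600, nodetect=False,
                               detect_end=False, upload_manager=None)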
Example #2
def processIncompleteCaptures(config, upload_manager):
    """ Reprocess broken capture folders.
    Arguments:
        config: [config object] Configuration read from the .config file.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server.
    """

    log.debug('Checking for folders containing partially-processed data')

    # Create a list of capture directories
    captured_dir_list = []
    captured_data_path = os.path.join(config.data_dir, config.captured_dir)
    for captured_dir_name in sorted(os.listdir(captured_data_path)):

        captured_dir_path = os.path.join(captured_data_path, captured_dir_name)

        # Check that the dir starts with the correct station code, that it really is a directory, and that
        #   there are some FF files inside
        if captured_dir_name.startswith(config.stationID):

            if os.path.isdir(captured_dir_path):

                if any([file_name.startswith("FF_{:s}".format(config.stationID)) \
                    for file_name in os.listdir(captured_dir_path)]):

                    captured_dir_list.append(captured_dir_name)

    # Check if there is a processed archived dir for every captured dir
    for captured_subdir in captured_dir_list:

        captured_dir_path = os.path.join(config.data_dir, config.captured_dir,
                                         captured_subdir)
        log.debug("Checking folder: {:s}".format(captured_subdir))

        # Check if there are any backup pickle files in the capture directory
        pickle_files = glob.glob(
            "{:s}/rms_queue_bkup_*.pickle".format(captured_dir_path))
        any_pickle_files = len(pickle_files) > 0

        # Check if there is an FTPdetectinfo file in the directory, indicating that the folder was
        #   fully processed
        FTPdetectinfo_files = glob.glob(
            '{:s}/FTPdetectinfo_*.txt'.format(captured_dir_path))
        any_ftpdetectinfo_files = len(FTPdetectinfo_files) > 0

        # Auto reprocess criteria:
        #   - Any backup pickle files
        #   - No pickle and no FTPdetectinfo files
        run_reprocess = any_pickle_files or (not any_ftpdetectinfo_files)

        # Skip the folder if it doesn't need to be reprocessed
        if not run_reprocess:
            log.debug("    ... fully processed!")
            continue

        log.info(
            "Found partially-processed data in {:s}".format(captured_dir_path))
        try:

            # Reprocess the night
            night_archive_dir, archive_name, detector = processNight(
                captured_dir_path, config)

            # Upload the archive, if upload is enabled
            if upload_manager is not None:
                log.info(
                    "Adding file to upload list: {:s}".format(archive_name))
                upload_manager.addFiles([archive_name])
                log.info("File added...")

            # Delete detection backup files
            if detector is not None:
                detector.deleteBackupFiles()

            # Run the external script if running after autoreprocess is enabled
            if config.external_script_run and config.auto_reprocess_external_script_run:
                runExternalScript(captured_dir_path, night_archive_dir, config)

            log.info("Folder {:s} reprocessed with success!".format(
                captured_dir_path))

        except Exception as e:
            log.error(
                "An error occurred when trying to reprocess partially processed data!"
            )
            log.error(repr(e))
            log.error(repr(traceback.format_exception(*sys.exc_info())))
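
A matching sketch for this helper (hypothetical call, same assumed config object); with upload_manager=None the reprocessed archives are simply not queued for upload:

# Scan CapturedFiles for partially processed nights and reprocess them
processIncompleteCaptures(config, upload_manager=None)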
Example #3
def runCapture(config, duration=None, video_file=None, nodetect=False, detect_end=False, upload_manager=None):
    """ Run capture and compression for the given time.given

    Arguments:
        config: [config object] Configuration read from the .config file.

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture 
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.

    Return:
        night_archive_dir: [str] Path to the archive folder of the processed night.

    """

    global STOP_CAPTURE


    # Create a directory for captured files
    night_data_dir_name = str(config.stationID) + '_' + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')

    # Full path to the data directory
    night_data_dir = os.path.join(os.path.abspath(config.data_dir), config.captured_dir, night_data_dir_name)


    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)


    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config, night_data_dir)


    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. We apply a dirty fix here: an extra image row and column are added whenever such
    ### a memory chunk would be created. The compression is performed on the padded image, which is
    ### cropped back to its original dimensions afterwards.
    array_pad = 0

    # Check if the frame buffer size is a multiple of the RPi3 L2 cache size and add padding if so
    if (256*config.width*config.height)%(512*1024) == 0:
        array_pad = 1


    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)
    
    sharedArrayBase2 = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')


    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start
            delay_detection = 120

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors, cores=1, log=log, delay_start=delay_detection, \
            backup_dir=night_data_dir)
        detector.startPool()

    
    # Initialize buffered capture
    bc = BufferedCapture(sharedArray, startTime, sharedArray2, startTime2, config, video_file=video_file)


    # Initialize the live image viewer
    if config.live_maxpixel_enable:
        live_view = LiveViewer(night_data_dir, slideshow=False, banner_text="Live")
        live_view.start()

    else:
        live_view = None

    
    # Initialize compression
    compressor = Compressor(night_data_dir, sharedArray, startTime, sharedArray2, startTime2, config, 
        detector=detector)

    
    # Start buffered capture
    bc.startCapture()

    # Init and start the compression
    compressor.start()

    
    # Capture until Ctrl+C is pressed
    wait(duration, compressor)
        
    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')


    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of late or dropped frames: ' + str(dropped_frames))


    # Stop the compressor
    log.debug('Stopping compression...')
    detector = compressor.stop()

    # Free shared memory after the compressor is done
    try:
        log.debug('Freeing frame buffers...')
        del sharedArrayBase
        del sharedArray
        del sharedArrayBase2
        del sharedArray2

    except Exception as e:
        log.debug('Freeing frame buffers failed with error:' + repr(e))
        log.debug(repr(traceback.format_exception(*sys.exc_info())))

    log.debug('Compression stopped')


    if live_view is not None:

        # Stop the live viewer
        log.debug('Stopping live viewer...')

        live_view.stop()
        live_view.join()
        del live_view
        live_view = None

        log.debug('Live view stopped')



    # If detection should be performed
    if not nodetect:

        try:
            log.info('Finishing up the detection, ' + str(detector.input_queue.qsize()) \
                + ' files to process...')
        except:
            print('Finishing up the detection... error when getting input queue size!')


        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()


        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 2 free
                available_cores = multiprocessing.cpu_count() - 2


                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)


            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')


        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')
                
            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager
                    

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()


        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE


        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

    else:

        detection_results = []




    # Save detections to disk and archive the night
    night_archive_dir, archive_name, _ = processNight(night_data_dir, config, \
        detection_results=detection_results, nodetect=nodetect)


    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])
        log.info('File added...')


    # Delete detector backup files
    if detector is not None:
        detector.deleteBackupFiles()


    # If the capture was run for a limited time, run the upload right away
    if (duration is not None) and (upload_manager is not None):
        log.info('Uploading data before exiting...')
        upload_manager.uploadData()


    # Run the external script
    runExternalScript(night_data_dir, night_archive_dir, config)
    

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()


    return night_archive_dir