Example #1
def runCapture(config,
               duration=None,
               video_file=None,
               nodetect=False,
               detect_end=False,
               upload_manager=None):
    """ Run capture and compression for the given time.given

    Arguments:
        config: [config object] Configuration read from the .config file

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture 
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.

    """

    global STOP_CAPTURE

    # Create a directory for captured files
    night_data_dir_name = str(
        config.stationID) + '_' + datetime.datetime.utcnow().strftime(
            '%Y%m%d_%H%M%S_%f')

    # Full path to the data directory
    night_data_dir = os.path.join(os.path.abspath(config.data_dir),
                                  config.captured_dir, night_data_dir_name)

    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)

    # Load the default flat field image if it is available
    flat_struct = None

    if config.use_flat:

        # Check if the flat exists
        if os.path.exists(os.path.join(os.getcwd(), config.flat_file)):
            flat_struct = Image.loadFlat(os.getcwd(), config.flat_file)

            log.info('Loaded flat field image: ' +
                     os.path.join(os.getcwd(), config.flat_file))

    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config)

    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. A dirty fix is applied here: an extra image row and column are added whenever
    ### such a memory chunk would be created. The compression is performed, and the image is cropped
    ### back to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256 * config.width * config.height) % (512 * 1024) == 0:
        array_pad = 1

    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad),
                                      (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)

    sharedArrayBase2 = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad),
                                        (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')

    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start
            delay_detection = 120

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors,
                              cores=1,
                              log=log,
                              delay_start=delay_detection)
        detector.startPool()

    # Initialize buffered capture
    bc = BufferedCapture(sharedArray,
                         startTime,
                         sharedArray2,
                         startTime2,
                         config,
                         video_file=video_file)

    # Initialize the live image viewer
    live_view = LiveViewer(window_name='Maxpixel')

    # Initialize compression
    compressor = Compressor(night_data_dir,
                            sharedArray,
                            startTime,
                            sharedArray2,
                            startTime2,
                            config,
                            detector=detector,
                            live_view=live_view,
                            flat_struct=flat_struct)

    # Start buffered capture
    bc.startCapture()

    # Start the compression
    compressor.start()

    # Capture until Ctrl+C is pressed
    wait(duration)

    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')

    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of dropped frames: ' + str(dropped_frames))

    # Stop the compressor
    log.debug('Stopping compression...')
    detector, live_view = compressor.stop()
    log.debug('Compression stopped')

    # Stop the live viewer
    log.debug('Stopping live viewer...')
    live_view.stop()
    del live_view
    log.debug('Live view stopped')

    # Init data lists
    star_list = []
    meteor_list = []
    ff_detected = []

    # If detection should be performed
    if not nodetect:

        log.info('Finishing up the detection, ' +
                 str(detector.input_queue.qsize()) + ' files to process...')

        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()

        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 1 free
                available_cores = multiprocessing.cpu_count() - 1

                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(
                        available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)

            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')

        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')

            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()

        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE

        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

        # Remove all 'None' results, which were errors
        detection_results = [
            res for res in detection_results if res is not None
        ]

        # Count the number of detected meteors
        meteors_num = 0
        for _, _, meteor_data in detection_results:
            for meteor in meteor_data:
                meteors_num += 1

        log.info('TOTAL: ' + str(meteors_num) + ' detected meteors.')

        # Save the detections to a file
        for ff_name, star_data, meteor_data in detection_results:

            x2, y2, background, intensity = star_data

            # Skip if no stars were found
            if not x2:
                continue

            # Construct the table of the star parameters
            star_data = zip(x2, y2, background, intensity)

            # Add star info to the star list
            star_list.append([ff_name, star_data])

            # Handle the detected meteors
            meteor_No = 1
            for meteor in meteor_data:

                rho, theta, centroids = meteor

                # Append to the results list
                meteor_list.append([ff_name, meteor_No, rho, theta, centroids])
                meteor_No += 1

            # Add the FF file to the archive list if a meteor was detected on it
            if meteor_data:
                ff_detected.append(ff_name)

        # Generate the name for the CALSTARS file
        calstars_name = 'CALSTARS_' + "{:s}".format(str(config.stationID)) + '_' \
            + os.path.basename(night_data_dir) + '.txt'

        # Write detected stars to the CALSTARS file
        CALSTARS.writeCALSTARS(star_list, night_data_dir, calstars_name, config.stationID, config.height, \
            config.width)

        # Generate FTPdetectinfo file name
        ftpdetectinfo_name = 'FTPdetectinfo_' + os.path.basename(
            night_data_dir) + '.txt'

        # Write FTPdetectinfo file
        FTPdetectinfo.writeFTPdetectinfo(meteor_list, night_data_dir, ftpdetectinfo_name, night_data_dir, \
            config.stationID, config.fps)

        # Get the platepar file
        platepar, platepar_path, platepar_fmt = getPlatepar(config)

        # Run calibration check and auto astrometry refinement
        if platepar is not None:

            # Read in the CALSTARS file
            calstars_list = CALSTARS.readCALSTARS(night_data_dir,
                                                  calstars_name)

            # Run astrometry check and refinement
            platepar, fit_status = autoCheckFit(config, platepar,
                                                calstars_list)

            # If the fit was successful, apply the astrometry to detected meteors
            if fit_status:

                log.info('Astrometric calibration SUCCESSFUL!')

                # Save the refined platepar to the night directory and as default
                platepar.write(os.path.join(night_data_dir,
                                            config.platepar_name),
                               fmt=platepar_fmt)
                platepar.write(platepar_path, fmt=platepar_fmt)

            else:
                log.info(
                    'Astrometric calibration FAILED! Using the old platepar for calibration...'
                )

            # Calculate astrometry for meteor detections
            applyAstrometryFTPdetectinfo(night_data_dir, ftpdetectinfo_name,
                                         platepar_path)

    log.info('Plotting field sums...')

    # Plot field sums to a graph
    plotFieldsums(night_data_dir, config)

    # Archive all fieldsums to one archive
    archiveFieldsums(night_data_dir)

    # List for any extra files which will be copied to the night archive directory. Full paths have to be
    #   given
    extra_files = []

    log.info('Making a flat...')

    # Make a new flat field
    flat_img = makeFlat(night_data_dir, config)

    # If making the flat was successful, save it
    if flat_img is not None:

        # Save the flat in the root directory, to keep the operational flat updated
        scipy.misc.imsave(config.flat_file, flat_img)
        flat_path = os.path.join(os.getcwd(), config.flat_file)
        log.info('Flat saved to: ' + flat_path)

        # Copy the flat to the night's directory as well
        extra_files.append(flat_path)

    else:
        log.info('Making flat image FAILED!')

    ### Add extra files to archive

    # Add the platepar to the archive if it exists
    if os.path.exists(platepar_path):
        extra_files.append(platepar_path)

    # Add the config file to the archive too
    extra_files.append(os.path.join(os.getcwd(), '.config'))

    ### ###

    night_archive_dir = os.path.join(os.path.abspath(config.data_dir),
                                     config.archived_dir, night_data_dir_name)

    log.info('Archiving detections to ' + night_archive_dir)

    # Archive the detections
    archive_name = archiveDetections(night_data_dir, night_archive_dir, ff_detected, config, \
        extra_files=extra_files)

    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()
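
The L2-cache padding trick used above is easy to check in isolation. Below is a minimal standalone sketch, with an illustrative 1280x720 frame size in place of values read from the .config file:

import numpy as np

# Illustrative frame dimensions; the real values come from config.width/config.height
width, height = 1280, 720

# Size of a 256-frame block of 8-bit pixels, as allocated for the compressor
chunk_size = 256*width*height

# Pad by one row and column if the chunk size is a multiple of the RPi3
# L2 cache size (512 kB), which would make compression ~10x slower
array_pad = 1 if chunk_size % (512*1024) == 0 else 0

frames = np.empty((256, height + array_pad, width + array_pad), np.uint8)
print(frames.shape)  # (256, 721, 1281): padding triggered for 1280x720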
Example #2
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from RMS.Compression import Compressor
import RMS.ConfigReader as cr

import numpy as np
from matplotlib import pyplot as plt

if __name__ == "__main__":

    config = cr.parse(".config")

    frames = np.empty((256, 576, 720), np.uint8)
    for i in range(256):
        frames[i] = np.random.normal(128, 2, (576, 720))

    comp = Compressor(None, None, None, None, None, config)
    compressed, field_intensities = comp.compress(frames)

    plt.hist(compressed[1].ravel(), 256, [0, 256])
    plt.xlim((0, 255))
    plt.title('Randomness histogram')
    plt.xlabel('Frame')
    plt.ylabel('Random value count')
    plt.show()
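
Here `compressed` holds the four planes of the FTP format (maxpixel, maxframe, avepixel, stdpixel), so `compressed[1]` is the maxframe plane: for pure noise input, the frame index of each pixel's maximum should be roughly uniform over 0-255, which is what the histogram verifies. A rough NumPy sketch of how those planes relate to the frame block (a simplification; the real Cython compressor differs in details such as how the mean and standard deviation are computed):

import numpy as np

def compressSketch(frames):
    # frames: (256, H, W) uint8 block of video frames
    maxpixel = frames.max(axis=0).astype(np.uint8)     # brightest value seen per pixel
    maxframe = frames.argmax(axis=0).astype(np.uint8)  # frame index of that maximum
    avepixel = frames.mean(axis=0).astype(np.uint8)    # temporal mean per pixel
    stdpixel = frames.std(axis=0).astype(np.uint8)     # temporal standard deviation

    return np.stack([maxpixel, maxframe, avepixel, stdpixel])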
Example #3
def runCapture(config, duration=None, video_file=None, nodetect=False, detect_end=False, \
    upload_manager=None, resume_capture=False):
    """ Run capture and compression for the given time.given
    
    Arguments:
        config: [config object] Configuration read from the .config file.

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.
        resume_capture: [bool] Resume capture in the last data directory in CapturedFiles.

    Return:
        night_archive_dir: [str] Path to the archive folder of the processed night.
    """

    global STOP_CAPTURE

    # Check if resuming capture to the last capture directory
    night_data_dir_name = None
    if resume_capture:

        log.info("Resuming capture in the last capture directory...")

        # Find the latest capture directory
        capturedfiles_path = os.path.join(os.path.abspath(config.data_dir),
                                          config.captured_dir)
        most_recent_dir_time = 0
        for dir_name in sorted(os.listdir(capturedfiles_path)):

            dir_path_check = os.path.join(capturedfiles_path, dir_name)

            # Check it's a directory
            if os.path.isdir(dir_path_check):

                # Check if it starts with the correct station code
                if dir_name.startswith(str(config.stationID)):

                    dir_mod_time = os.path.getmtime(dir_path_check)

                    # Check that it is the most recent directory
                    if (night_data_dir_name is None) or (dir_mod_time >
                                                         most_recent_dir_time):
                        night_data_dir_name = dir_name
                        night_data_dir = dir_path_check
                        most_recent_dir_time = dir_mod_time

        if night_data_dir_name is None:
            log.info(
                "Previous capture directory could not be found! Creating a new one..."
            )

        else:
            log.info("Previous capture directory found: {:s}".format(
                night_data_dir))

        # Resume run is finished now, reset resume flag
        cml_args.resume = False

    # Make a name for the capture data directory
    if night_data_dir_name is None:

        # Create a directory for captured files
        night_data_dir_name = str(config.stationID) + '_' \
            + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')

        # Full path to the data directory
        night_data_dir = os.path.join(os.path.abspath(config.data_dir), config.captured_dir, \
            night_data_dir_name)

    # Wait before the capture starts if a time has been given
    if (not resume_capture) and (video_file is None):
        log.info("Waiting {:d} seconds before capture start...".format(
            int(config.capture_wait_seconds)))
        time.sleep(config.capture_wait_seconds)

    # Add a note about Patreon supporters
    print("################################################################")
    print("Thanks to our Patreon supporters in the 'Dinosaur Killer' class:")
    print("- Myron Valenta")
    print("https://www.patreon.com/globalmeteornetwork")
    print("\n\n\n" \
        + "       .:'       .:'        .:'       .:'  \n"\
        + "   _.::'     _.::'      _.::'     _.::'    \n"\
        + "  (_.'      (_.'       (_.'      (_.'      \n"\
        + "                         __                \n"\
        + "                        / _)               \n"\
        + "_\\/_          _/\\/\\/\\_/ /             _\\/_ \n"\
        + "/o\\         _|         /              //o\\ \n"\
        + " |         _|  (  | (  |                |  \n"\
        + "_|____    /__.-'|_|--|_|          ______|__\n")
    print("################################################################")

    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)

    # Copy the used config file to the capture directory
    if os.path.isfile(config.config_file_name):
        try:
            shutil.copy2(config.config_file_name,
                         os.path.join(night_data_dir, ".config"))
        except:
            log.error("Cannot copy the config file to the capture directory!")

    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config, night_data_dir)

    # If the platepar is not none, set the FOV from it
    if platepar is not None:
        config.fov_w = platepar.fov_h
        config.fov_h = platepar.fov_v

    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. A dirty fix is applied here: an extra image row and column are added whenever
    ### such a memory chunk would be created. The compression is performed, and the image is cropped
    ### back to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256 * config.width * config.height) % (512 * 1024) == 0:
        array_pad = 1

    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad),
                                      (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)

    sharedArrayBase2 = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad),
                                        (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')

    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start (helps stability)
            delay_detection = 120

        # Add an additional postprocessing delay
        delay_detection += config.postprocess_delay

        # Set a flag file to indicate that previous files are being loaded (if any)
        capture_resume_file_path = os.path.join(
            config.data_dir, config.capture_resume_flag_file)
        with open(capture_resume_file_path, 'w') as f:
            pass

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors, cores=1, log=log, delay_start=delay_detection, \
            backup_dir=night_data_dir)
        detector.startPool()

        # If the capture is being resumed into the directory, load all previously saved FF files
        if resume_capture:

            # Load all previously processed FF files
            for i, ff_name in enumerate(sorted(os.listdir(night_data_dir))):

                # Every 50 files loaded, update the flag file
                if i % 50 == 0:
                    with open(capture_resume_file_path, 'a') as f:
                        f.write("{:d}\n".format(i))

                # Check if the file is a valid FF file
                ff_path = os.path.join(night_data_dir, ff_name)
                if os.path.isfile(ff_path) and (str(
                        config.stationID) in ff_name) and validFFName(ff_name):

                    # Add the FF file to the detector
                    detector.addJob([night_data_dir, ff_name, config],
                                    wait_time=0.005)
                    log.info(
                        "Added existing FF file for detection: {:s}".format(
                            ff_name))

        # Remove the flag file
        if os.path.isfile(capture_resume_file_path):
            try:
                os.remove(capture_resume_file_path)
            except:
                log.error("There was an error during removing the capture resume flag file: " \
                    + capture_resume_file_path)

    # Initialize buffered capture
    bc = BufferedCapture(sharedArray,
                         startTime,
                         sharedArray2,
                         startTime2,
                         config,
                         video_file=video_file)

    # Initialize the live image viewer
    if config.live_maxpixel_enable:

        # Enable showing the live JPG
        config.live_jpg = True

        live_jpg_path = os.path.join(config.data_dir, 'live.jpg')

        live_view = LiveViewer(live_jpg_path,
                               image=True,
                               slideshow=False,
                               banner_text="Live")
        live_view.start()

    else:
        live_view = None

    # Initialize compression
    compressor = Compressor(night_data_dir,
                            sharedArray,
                            startTime,
                            sharedArray2,
                            startTime2,
                            config,
                            detector=detector)

    # Start buffered capture
    bc.startCapture()

    # Init and start the compression
    compressor.start()

    # Capture until Ctrl+C is pressed
    wait(duration, compressor)

    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')

    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of late or dropped frames: ' + str(dropped_frames))

    # Stop the compressor
    log.debug('Stopping compression...')
    detector = compressor.stop()

    # Free shared memory after the compressor is done
    try:
        log.debug('Freeing frame buffers...')
        del sharedArrayBase
        del sharedArray
        del sharedArrayBase2
        del sharedArray2

    except Exception as e:
        log.debug('Freeing frame buffers failed with error:' + repr(e))
        log.debug(repr(traceback.format_exception(*sys.exc_info())))

    log.debug('Compression stopped')

    if live_view is not None:

        # Stop the live viewer
        log.debug('Stopping live viewer...')

        live_view.stop()
        live_view.join()
        del live_view
        live_view = None

        log.debug('Live view stopped')

    # If detection should be performed
    if not nodetect:

        try:
            log.info('Finishing up the detection, ' + str(detector.input_queue.qsize()) \
                + ' files to process...')
        except:
            print(
                'Finishing up the detection... error when getting input queue size!'
            )

        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()

        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 2 free
                available_cores = multiprocessing.cpu_count() - 2

                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(
                        available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)

            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')

        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')

            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()

        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE

        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

    else:

        detection_results = []

    # Save detection to disk and archive detection
    night_archive_dir, archive_name, _ = processNight(night_data_dir, config, \
        detection_results=detection_results, nodetect=nodetect)

    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])
        log.info('File added...')

        # Delay the upload, if the delay is given
        upload_manager.delayNextUpload(delay=60 * config.upload_delay)

    # Delete detector backup files
    if detector is not None:
        detector.deleteBackupFiles()

    # If the capture was run for a limited time, run the upload right away
    if (duration is not None) and (upload_manager is not None):
        log.info('Uploading data before exiting...')
        upload_manager.uploadData()

    # Run the external script
    runExternalScript(night_data_dir, night_archive_dir, config)

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()

    return night_archive_dir
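
The resume branch at the top of this version scans CapturedFiles for the most recent directory belonging to the station. Pulled out of runCapture, the same scan reads as a small helper (an illustrative refactoring with hypothetical names, not a function from the RMS codebase):

import os

def findLatestCaptureDir(captured_path, station_id):
    """ Return the path of the most recently modified capture directory for
        the given station ID, or None if no matching directory exists. """

    latest_dir = None
    latest_mtime = 0

    for dir_name in sorted(os.listdir(captured_path)):

        dir_path = os.path.join(captured_path, dir_name)

        # Only directories named after the station qualify
        if os.path.isdir(dir_path) and dir_name.startswith(str(station_id)):

            mtime = os.path.getmtime(dir_path)

            # Keep the most recently modified match
            if (latest_dir is None) or (mtime > latest_mtime):
                latest_dir = dir_path
                latest_mtime = mtime

    return latest_dir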
Example #4
    ## SAVE the frames to disk
    # Pickle data is binary, so the file must be opened in 'wb' mode
    with open(os.path.join(dir_path, pickle_file), 'wb') as f:
        pickle.dump(frames, f)
    ###

    # ## Load the frames from disk
    # with open(os.path.join(dir_path, pickle_file), 'rb') as f:
    #     frames = pickle.load(f)
    # ###

    # # Show individual frames
    # for i in range(120, 128):
    #     plt.imshow(frames[i])
    #     plt.show()

    comp = Compressor(dir_path, None, None, None, None, config)

    print('Running compression...')
    t1 = time.time()

    # Run the compression
    compressed, field_intensities = comp.compress(frames)

    print('Time for compression', time.time() - t1)

    t1 = time.time()

    # Save FF file
    comp.saveFF(compressed, 0, 0)

    # Save the extracted intensities for every field
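
This excerpt starts mid-script, so dir_path, pickle_file, frames, and config arrive undefined. A minimal preamble along these lines (hypothetical names, synthetic noise frames in place of real captures) makes the fragment runnable:

import os
import time
import pickle

import numpy as np

from RMS.Compression import Compressor
import RMS.ConfigReader as cr

config = cr.parse(".config")

# Hypothetical output location for this sketch
dir_path = os.path.abspath(".")
pickle_file = "frames_test.pickle"

# Synthetic 256-frame block of Gaussian noise standing in for captured video
frames = np.empty((256, 576, 720), np.uint8)
for i in range(256):
    frames[i] = np.random.normal(128, 2, (576, 720))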
Example #5
def runCapture(config, duration=None, video_file=None, nodetect=False, detect_end=False, upload_manager=None):
    """ Run capture and compression for the given time.given

    Arguments:
        config: [config object] Configuration read from the .config file

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture 
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.

    Return:
        night_archive_dir: [str] Path to the archive folder of the processed night.

    """

    global STOP_CAPTURE


    # Create a directory for captured files
    night_data_dir_name = str(config.stationID) + '_' + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')

    # Full path to the data directory
    night_data_dir = os.path.join(os.path.abspath(config.data_dir), config.captured_dir, night_data_dir_name)


    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)


    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config, night_data_dir)


    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. A dirty fix is applied here: an extra image row and column are added whenever
    ### such a memory chunk would be created. The compression is performed, and the image is cropped
    ### back to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256*config.width*config.height)%(512*1024) == 0:
        array_pad = 1


    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)
    
    sharedArrayBase2 = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')


    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start
            delay_detection = 120

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors, cores=1, log=log, delay_start=delay_detection, \
            backup_dir=night_data_dir)
        detector.startPool()

    
    # Initialize buffered capture
    bc = BufferedCapture(sharedArray, startTime, sharedArray2, startTime2, config, video_file=video_file)


    # Initialize the live image viewer
    if config.live_maxpixel_enable:
        live_view = LiveViewer(night_data_dir, slideshow=False, banner_text="Live")
        live_view.start()

    else:
        live_view = None

    
    # Initialize compression
    compressor = Compressor(night_data_dir, sharedArray, startTime, sharedArray2, startTime2, config, 
        detector=detector)

    
    # Start buffered capture
    bc.startCapture()

    # Init and start the compression
    compressor.start()

    
    # Capture until Ctrl+C is pressed
    wait(duration, compressor)
        
    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')


    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of late or dropped frames: ' + str(dropped_frames))


    # Stop the compressor
    log.debug('Stopping compression...')
    detector = compressor.stop()

    # Free shared memory after the compressor is done
    try:
        log.debug('Freeing frame buffers...')
        del sharedArrayBase
        del sharedArray
        del sharedArrayBase2
        del sharedArray2

    except Exception as e:
        log.debug('Freeing frame buffers failed with error:' + repr(e))
        log.debug(repr(traceback.format_exception(*sys.exc_info())))

    log.debug('Compression stopped')


    if live_view is not None:

        # Stop the live viewer
        log.debug('Stopping live viewer...')

        live_view.stop()
        live_view.join()
        del live_view
        live_view = None

        log.debug('Live view stopped')



    # If detection should be performed
    if not nodetect:

        try:
            log.info('Finishing up the detection, ' + str(detector.input_queue.qsize()) \
                + ' files to process...')
        except:
            print('Finishing up the detection... error when getting input queue size!')


        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()


        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 2 free
                available_cores = multiprocessing.cpu_count() - 2


                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)


            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')


        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')
                
            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager
                    

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()


        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE


        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

    else:

        detection_results = []




    # Save detection to disk and archive detection    
    night_archive_dir, archive_name, _ = processNight(night_data_dir, config, \
        detection_results=detection_results, nodetect=nodetect)


    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])
        log.info('File added...')


    # Delete detector backup files
    if detector is not None:
        detector.deleteBackupFiles()


    # If the capture was run for a limited time, run the upload right away
    if (duration is not None) and (upload_manager is not None):
        log.info('Uploading data before exiting...')
        upload_manager.uploadData()


    # Run the external script
    runExternalScript(night_data_dir, night_archive_dir, config)
    

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()


    return night_archive_dir
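
Both frame buffers in this function follow the same pattern: a raw multiprocessing.Array wrapped in a NumPy view, so the capture and compression processes can hand 256-frame blocks back and forth without copying. Factored out, the pattern looks like this (an illustrative helper, not part of the RMS API):

import ctypes
import multiprocessing

import numpy as np

def makeFrameBuffer(width, height, pad=0):
    """ Allocate one shared 256-frame buffer and its start-time value.
        Illustrative helper showing the pattern used in runCapture. """

    # Raw shared memory, inherited by child processes
    base = multiprocessing.Array(ctypes.c_uint8, 256*(width + pad)*(height + pad))

    # NumPy view over the same memory; no copy is made
    frames = np.ctypeslib.as_array(base.get_obj())
    frames = frames.reshape(256, height + pad, width + pad)

    # Timestamp of the first frame in the block, shared as a double
    start_time = multiprocessing.Value('d', 0.0)

    # The base array must be kept alive alongside the view
    return base, frames, start_time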
Example #6
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

""" Timings of compression algorithm with various cases.
"""

from __future__ import print_function, division, absolute_import

from RMS.Compression import Compressor
import RMS.ConfigReader as cr
import numpy as np
import time
import sys

config = cr.parse(".config")
comp = Compressor(None, None, None, None, None, config)


# IMAGE SIZE
WIDTH = 1280
HEIGHT = 720

def timing(img):
    t = time.time()
    comp.compress(img)
    return time.time() - t
   
def create(f):
    # Build a 256-frame test block; each frame comes from the supplied
    # generator function f (assumed to return one (HEIGHT, WIDTH) frame)
    arr = np.empty((256, HEIGHT, WIDTH), np.uint8)
    for i in range(256):
        arr[i] = f()
    return arr
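
The script stops short of actually running the benchmark. A minimal driver along these lines exercises timing() with a few frame profiles (the three cases are illustrative assumptions, not cases from the original script):

if __name__ == "__main__":

    # Flat frames: every pixel holds the same value
    flat = create(lambda: np.full((HEIGHT, WIDTH), 128, np.uint8))
    print('Flat frames:      {:.3f} s'.format(timing(flat)))

    # Gaussian noise around a mid-grey level
    noisy = create(lambda: np.random.normal(128, 2, (HEIGHT, WIDTH)))
    print('Noisy frames:     {:.3f} s'.format(timing(noisy)))

    # Fully saturated frames
    saturated = create(lambda: np.full((HEIGHT, WIDTH), 255, np.uint8))
    print('Saturated frames: {:.3f} s'.format(timing(saturated)))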