Example #1

def detectStarsAndMeteorsDirectory(dir_path, config):
    """ Extract stars and detect meteors on all FF files in the given folder. 

    Arguments:
        dir_path: [str] Path to the directory with FF files.
        config: [Config obj] Configuration object.

    Return:
        calstars_name: [str] Name of the CALSTARS file.
        ftpdetectinfo_name: [str] Name of the FTPdetectinfo file.
        ff_detected: [list] A list of FF files with detections.
        detector: [QueuedPool] The detection pool object, returned so the caller can manage it
            (e.g. delete its backup files).
    """

    # Get paths to every FF bin file in the directory
    ff_dir = os.path.abspath(dir_path)
    ff_list = [ff_name for ff_name in sorted(os.listdir(ff_dir)) if validFFName(ff_name)]


    # Check if there are any files in the directory
    if not len(ff_list):

        print("No files for processing found!")
        return None, None, None, None


    print('Starting detection...')

    # Initialize the detector
    detector = QueuedPool(detectStarsAndMeteors, cores=-1, log=log, backup_dir=ff_dir)

    # Start the detection
    detector.startPool()

    # Give detector jobs
    for ff_name in ff_list:

        while True:
            
            # Add a job as long as there are available workers to receive it
            if detector.available_workers.value() > 0:
                print('Adding for detection:', ff_name)
                detector.addJob([ff_dir, ff_name, config], wait_time=0)
                break
            else:
                time.sleep(0.1)



    log.info('Waiting for the detection to finish...')

    # Wait for the detector to finish and close it
    detector.closePool()

    log.info('Detection finished!')

    log.info('Collecting results...')

    # Get the detection results from the queue
    detection_results = detector.getResults()


    # Save detection to disk
    calstars_name, ftpdetectinfo_name, ff_detected = saveDetections(detection_results, ff_dir, config)


    return calstars_name, ftpdetectinfo_name, ff_detected, detector
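
# A minimal usage sketch for the function above. The config loader is an assumption
# (RMS-style configs are typically parsed from a '.config' file); the path is illustrative.
if __name__ == '__main__':

    import RMS.ConfigReader as cr

    # Parse the station configuration (assumed API)
    config = cr.parse('.config')

    # Run star extraction and meteor detection on a folder of FF files
    calstars_name, ftpdetectinfo_name, ff_detected, detector = \
        detectStarsAndMeteorsDirectory('/path/to/CapturedFiles/XX0001_20210131_012345_000000', config)

    # Clean up the worker pool backup files once done
    detector.deleteBackupFiles()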
Example #2
def runCapture(config,
               duration=None,
               video_file=None,
               nodetect=False,
               detect_end=False,
               upload_manager=None):
    """ Run capture and compression for the given time.given

    Arguments:
        config: [config object] Configuration read from the .config file

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture 
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.

    """

    global STOP_CAPTURE

    # Create a directory for captured files
    night_data_dir_name = str(
        config.stationID) + '_' + datetime.datetime.utcnow().strftime(
            '%Y%m%d_%H%M%S_%f')

    # Full path to the data directory
    night_data_dir = os.path.join(os.path.abspath(config.data_dir),
                                  config.captured_dir, night_data_dir_name)

    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)

    # Load the default flat field image if it is available
    flat_struct = None

    if config.use_flat:

        # Check if the flat exists
        if os.path.exists(os.path.join(os.getcwd(), config.flat_file)):
            flat_struct = Image.loadFlat(os.getcwd(), config.flat_file)

            log.info('Loaded flat field image: ' +
                     os.path.join(os.getcwd(), config.flat_file))

    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config)

    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. We apply a dirty fix here: an extra image row and column are added if such a
    ### memory chunk would be created. The compression is performed, and the image is cropped back
    ### to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256 * config.width * config.height) % (512 * 1024) == 0:
        array_pad = 1
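
    # Worked example of the check above: a 1280x720 frame gives 256*1280*720
    #   = 235,929,600 bytes = 450*524,288, an exact multiple of the 512 kB cache size,
    #   so one extra row and column are added. A 720x576 frame gives 106,168,320 bytes
    #   = 202.5*524,288, so no padding is needed.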

    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad),
                                      (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)

    sharedArrayBase2 = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad),
                                        (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)
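
    # The two array/timestamp pairs implement double buffering: the capture process
    #   fills one 256-frame block while the compressor works on the other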

    log.info('Initializing frame buffers done!')

    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start
            delay_detection = 120

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors,
                              cores=1,
                              log=log,
                              delay_start=delay_detection)
        detector.startPool()

    # Initialize buffered capture
    bc = BufferedCapture(sharedArray,
                         startTime,
                         sharedArray2,
                         startTime2,
                         config,
                         video_file=video_file)

    # Initialize the live image viewer
    live_view = LiveViewer(window_name='Maxpixel')

    # Initialize compression
    compressor = Compressor(night_data_dir,
                            sharedArray,
                            startTime,
                            sharedArray2,
                            startTime2,
                            config,
                            detector=detector,
                            live_view=live_view,
                            flat_struct=flat_struct)

    # Start buffered capture
    bc.startCapture()

    # Start the compression
    compressor.start()

    # Capture until Ctrl+C is pressed
    wait(duration)

    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')

    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of dropped frames: ' + str(dropped_frames))

    # Stop the compressor
    log.debug('Stopping compression...')
    detector, live_view = compressor.stop()
    log.debug('Compression stopped')

    # Stop the live viewer
    log.debug('Stopping live viewer...')
    live_view.stop()
    del live_view
    log.debug('Live view stopped')

    # Init data lists
    star_list = []
    meteor_list = []
    ff_detected = []

    # If detection should be performed
    if not nodetect:

        log.info('Finishing up the detection, ' +
                 str(detector.input_queue.qsize()) + ' files to process...')

        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()

        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 1 free
                available_cores = multiprocessing.cpu_count() - 1

                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(
                        available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)

            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')

        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')

            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()

        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE

        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

        # Remove all 'None' results, which were errors
        detection_results = [
            res for res in detection_results if res is not None
        ]

        # Count the number of detected meteors
        meteors_num = 0
        for _, _, meteor_data in detection_results:
            for meteor in meteor_data:
                meteors_num += 1

        log.info('TOTAL: ' + str(meteors_num) + ' detected meteors.')

        # Save the detections to a file
        for ff_name, star_data, meteor_data in detection_results:

            x2, y2, background, intensity = star_data

            # Skip if no stars were found
            if not x2:
                continue

            # Construct the table of the star parameters
            star_data = list(zip(x2, y2, background, intensity))

            # Add star info to the star list
            star_list.append([ff_name, star_data])

            # Handle the detected meteors
            meteor_No = 1
            for meteor in meteor_data:

                rho, theta, centroids = meteor

                # Append to the results list
                meteor_list.append([ff_name, meteor_No, rho, theta, centroids])
                meteor_No += 1

            # Add the FF file to the archive list if a meteor was detected on it
            if meteor_data:
                ff_detected.append(ff_name)

        # Generate the name for the CALSTARS file
        calstars_name = 'CALSTARS_' + str(config.stationID) + '_' \
            + os.path.basename(night_data_dir) + '.txt'
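        # e.g. CALSTARS_XX0001_XX0001_20210131_012345_000000.txt for a hypothetical station XX0001
        #   (the station ID appears twice because the night directory name also starts with it)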

        # Write detected stars to the CALSTARS file
        CALSTARS.writeCALSTARS(star_list, night_data_dir, calstars_name, config.stationID, config.height, \
            config.width)

        # Generate FTPdetectinfo file name
        ftpdetectinfo_name = 'FTPdetectinfo_' + os.path.basename(
            night_data_dir) + '.txt'

        # Write FTPdetectinfo file
        FTPdetectinfo.writeFTPdetectinfo(meteor_list, night_data_dir, ftpdetectinfo_name, night_data_dir, \
            config.stationID, config.fps)

        # Get the platepar file
        platepar, platepar_path, platepar_fmt = getPlatepar(config)

        # Run calibration check and auto astrometry refinement
        if platepar is not None:

            # Read in the CALSTARS file
            calstars_list = CALSTARS.readCALSTARS(night_data_dir,
                                                  calstars_name)

            # Run astrometry check and refinement
            platepar, fit_status = autoCheckFit(config, platepar,
                                                calstars_list)

            # If the fit was successful, apply the astrometry to detected meteors
            if fit_status:

                log.info('Astrometric calibration SUCCESSFUL!')

                # Save the refined platepar to the night directory and as default
                platepar.write(os.path.join(night_data_dir,
                                            config.platepar_name),
                               fmt=platepar_fmt)
                platepar.write(platepar_path, fmt=platepar_fmt)

            else:
                log.info('Astrometric calibration FAILED! Using the old platepar for calibration...')

            # Calculate astrometry for meteor detections
            applyAstrometryFTPdetectinfo(night_data_dir, ftpdetectinfo_name,
                                         platepar_path)

    log.info('Plotting field sums...')

    # Plot field sums to a graph
    plotFieldsums(night_data_dir, config)

    # Archive all fieldsums to one archive
    archiveFieldsums(night_data_dir)

    # List of extra files which will be copied to the night archive directory. Full paths must be
    #   given.
    extra_files = []

    log.info('Making a flat...')

    # Make a new flat field
    flat_img = makeFlat(night_data_dir, config)

    # If making the flat was successful, save it
    if flat_img is not None:

        # Save the flat in the root directory, to keep the operational flat updated
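        # Note: scipy.misc.imsave was deprecated in SciPy 1.0 and removed in SciPy 1.2;
        #   newer code would use imageio.imwrite instead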
        scipy.misc.imsave(config.flat_file, flat_img)
        flat_path = os.path.join(os.getcwd(), config.flat_file)
        log.info('Flat saved to: ' + flat_path)

        # Copy the flat to the night's directory as well
        extra_files.append(flat_path)

    else:
        log.info('Making flat image FAILED!')

    ### Add extra files to archive

    # Add the platepar to the archive if it exists
    if os.path.exists(platepar_path):
        extra_files.append(platepar_path)

    # Add the config file to the archive too
    extra_files.append(os.path.join(os.getcwd(), '.config'))

    ### ###

    night_archive_dir = os.path.join(os.path.abspath(config.data_dir),
                                     config.archived_dir, night_data_dir_name)

    log.info('Archiving detections to ' + night_archive_dir)

    # Archive the detections
    archive_name = archiveDetections(night_data_dir, night_archive_dir, ff_detected, config, \
        extra_files=extra_files)

    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()
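
# The SIGINT helpers used above (setSIGINT, resetSIGINT) are defined elsewhere in the
# capture script. A minimal sketch of what they might look like, assuming the usual
# signal-module pattern (not taken from this file):
import signal

def breakHandler(signum, frame):
    """ Set the global flag so the capture can shut down cleanly. """
    global STOP_CAPTURE
    STOP_CAPTURE = True

def setSIGINT():
    # Route Ctrl+C to the 'soft' program kill
    signal.signal(signal.SIGINT, breakHandler)

def resetSIGINT():
    # Restore the default KeyboardInterrupt behaviour
    signal.signal(signal.SIGINT, signal.default_int_handler)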
Example #3
    # Try loading a flat field image
    flat_struct = None

    if config.use_flat:
        
        # Check if there is flat in the data directory
        if os.path.exists(os.path.join(ff_dir, config.flat_file)):
            flat_struct = Image.loadFlat(ff_dir, config.flat_file)

        # Try loading the default flat
        elif os.path.exists(config.flat_file):
            flat_struct = Image.loadFlat(os.getcwd(), config.flat_file)


    # Initialize the detector
    detector = QueuedPool(detectStarsAndMeteors, cores=-1, log=log)

    # Give detector jobs
    for ff_name in ff_list:
        detector.addJob([ff_dir, ff_name, config, flat_struct])


    # Start the detection
    detector.startPool()


    log.info('Waiting for the detection to finish...')

    # Wait for the detector to finish and close it
    detector.closePool()
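
    # Assumed continuation of this fragment, following the pattern of Example #1:
    #   collect the results and save them to disk
    detection_results = detector.getResults()

    calstars_name, ftpdetectinfo_name, ff_detected = saveDetections(detection_results, ff_dir, config)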
Example #4
def runCapture(config, duration=None, video_file=None, nodetect=False, detect_end=False, \
    upload_manager=None, resume_capture=False):
    """ Run capture and compression for the given time.given
    
    Arguments:
        config: [config object] Configuration read from the .config file.

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.
        resume_capture: [bool] Resume capture in the last data directory in CapturedFiles.

    Return:
        night_archive_dir: [str] Path to the archive folder of the processed night.
    """

    global STOP_CAPTURE

    # Check if resuming capture to the last capture directory
    night_data_dir_name = None
    if resume_capture:

        log.info("Resuming capture in the last capture directory...")

        # Find the latest capture directory
        capturedfiles_path = os.path.join(os.path.abspath(config.data_dir),
                                          config.captured_dir)
        most_recent_dir_time = 0
        for dir_name in sorted(os.listdir(capturedfiles_path)):

            dir_path_check = os.path.join(capturedfiles_path, dir_name)

            # Check it's a directory
            if os.path.isdir(dir_path_check):

                # Check if it starts with the correct station code
                if dir_name.startswith(str(config.stationID)):

                    dir_mod_time = os.path.getmtime(dir_path_check)

                    # Check that it is the most recent directory
                    if (night_data_dir_name is None) or (dir_mod_time >
                                                         most_recent_dir_time):
                        night_data_dir_name = dir_name
                        night_data_dir = dir_path_check
                        most_recent_dir_time = dir_mod_time

        if night_data_dir_name is None:
            log.info(
                "Previous capture directory could not be found! Creating a new one..."
            )

        else:
            log.info("Previous capture directory found: {:s}".format(
                night_data_dir))

        # The resume run is finished now; reset the resume flag
        #   (cml_args is the module-level argparse namespace in the original script)
        cml_args.resume = False

    # Make a name for the capture data directory
    if night_data_dir_name is None:

        # Create a directory for captured files
        night_data_dir_name = str(config.stationID) + '_' \
            + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')

        # Full path to the data directory
        night_data_dir = os.path.join(os.path.abspath(config.data_dir), config.captured_dir, \
            night_data_dir_name)

    # Wait before the capture starts if a time has been given
    if (not resume_capture) and (video_file is None):
        log.info("Waiting {:d} seconds before capture start...".format(
            int(config.capture_wait_seconds)))
        time.sleep(config.capture_wait_seconds)

    # Add a note about Patreon supporters
    print("################################################################")
    print("Thanks to our Patreon supporters in the 'Dinosaur Killer' class:")
    print("- Myron Valenta")
    print("https://www.patreon.com/globalmeteornetwork")
    print("\n\n\n" \
        + "       .:'       .:'        .:'       .:'  \n"\
        + "   _.::'     _.::'      _.::'     _.::'    \n"\
        + "  (_.'      (_.'       (_.'      (_.'      \n"\
        + "                         __                \n"\
        + "                        / _)               \n"\
        + "_\\/_          _/\\/\\/\\_/ /             _\\/_ \n"\
        + "/o\\         _|         /              //o\\ \n"\
        + " |         _|  (  | (  |                |  \n"\
        + "_|____    /__.-'|_|--|_|          ______|__\n")
    print("################################################################")

    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)

    # Copy the used config file to the capture directory
    if os.path.isfile(config.config_file_name):
        try:
            shutil.copy2(config.config_file_name,
                         os.path.join(night_data_dir, ".config"))
        except:
            log.error("Cannot copy the config file to the capture directory!")

    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config, night_data_dir)

    # If the platepar is not None, set the FOV from it (fov_h is the horizontal and fov_v the vertical FOV)
    if platepar is not None:
        config.fov_w = platepar.fov_h
        config.fov_h = platepar.fov_v

    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. We apply a dirty fix here: an extra image row and column are added if such a
    ### memory chunk would be created. The compression is performed, and the image is cropped back
    ### to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256 * config.width * config.height) % (512 * 1024) == 0:
        array_pad = 1

    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad),
                                      (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)

    sharedArrayBase2 = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad),
                                        (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')

    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start (helps stability)
            delay_detection = 120

        # Add an additional postprocessing delay
        delay_detection += config.postprocess_delay

        # Set a flag file to indicate that previous files are being loaded (if any)
        capture_resume_file_path = os.path.join(
            config.data_dir, config.capture_resume_flag_file)
        with open(capture_resume_file_path, 'w') as f:
            pass

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors, cores=1, log=log, delay_start=delay_detection, \
            backup_dir=night_data_dir)
        detector.startPool()

        # If the capture is being resumed into the directory, load all previously saved FF files
        if resume_capture:

            # Load all previously processed FF files
            for i, ff_name in enumerate(sorted(os.listdir(night_data_dir))):

                # Every 50 files loaded, update the flag file
                if i % 50 == 0:
                    with open(capture_resume_file_path, 'a') as f:
                        f.write("{:d}\n".format(i))

                # Check if the file is a valid FF file
                ff_path = os.path.join(night_data_dir, ff_name)
                if os.path.isfile(ff_path) and (str(
                        config.stationID) in ff_name) and validFFName(ff_name):

                    # Add the FF file to the detector
                    detector.addJob([night_data_dir, ff_name, config],
                                    wait_time=0.005)
                    log.info("Added existing FF file for detection: {:s}".format(ff_name))

        # Remove the flag file
        if os.path.isfile(capture_resume_file_path):
            try:
                os.remove(capture_resume_file_path)
            except:
                log.error("There was an error during removing the capture resume flag file: " \
                    + capture_resume_file_path)

    # Initialize buffered capture
    bc = BufferedCapture(sharedArray,
                         startTime,
                         sharedArray2,
                         startTime2,
                         config,
                         video_file=video_file)

    # Initialize the live image viewer
    if config.live_maxpixel_enable:

        # Enable showing the live JPG
        config.live_jpg = True

        live_jpg_path = os.path.join(config.data_dir, 'live.jpg')

        live_view = LiveViewer(live_jpg_path,
                               image=True,
                               slideshow=False,
                               banner_text="Live")
        live_view.start()

    else:
        live_view = None

    # Initialize compression
    compressor = Compressor(night_data_dir,
                            sharedArray,
                            startTime,
                            sharedArray2,
                            startTime2,
                            config,
                            detector=detector)

    # Start buffered capture
    bc.startCapture()

    # Init and start the compression
    compressor.start()

    # Capture until Ctrl+C is pressed
    wait(duration, compressor)

    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')

    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of late or dropped frames: ' + str(dropped_frames))

    # Stop the compressor
    log.debug('Stopping compression...')
    detector = compressor.stop()

    # Free shared memory after the compressor is done
    try:
        log.debug('Freeing frame buffers...')
        del sharedArrayBase
        del sharedArray
        del sharedArrayBase2
        del sharedArray2

    except Exception as e:
        log.debug('Freeing frame buffers failed with error:' + repr(e))
        log.debug(repr(traceback.format_exception(*sys.exc_info())))

    log.debug('Compression stopped')

    if live_view is not None:

        # Stop the live viewer
        log.debug('Stopping live viewer...')

        live_view.stop()
        live_view.join()
        del live_view
        live_view = None

        log.debug('Live view stopped')

    # If detection should be performed
    if not nodetect:

        try:
            log.info('Finishing up the detection, ' + str(detector.input_queue.qsize()) \
                + ' files to process...')
        except:
            log.info('Finishing up the detection... error when getting the input queue size!')

        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()

        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 2 free
                available_cores = multiprocessing.cpu_count() - 2

                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(
                        available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)

            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')

        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')

            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()

        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE

        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

    else:

        detection_results = []

    # Save detection to disk and archive detection
    night_archive_dir, archive_name, _ = processNight(night_data_dir, config, \
        detection_results=detection_results, nodetect=nodetect)

    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])
        log.info('File added...')

        # Delay the next upload, if a delay is given (the factor of 60 converts minutes to seconds)
        upload_manager.delayNextUpload(delay=60 * config.upload_delay)

    # Delete detector backup files
    if detector is not None:
        detector.deleteBackupFiles()

    # If the capture was run for a limited time, run the upload right away
    if (duration is not None) and (upload_manager is not None):
        log.info('Uploading data before exiting...')
        upload_manager.uploadData()

    # Run the external script
    runExternalScript(night_data_dir, night_archive_dir, config)

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()

    return night_archive_dir
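
# The wait() helper used above blocks until the capture duration expires or until
# Ctrl+C sets STOP_CAPTURE. A minimal sketch, assuming that behaviour (the real
# helper may also monitor the compressor process):
def wait(duration, compressor):

    global STOP_CAPTURE

    time_start = time.time()

    while True:

        # Sleep in short intervals so the stop flag is checked regularly
        time.sleep(1)

        # Stop if capture was manually ended
        if STOP_CAPTURE:
            break

        # Stop when the requested capture duration has elapsed
        if (duration is not None) and ((time.time() - time_start) >= duration):
            break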
Example #5
def extractStarsAndSave(config, ff_dir):
    """ Extract stars in the given folder and save the CALSTARS file. 
    
    Arguments:
        config: [config object] configuration object (loaded from the .config file)
        ff_dir: [str] Path to directory where FF files are.

    Return:
        star_list: [list] A list of [ff_name, star_data] entries, where star_data contains a list of 
            (column, row, amplitude, intensity, fwhm) values for every star.

    """

    time_start = time.time()

    # Load mask, dark, flat
    mask, dark, flat_struct = loadImageCalibration(ff_dir, config)

    extraction_list = []

    # Go through all files in the directory and add them to the detection list
    for ff_name in sorted(os.listdir(ff_dir)):

        # Check if the given file is a valid FF file
        if not FFfile.validFFName(ff_name):
            continue

        extraction_list.append(ff_name)

    # Run the QueuedPool for detection
    workpool = QueuedPool(extractStars, cores=-1, backup_dir=ff_dir)

    # Add jobs for the pool
    for ff_name in extraction_list:
        print('Adding for extraction:', ff_name)
        workpool.addJob([
            ff_dir, ff_name, config, None, None, None, None, flat_struct, dark,
            mask
        ])

    print('Starting pool...')

    # Start the detection
    workpool.startPool()

    print('Waiting for the detection to finish...')

    # Wait for the detector to finish and close it
    workpool.closePool()

    # Get extraction results
    star_list = []
    for result in workpool.getResults():

        ff_name, x2, y2, amplitude, intensity, fwhm_data = result

        # Skip if no stars were found
        if not x2:
            continue

        # Construct the table of the star parameters
        star_data = list(zip(x2, y2, amplitude, intensity, fwhm_data))

        # Add star info to the star list
        star_list.append([ff_name, star_data])

    dir_name = os.path.basename(os.path.abspath(ff_dir))
    if dir_name.startswith(str(config.stationID)):
        prefix = dir_name
    else:
        prefix = "{:s}_{:s}".format(config.stationID, dir_name)

    # Generate the name for the CALSTARS file
    calstars_name = 'CALSTARS_' + prefix + '.txt'

    # Write detected stars to the CALSTARS file
    CALSTARS.writeCALSTARS(star_list, ff_dir, calstars_name, config.stationID,
                           config.height, config.width)

    # Delete QueuedPool backup files
    workpool.deleteBackupFiles()

    print('Total time taken: {:.2f} s'.format(time.time() - time_start))

    return star_list
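
# A hypothetical usage sketch (the config loader is an assumption, as in the sketch
# after Example #1; the path is illustrative):
#
#   import RMS.ConfigReader as cr
#   config = cr.parse('.config')
#   star_list = extractStarsAndSave(config, '/path/to/CapturedFiles/XX0001_20210131_012345_000000')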
Example #6
def runCapture(config, duration=None, video_file=None, nodetect=False, detect_end=False, upload_manager=None):
    """ Run capture and compression for the given time.given

    Arguments:
        config: [config object] Configuration read from the .config file

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture 
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.

    Return:
        night_archive_dir: [str] Path to the archive folder of the processed night.

    """

    global STOP_CAPTURE


    # Create a directory for captured files
    night_data_dir_name = str(config.stationID) + '_' + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')

    # Full path to the data directory
    night_data_dir = os.path.join(os.path.abspath(config.data_dir), config.captured_dir, night_data_dir_name)


    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)


    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config, night_data_dir)


    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. We apply a dirty fix here: an extra image row and column are added if such a
    ### memory chunk would be created. The compression is performed, and the image is cropped back
    ### to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256*config.width*config.height)%(512*1024) == 0:
        array_pad = 1


    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)
    
    sharedArrayBase2 = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')


    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start
            delay_detection = 120

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors, cores=1, log=log, delay_start=delay_detection, \
            backup_dir=night_data_dir)
        detector.startPool()

    
    # Initialize buffered capture
    bc = BufferedCapture(sharedArray, startTime, sharedArray2, startTime2, config, video_file=video_file)


    # Initialize the live image viewer
    if config.live_maxpixel_enable:
        live_view = LiveViewer(night_data_dir, slideshow=False, banner_text="Live")
        live_view.start()

    else:
        live_view = None

    
    # Initialize compression
    compressor = Compressor(night_data_dir, sharedArray, startTime, sharedArray2, startTime2, config, 
        detector=detector)

    
    # Start buffered capture
    bc.startCapture()

    # Init and start the compression
    compressor.start()

    
    # Capture until Ctrl+C is pressed
    wait(duration, compressor)
        
    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')


    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of late or dropped frames: ' + str(dropped_frames))


    # Stop the compressor
    log.debug('Stopping compression...')
    detector = compressor.stop()

    # Free shared memory after the compressor is done
    try:
        log.debug('Freeing frame buffers...')
        del sharedArrayBase
        del sharedArray
        del sharedArrayBase2
        del sharedArray2

    except Exception as e:
        log.debug('Freeing frame buffers failed with error:' + repr(e))
        log.debug(repr(traceback.format_exception(*sys.exc_info())))

    log.debug('Compression stopped')


    if live_view is not None:

        # Stop the live viewer
        log.debug('Stopping live viewer...')

        live_view.stop()
        live_view.join()
        del live_view
        live_view = None

        log.debug('Live view stopped')



    # If detection should be performed
    if not nodetect:

        try:
            log.info('Finishing up the detection, ' + str(detector.input_queue.qsize()) \
                + ' files to process...')
        except:
            log.info('Finishing up the detection... error when getting the input queue size!')


        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()


        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 2 free
                available_cores = multiprocessing.cpu_count() - 2


                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)


            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')


        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')
                
            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager
                    

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()


        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE


        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

    else:

        detection_results = []




    # Save detection to disk and archive detection    
    night_archive_dir, archive_name, _ = processNight(night_data_dir, config, \
        detection_results=detection_results, nodetect=nodetect)


    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])
        log.info('File added...')


    # Delete detector backup files
    if detector is not None:
        detector.deleteBackupFiles()


    # If the capture was run for a limited time, run the upload right away
    if (duration is not None) and (upload_manager is not None):
        log.info('Uploading data before exiting...')
        upload_manager.uploadData()


    # Run the external script
    runExternalScript(night_data_dir, night_archive_dir, config)
    

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()


    return night_archive_dir
Example #7
    extraction_list = []

    # Go through all files in the directory and add them to the detection list
    for ff_name in sorted(ff_list):

        # Check if the given file is a valid FF file
        if not FFfile.validFFName(ff_name):
            continue

        #print(ff_name)
        extraction_list.append(ff_name)

    star_list = []

    # Run the QueuedPool for detection
    workpool = QueuedPool(extractStars, cores=-1, backup_dir=ff_dir)

    # Add jobs for the pool
    for ff_name in extraction_list:
        print('Adding for extraction:', ff_name)
        workpool.addJob([
            ff_dir, ff_name, config, None, None, None, None, flat_struct, dark,
            mask
        ])

    print('Starting pool...')

    # Start the detection
    workpool.startPool()

    print('Waiting for the detection to finish...')
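
    # Assumed completion of this fragment, mirroring Example #5 above: close the pool
    #   and collect the extraction results
    workpool.closePool()

    star_list = []
    for result in workpool.getResults():

        ff_name, x2, y2, amplitude, intensity, fwhm_data = result

        # Skip if no stars were found
        if not x2:
            continue

        # Add star info to the star list
        star_list.append([ff_name, list(zip(x2, y2, amplitude, intensity, fwhm_data))])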