Example #1
def savePickle(obj, dir_path, file_name):
    """ Dump the given object into a file using Python 'pickling'. The file can be loaded into Python
        ('unpickled') afterwards for further use.

    Arguments:
        obj: [object] Object which will be pickled.
        dir_path: [str] Path of the directory where the pickle file will be stored.
        file_name: [str] Name of the file where the object will be stored.

    """

    mkdirP(dir_path)

    with open(os.path.join(dir_path, file_name), 'wb') as f:
        pickle.dump(obj, f, protocol=2)
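
The docstring promises the file can be unpickled later; the inverse helper is equally small. A minimal counterpart sketch (the loadPickle name and signature are assumptions, mirroring savePickle):

import os
import pickle

def loadPickle(dir_path, file_name):
    """ Load a previously pickled object from a file (hypothetical counterpart to savePickle). """

    with open(os.path.join(dir_path, file_name), 'rb') as f:
        return pickle.load(f)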
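Nearly every example on this page calls mkdirP before writing to disk. A plausible sketch of that helper, assuming the usual 'mkdir -p' semantics (the real implementation lives elsewhere in the project):

import os

def mkdirP(path):
    """ Make a directory, creating parents as needed; do nothing if it already exists (sketch). """

    try:
        os.makedirs(path)
    except OSError:
        # The directory already exists; mirror the behaviour of 'mkdir -p'
        pass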
Example #2
def initLogging(config, log_file_prefix=""):
    """ Initializes the logger. 
    
    Arguments:
        config: [config object] Configuration read from the .config file.

    Keyword arguments:
        log_file_prefix: [str] String which will be prefixed to the log file name. Empty string by default.

    """

    # Path to the directory with log files
    log_path = os.path.join(config.data_dir, config.log_dir)

    # Make directories
    mkdirP(config.data_dir)
    mkdirP(log_path)

    # Generate a file name for the log file
    log_file_name = log_file_prefix + "log_" + str(
        config.stationID) + "_" + datetime.datetime.utcnow().strftime(
            '%Y%m%d_%H%M%S.%f') + ".log"

    # Init logging
    log = logging.getLogger('logger')
    log.setLevel(logging.DEBUG)

    # Make a new log file each day
    handler = logging.handlers.TimedRotatingFileHandler(os.path.join(log_path, log_file_name), when='D', \
        interval=1)
    handler.setLevel(logging.DEBUG)

    # Set the log formatting
    formatter = logging.Formatter(
        fmt=
        '%(asctime)s-%(levelname)s-%(module)s-line:%(lineno)d - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S')
    handler.setFormatter(formatter)
    log.addHandler(handler)

    # Stream all logs to stdout as well
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        fmt=
        '%(asctime)s-%(levelname)s-%(module)s-line:%(lineno)d - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S')
    ch.setFormatter(formatter)
    log.addHandler(ch)
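
One caveat worth noting: every call to initLogging attaches fresh handlers to the same 'logger' instance, so calling it twice duplicates every log line. A minimal guard sketch, assuming config has already been parsed from the .config file:

import logging

# Hypothetical guard (not part of the original): initialize the logger only once per process
log = logging.getLogger('logger')
if not log.handlers:
    initLogging(config)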
Example #3
def initLogging(config, log_file_prefix=""):
    """ Initializes the logger. 
    
    Arguments:
        config: [config object] Configuration read from the .config file.

    Keyword arguments:
        log_file_prefix: [str] String which will be prefixed to the log file name. Empty string by default.

    """

    # Path to the directory with log files
    log_path = os.path.join(config.data_dir, config.log_dir)

    # Make directories
    mkdirP(config.data_dir)
    mkdirP(log_path)

    # Generate a file name for the log file
    log_file_name = log_file_prefix + "log_" + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S.%f') + ".log"
        
    # Init logging
    log = logging.getLogger('logger')
    log.setLevel(logging.DEBUG)

    # Make a new log file each day
    handler = logging.handlers.TimedRotatingFileHandler(os.path.join(log_path, log_file_name), when='D', \
        interval=1) 
    handler.setLevel(logging.DEBUG)

    # Set the log formatting
    formatter = logging.Formatter(fmt='%(asctime)s-%(levelname)s-%(module)s-line:%(lineno)d - %(message)s', 
        datefmt='%Y/%m/%d %H:%M:%S')
    handler.setFormatter(formatter)
    log.addHandler(handler)

    # Stream all logs to stdout as well
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(fmt='%(asctime)s-%(levelname)s-%(module)s-line:%(lineno)d - %(message)s', 
        datefmt='%Y/%m/%d %H:%M:%S')
    ch.setFormatter(formatter)
    log.addHandler(ch)
Example #4
    def saveQueue(self, overwrite=False):
        """ Save the list of file to upload to disk, for bookkeeping in case of a power failure. 
    
        Keyword arguments:
            overwrite: [bool] If True, the holding file will be overwritten. Otherwise (default), the entries
                that are not in the file will be added at the end of the file.
        """

        # Convert the queue to a list
        file_list = [file_name for file_name in self.file_queue.queue]

        # If overwrite is true, save the queue to the holding file completely
        if overwrite:

            # Make the data directory if it doesn't exist
            mkdirP(self.config.data_dir)

            # Create the queue file
            with open(self.upload_queue_file_path, 'w') as f:
                for file_name in file_list:
                    f.write(file_name + '\n')

        else:

            # Load the list from the file and make sure to write only the entries not already in the file

            # Get a list of entries in the holding file (guard against the file not existing yet)
            existing_list = []
            if os.path.isfile(self.upload_queue_file_path):
                with open(self.upload_queue_file_path) as f:
                    for file_name in f:
                        file_name = file_name.replace('\n', '').replace('\r', '')
                        existing_list.append(file_name)

            # Save to disk only those entries which are not already there
            with open(self.upload_queue_file_path, 'a') as f:
                for file_name in file_list:
                    if file_name not in existing_list:
                        f.write(file_name + '\n')
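
A counterpart that restores the queue after a restart could look like the sketch below; the loadQueue name is an assumption mirroring saveQueue, and self.file_queue is assumed to be a standard queue.Queue:

    def loadQueue(self):
        """ Reload the list of files to upload from the holding file (hypothetical counterpart
            to saveQueue).
        """

        # Nothing to restore if the holding file was never written
        if not os.path.isfile(self.upload_queue_file_path):
            return

        with open(self.upload_queue_file_path) as f:
            for file_name in f:
                file_name = file_name.strip()
                if file_name:
                    self.file_queue.put(file_name)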
Example #5
def runCapture(config,
               duration=None,
               video_file=None,
               nodetect=False,
               detect_end=False,
               upload_manager=None):
    """ Run capture and compression for the given time.given

    Arguments:
        config: [config object] Configuration read from the .config file

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture 
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.

    """

    global STOP_CAPTURE

    # Create a directory for captured files
    night_data_dir_name = str(
        config.stationID) + '_' + datetime.datetime.utcnow().strftime(
            '%Y%m%d_%H%M%S_%f')

    # Full path to the data directory
    night_data_dir = os.path.join(os.path.abspath(config.data_dir),
                                  config.captured_dir, night_data_dir_name)

    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)

    # Load the default flat field image if it is available
    flat_struct = None

    if config.use_flat:

        # Check if the flat exists
        if os.path.exists(os.path.join(os.getcwd(), config.flat_file)):
            flat_struct = Image.loadFlat(os.getcwd(), config.flat_file)

            log.info('Loaded flat field image: ' +
                     os.path.join(os.getcwd(), config.flat_file))

    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config)

    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. We apply a dirty fix here: an extra image row and column are added whenever such
    ### a memory chunk would be created. The compression is performed, and the image is cropped back
    ### to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256 * config.width * config.height) % (512 * 1024) == 0:
        array_pad = 1

    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad),
                                      (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)

    sharedArrayBase2 = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad),
                                        (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')

    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start
            delay_detection = 120

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors,
                              cores=1,
                              log=log,
                              delay_start=delay_detection)
        detector.startPool()

    # Initialize buffered capture
    bc = BufferedCapture(sharedArray,
                         startTime,
                         sharedArray2,
                         startTime2,
                         config,
                         video_file=video_file)

    # Initialize the live image viewer
    live_view = LiveViewer(window_name='Maxpixel')

    # Initialize compression
    compressor = Compressor(night_data_dir,
                            sharedArray,
                            startTime,
                            sharedArray2,
                            startTime2,
                            config,
                            detector=detector,
                            live_view=live_view,
                            flat_struct=flat_struct)

    # Start buffered capture
    bc.startCapture()

    # Start the compression
    compressor.start()

    # Capture until Ctrl+C is pressed
    wait(duration)

    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')

    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of dropped frames: ' + str(dropped_frames))

    # Stop the compressor
    log.debug('Stopping compression...')
    detector, live_view = compressor.stop()
    log.debug('Compression stopped')

    # Stop the live viewer
    log.debug('Stopping live viewer...')
    live_view.stop()
    del live_view
    log.debug('Live view stopped')

    # Init data lists
    star_list = []
    meteor_list = []
    ff_detected = []

    # If detection should be performed
    if not nodetect:

        log.info('Finishing up the detection, ' +
                 str(detector.input_queue.qsize()) + ' files to process...')

        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()

        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 1 free
                available_cores = multiprocessing.cpu_count() - 1

                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(
                        available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)

            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')

        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')

            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()

        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE

        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

        # Remove all 'None' results, which were errors
        detection_results = [
            res for res in detection_results if res is not None
        ]

        # Count the number of detected meteors
        meteors_num = 0
        for _, _, meteor_data in detection_results:
            for meteor in meteor_data:
                meteors_num += 1

        log.info('TOTAL: ' + str(meteors_num) + ' detected meteors.')

        # Save the detections to a file
        for ff_name, star_data, meteor_data in detection_results:

            x2, y2, background, intensity = star_data

            # Skip if no stars were found
            if not x2:
                continue

            # Construct the table of the star parameters
            star_data = zip(x2, y2, background, intensity)

            # Add star info to the star list
            star_list.append([ff_name, star_data])

            # Handle the detected meteors
            meteor_No = 1
            for meteor in meteor_data:

                rho, theta, centroids = meteor

                # Append to the results list
                meteor_list.append([ff_name, meteor_No, rho, theta, centroids])
                meteor_No += 1

            # Add the FF file to the archive list if a meteor was detected on it
            if meteor_data:
                ff_detected.append(ff_name)

        # Generate the name for the CALSTARS file
        calstars_name = 'CALSTARS_' + str(config.stationID) + '_' \
            + os.path.basename(night_data_dir) + '.txt'

        # Write detected stars to the CALSTARS file
        CALSTARS.writeCALSTARS(star_list, night_data_dir, calstars_name, config.stationID, config.height, \
            config.width)

        # Generate FTPdetectinfo file name
        ftpdetectinfo_name = 'FTPdetectinfo_' + os.path.basename(
            night_data_dir) + '.txt'

        # Write FTPdetectinfo file
        FTPdetectinfo.writeFTPdetectinfo(meteor_list, night_data_dir, ftpdetectinfo_name, night_data_dir, \
            config.stationID, config.fps)

        # Get the platepar file
        platepar, platepar_path, platepar_fmt = getPlatepar(config)

        # Run calibration check and auto astrometry refinement
        if platepar is not None:

            # Read in the CALSTARS file
            calstars_list = CALSTARS.readCALSTARS(night_data_dir,
                                                  calstars_name)

            # Run astrometry check and refinement
            platepar, fit_status = autoCheckFit(config, platepar,
                                                calstars_list)

            # If the fit was successful, apply the astrometry to detected meteors
            if fit_status:

                log.info('Astrometric calibration SUCCESSFUL!')

                # Save the refined platepar to the night directory and as default
                platepar.write(os.path.join(night_data_dir,
                                            config.platepar_name),
                               fmt=platepar_fmt)
                platepar.write(platepar_path, fmt=platepar_fmt)

            else:
                log.info('Astrometric calibration FAILED! Using the old platepar for calibration...')

            # Calculate astrometry for meteor detections
            applyAstrometryFTPdetectinfo(night_data_dir, ftpdetectinfo_name,
                                         platepar_path)

    log.info('Plotting field sums...')

    # Plot field sums to a graph
    plotFieldsums(night_data_dir, config)

    # Archive all fieldsums to one archive
    archiveFieldsums(night_data_dir)

    # List for any extra files which will be copied to the night archive directory. Full paths have to be
    #   given
    extra_files = []

    log.info('Making a flat...')

    # Make a new flat field
    flat_img = makeFlat(night_data_dir, config)

    # If making the flat was successful, save it
    if flat_img is not None:

        # Save the flat in the root directory, to keep the operational flat updated
        scipy.misc.imsave(config.flat_file, flat_img)
        flat_path = os.path.join(os.getcwd(), config.flat_file)
        log.info('Flat saved to: ' + flat_path)

        # Copy the flat to the night's directory as well
        extra_files.append(flat_path)

    else:
        log.info('Making flat image FAILED!')

    ### Add extra files to archive

    # Add the platepar to the archive if it exists
    if os.path.exists(platepar_path):
        extra_files.append(platepar_path)

    # Add the config file to the archive too
    extra_files.append(os.path.join(os.getcwd(), '.config'))

    ### ###

    night_archive_dir = os.path.join(os.path.abspath(config.data_dir),
                                     config.archived_dir, night_data_dir_name)

    log.info('Archiving detections to ' + night_archive_dir)

    # Archive the detections
    archive_name = archiveDetections(night_data_dir, night_archive_dir, ff_detected, config, \
        extra_files=extra_files)

    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file on upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()
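
runCapture relies on a module-level STOP_CAPTURE flag, the setSIGINT/resetSIGINT pair, and a wait helper, none of which are shown on this page. A minimal sketch of that mechanism, with the handler bodies assumed (later revisions pass wait extra arguments):

import signal
import time

STOP_CAPTURE = False

def breakHandler(signum, frame):
    """ Custom Ctrl+C handler: request a clean stop instead of raising KeyboardInterrupt. """

    global STOP_CAPTURE
    STOP_CAPTURE = True

def setSIGINT():
    """ Route Ctrl+C to the 'soft' program kill. """
    signal.signal(signal.SIGINT, breakHandler)

def resetSIGINT():
    """ Restore the default Ctrl+C behaviour (KeyboardInterrupt). """
    signal.signal(signal.SIGINT, signal.default_int_handler)

def wait(duration=None):
    """ Sleep until the duration expires or STOP_CAPTURE is set (sketch). """

    t_start = time.time()
    while not STOP_CAPTURE:
        time.sleep(0.1)
        if (duration is not None) and (time.time() - t_start >= duration):
            break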
Example #6
    ######

    # Initialize the logger
    initLogging()

    # Get the logger handle
    log = logging.getLogger("logger")

    log.info('Program start')

    # Change the Ctrl+C action to the special handle
    setSIGINT()

    # Make the data directories
    root_dir = os.path.abspath(config.data_dir)
    mkdirP(root_dir)
    mkdirP(os.path.join(root_dir, config.captured_dir))
    mkdirP(os.path.join(root_dir, config.archived_dir))

    # If the duration of capture was given, capture right away for a specified time
    if cml_args.duration:

        try:
            # Get the duration in seconds
            duration = float(cml_args.duration) * 60 * 60

        except:
            log.error('Given duration is not a proper number of hours!')

        log.info('Freeing up disk space...')
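
For context, the cml_args.duration value parsed above would typically come from an argparse option along these lines (a sketch, not necessarily the project's exact definition):

import argparse

arg_parser = argparse.ArgumentParser(description="Starting capture and compression.")

# Hypothetical option matching the usage above: the duration is given in hours
arg_parser.add_argument('-d', '--duration', metavar='DURATION_HOURS', \
    help="Start capturing right away and run for the given number of hours.")

cml_args = arg_parser.parse_args()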
Example #7
def generateMP4s(dir_path, ftpfile_name):
    t1 = datetime.datetime.utcnow()

    # Load the font for labeling
    try:
        font = ImageFont.truetype("/usr/share/fonts/dejavu/DejaVuSans.ttf", 18)
    except:
        font = ImageFont.load_default()

    print("Preparing files for the timelapse...")
    # load the ftpfile so we know which frames we want
    meteor_list = FTPdetectinfo.readFTPdetectinfo(dir_path, ftpfile_name)
    for meteor in meteor_list:
        ff_name, _, _, n_segments, _, _, _, _, _, _, _, \
            meteor_meas = meteor
        # determine which frames we want

        first_frame = int(meteor_meas[0][1]) - 30
        last_frame = first_frame + 60
        if first_frame < 0:
            first_frame = 0
        if (n_segments > 1):
            lastseg = int(n_segments) - 1
            last_frame = int(meteor_meas[lastseg][1]) + 30
        #if last_frame > 255 :
        #    last_frame = 255
        if last_frame < first_frame + 60:
            last_frame = first_frame + 60

        print(ff_name, ' frames ', first_frame, last_frame)

        # Read the FF file
        ff = readFF(dir_path, ff_name)

        # Skip the file if it could not be read
        if ff is None:
            continue

        # Create temporary directory
        dir_tmp_path = os.path.join(dir_path, "temp_img_dir")

        if os.path.exists(dir_tmp_path):
            shutil.rmtree(dir_tmp_path)
            print("Deleted directory : " + dir_tmp_path)

        mkdirP(dir_tmp_path)
        print("Created directory : " + dir_tmp_path)

        # extract the individual frames
        name_time_list = f2f.FFtoFrames(dir_path + '/' + ff_name, dir_tmp_path,
                                        'jpg', -1, first_frame, last_frame)

        # Get id cam from the file name
        # e.g.  FF499_20170626_020520_353_0005120.bin
        # or FF_CA0001_20170626_020520_353_0005120.fits

        file_split = ff_name.split('_')

        # Check the number of list elements; the new FITS-style names have one more underscore
        i = 0
        if len(file_split[0]) == 2:
            i = 1
        camid = file_split[i]

        font = cv2.FONT_HERSHEY_SIMPLEX

        # add datestamp to each frame
        for img_file_name, timestamp in name_time_list:
            img = cv2.imread(os.path.join(dir_tmp_path, img_file_name))

            # Draw text to image
            text = camid + " " + timestamp.strftime(
                "%Y-%m-%d %H:%M:%S") + " UTC"
            cv2.putText(img, text, (10, ff.nrows - 6), font, 0.4,
                        (255, 255, 255), 1, cv2.LINE_AA)

            # Save the labelled image to disk
            cv2.imwrite(os.path.join(dir_tmp_path, img_file_name), img,
                        [cv2.IMWRITE_JPEG_QUALITY, 100])

        ffbasename = os.path.splitext(ff_name)[0]
        mp4_path = ffbasename + ".mp4"
        temp_img_path = os.path.join(dir_tmp_path, ffbasename + "_%03d.jpg")

        # If running on Windows, use ffmpeg.exe
        if platform.system() == 'Windows':

            # ffmpeg.exe path
            root = os.path.dirname(__file__)
            ffmpeg_path = os.path.join(root, "ffmpeg.exe")
            # Construct the command for ffmpeg
            com = ffmpeg_path + " -y -f image2 -pattern_type sequence -start_number " + str(
                first_frame) + " -i " + temp_img_path + " " + mp4_path
            print("Creating timelapse using ffmpeg...")
        else:
            # If avconv is not found, try using ffmpeg
            software_name = "avconv"
            print("Checking if avconv is available...")
            if os.system(software_name + " --help > /dev/null"):
                software_name = "ffmpeg"
                # Construct the command for ffmpeg
                com = software_name + " -y -f image2 -pattern_type sequence -start_number " + str(
                    first_frame) + " -i " + temp_img_path + " " + mp4_path
                print("Creating timelapse using ffmpeg...")
            else:
                print("Creating timelapse using avconv...")
                com = "cd " + dir_path + ";" \
                    + software_name + " -v quiet -r 30 -y -start_number " + str(first_frame) + " -i " + temp_img_path \
                    + " -vcodec libx264 -pix_fmt yuv420p -crf 25 -movflags faststart -g 15 -vf \"hqdn3d=4:3:6:4.5,lutyuv=y=gammaval(0.97)\" " \
                    + mp4_path

        #print(com)
        subprocess.call(com, shell=True, cwd=dir_path)

        # Delete the temporary directory and the files inside
        if os.path.exists(dir_tmp_path):
            try:
                shutil.rmtree(dir_tmp_path)
            except:
                # may occasionally fail due to ffmpeg thread still terminating
                # so catch this and wait a bit
                time.sleep(2)
                shutil.rmtree(dir_tmp_path)

            print("Deleted temporary directory : " + dir_tmp_path)

    print("Total time:", datetime.datetime.utcnow() - t1)
Example #8
def runCapture(config, duration=None, video_file=None, nodetect=False, detect_end=False, \
    upload_manager=None, resume_capture=False):
    """ Run capture and compression for the given time.given
    
    Arguments:
        config: [config object] Configuration read from the .config file.

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.
        resume_capture: [bool] Resume capture in the last data directory in CapturedFiles.

    Return:
        night_archive_dir: [str] Path to the archive folder of the processed night.
    """

    global STOP_CAPTURE

    # Check if resuming capture to the last capture directory
    night_data_dir_name = None
    if resume_capture:

        log.info("Resuming capture in the last capture directory...")

        # Find the latest capture directory
        capturedfiles_path = os.path.join(os.path.abspath(config.data_dir),
                                          config.captured_dir)
        most_recent_dir_time = 0
        for dir_name in sorted(os.listdir(capturedfiles_path)):

            dir_path_check = os.path.join(capturedfiles_path, dir_name)

            # Check it's a directory
            if os.path.isdir(dir_path_check):

                # Check if it starts with the correct station code
                if dir_name.startswith(str(config.stationID)):

                    dir_mod_time = os.path.getmtime(dir_path_check)

                    # Check that it is the most recent directory
                    if (night_data_dir_name is None) or (dir_mod_time >
                                                         most_recent_dir_time):
                        night_data_dir_name = dir_name
                        night_data_dir = dir_path_check
                        most_recent_dir_time = dir_mod_time

        if night_data_dir_name is None:
            log.info(
                "Previous capture directory could not be found! Creating a new one..."
            )

        else:
            log.info("Previous capture directory found: {:s}".format(
                night_data_dir))

        # Resume run is finished now, reset resume flag
        cml_args.resume = False

    # Make a name for the capture data directory
    if night_data_dir_name is None:

        # Create a directory for captured files
        night_data_dir_name = str(config.stationID) + '_' \
            + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')

        # Full path to the data directory
        night_data_dir = os.path.join(os.path.abspath(config.data_dir), config.captured_dir, \
            night_data_dir_name)

    # Wait before the capture starts if a time has been given
    if (not resume_capture) and (video_file is None):
        log.info("Waiting {:d} seconds before capture start...".format(
            int(config.capture_wait_seconds)))
        time.sleep(config.capture_wait_seconds)

    # Add a note about Patreon supporters
    print("################################################################")
    print("Thanks to our Patreon supporters in the 'Dinosaur Killer' class:")
    print("- Myron Valenta")
    print("https://www.patreon.com/globalmeteornetwork")
    print("\n\n\n" \
        + "       .:'       .:'        .:'       .:'  \n"\
        + "   _.::'     _.::'      _.::'     _.::'    \n"\
        + "  (_.'      (_.'       (_.'      (_.'      \n"\
        + "                         __                \n"\
        + "                        / _)               \n"\
        + "_\\/_          _/\\/\\/\\_/ /             _\\/_ \n"\
        + "/o\\         _|         /              //o\\ \n"\
        + " |         _|  (  | (  |                |  \n"\
        + "_|____    /__.-'|_|--|_|          ______|__\n")
    print("################################################################")

    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)

    # Copy the used config file to the capture directory
    if os.path.isfile(config.config_file_name):
        try:
            shutil.copy2(config.config_file_name,
                         os.path.join(night_data_dir, ".config"))
        except:
            log.error("Cannot copy the config file to the capture directory!")

    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config, night_data_dir)

    # If the platepar is not None, set the FOV from it
    if platepar is not None:
        config.fov_w = platepar.fov_h
        config.fov_h = platepar.fov_v

    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. We apply a dirty fix here: an extra image row and column are added whenever such
    ### a memory chunk would be created. The compression is performed, and the image is cropped back
    ### to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256 * config.width * config.height) % (512 * 1024) == 0:
        array_pad = 1

    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad),
                                      (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)

    sharedArrayBase2 = multiprocessing.Array(
        ctypes.c_uint8,
        256 * (config.width + array_pad) * (config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad),
                                        (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')

    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start (helps stability)
            delay_detection = 120

        # Add an additional postprocessing delay
        delay_detection += config.postprocess_delay

        # Set a flag file to indicate that previous files are being loaded (if any)
        capture_resume_file_path = os.path.join(
            config.data_dir, config.capture_resume_flag_file)
        with open(capture_resume_file_path, 'w') as f:
            pass

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors, cores=1, log=log, delay_start=delay_detection, \
            backup_dir=night_data_dir)
        detector.startPool()

        # If the capture is being resumed into the directory, load all previously saved FF files
        if resume_capture:

            # Load all previously processed FF files
            for i, ff_name in enumerate(sorted(os.listdir(night_data_dir))):

                # Every 50 files loaded, update the flag file
                if i % 50 == 0:
                    with open(capture_resume_file_path, 'a') as f:
                        f.write("{:d}\n".format(i))

                # Check if the file is a valid FF file
                ff_path = os.path.join(night_data_dir, ff_name)
                if os.path.isfile(ff_path) and (str(
                        config.stationID) in ff_name) and validFFName(ff_name):

                    # Add the FF file to the detector
                    detector.addJob([night_data_dir, ff_name, config],
                                    wait_time=0.005)
                    log.info("Added existing FF file for detection: {:s}".format(ff_name))

        # Remove the flag file
        if os.path.isfile(capture_resume_file_path):
            try:
                os.remove(capture_resume_file_path)
            except:
                log.error("There was an error during removing the capture resume flag file: " \
                    + capture_resume_file_path)

    # Initialize buffered capture
    bc = BufferedCapture(sharedArray,
                         startTime,
                         sharedArray2,
                         startTime2,
                         config,
                         video_file=video_file)

    # Initialize the live image viewer
    if config.live_maxpixel_enable:

        # Enable showing the live JPG
        config.live_jpg = True

        live_jpg_path = os.path.join(config.data_dir, 'live.jpg')

        live_view = LiveViewer(live_jpg_path,
                               image=True,
                               slideshow=False,
                               banner_text="Live")
        live_view.start()

    else:
        live_view = None

    # Initialize compression
    compressor = Compressor(night_data_dir,
                            sharedArray,
                            startTime,
                            sharedArray2,
                            startTime2,
                            config,
                            detector=detector)

    # Start buffered capture
    bc.startCapture()

    # Init and start the compression
    compressor.start()

    # Capture until Ctrl+C is pressed
    wait(duration, compressor)

    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')

    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of late or dropped frames: ' + str(dropped_frames))

    # Stop the compressor
    log.debug('Stopping compression...')
    detector = compressor.stop()

    # Free shared memory after the compressor is done
    try:
        log.debug('Freeing frame buffers...')
        del sharedArrayBase
        del sharedArray
        del sharedArrayBase2
        del sharedArray2

    except Exception as e:
        log.debug('Freeing frame buffers failed with error:' + repr(e))
        log.debug(repr(traceback.format_exception(*sys.exc_info())))

    log.debug('Compression stopped')

    if live_view is not None:

        # Stop the live viewer
        log.debug('Stopping live viewer...')

        live_view.stop()
        live_view.join()
        del live_view
        live_view = None

        log.debug('Live view stopped')

    # If detection should be performed
    if not nodetect:

        try:
            log.info('Finishing up the detection, ' + str(detector.input_queue.qsize()) \
                + ' files to process...')
        except:
            print(
                'Finishing up the detection... error when getting input queue size!'
            )

        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()

        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 2 free
                available_cores = multiprocessing.cpu_count() - 2

                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(
                        available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)

            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')

        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')

            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()

        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE

        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

    else:

        detection_results = []

    # Save detection to disk and archive detection
    night_archive_dir, archive_name, _ = processNight(night_data_dir, config, \
        detection_results=detection_results, nodetect=nodetect)

    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])
        log.info('File added...')

        # Delay the upload, if the delay is given
        upload_manager.delayNextUpload(delay=60 * config.upload_delay)

    # Delete detector backup files
    if detector is not None:
        detector.deleteBackupFiles()

    # If the capture was run for a limited time, run the upload right away
    if (duration is not None) and (upload_manager is not None):
        log.info('Uploading data before exiting...')
        upload_manager.uploadData()

    # Run the external script
    runExternalScript(night_data_dir, night_archive_dir, config)

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()

    return night_archive_dir
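
In isolation, this version of runCapture could be driven like so; the config parsing mirrors the cr.parse(".config") call in a later example, and the five-hour duration is purely illustrative:

# Hypothetical driver: capture for 5 hours, resuming the last capture directory
config = cr.parse(".config")
night_archive_dir = runCapture(config, duration=5*60*60, resume_capture=True)
print("Archive written to: " + night_archive_dir)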
Example #9
    try:
        nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    except:
        try:
            nframes = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
        except:
            nframes = 10**6

    # Compute the zero-padding width for the frame numbering
    npad = int(math.log10(nframes)) + 1

    # PNG output dir
    out_dir = os.path.abspath(cml_args.output_dir[0])

    # Make a save directory
    mkdirP(out_dir)

    c = 0

    # Save all frames to disk
    while cap.isOpened():

        # Read a frame
        ret, frame = cap.read()

        # Break the loop if all frames were read
        if not ret:
            break

        # Convert a frame to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
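
The loop is cut off after the grayscale conversion, but the npad computed above implies zero-padded PNG names. A plausible continuation under that assumption:

        # Save the grayscale frame with a zero-padded index (the continuation is an assumption)
        cv2.imwrite(os.path.join(out_dir, "{:0{}d}.png".format(c, npad)), gray)

        c += 1

    cap.release()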
Example #10
def FFtoFrames(file_path,
               out_dir,
               file_format,
               deinterlace_mode,
               first_frame=0,
               last_frame=255):
    #########################

    # Load the configuration file
    config = cr.parse(".config")

    # Read the deinterlace
    #   -1 - no deinterlace
    #    0 - odd first
    #    1 - even first

    if deinterlace_mode not in (-1, 0, 1):
        print('Unknown deinterlace mode:', deinterlace_mode)
        sys.exit()

    # Check if the file exists
    if not os.path.isfile(file_path):

        print('The file {:s} does not exist!'.format(file_path))
        sys.exit()

    # Check if the output directory exists, make it if it doesn't
    if not os.path.exists(out_dir):

        print('Making directory: ' + out_dir)
        mkdirP(out_dir)

    # Open the FF file
    dir_path, file_name = os.path.split(file_path)
    ff = readFF(dir_path, file_name)

    # Take the FPS from the FF file, if available
    fps = None
    if hasattr(ff, 'fps'):
        fps = ff.fps

    # Otherwise, fall back to the FPS from the config file
    if fps is None:
        fps = config.fps

    # Try to read the number of frames from the FF file itself
    if ff.nframes > 0:
        nframes = ff.nframes

    else:
        nframes = 256

    # Construct a file name for saving
    if file_format == 'pngm':

        # If the METAL type PNG file is given, make the file name 'dump'
        file_name_saving = 'dump'

    else:

        file_name_saving = file_name.replace('.fits', '').replace('.bin', '')

    frame_name_time_list = []

    # Get the initial time of the FF file
    ff_dt = filenameToDatetime(file_name)

    # Go through all frames
    for i in range(first_frame, last_frame + 1):
        # Reconstruct individual frames

        frame = reconstructFrame(ff, i, avepixel=True)
        # Deinterlace the frame if necessary, odd first
        if deinterlace_mode == 0:

            frame_odd = deinterlaceOdd(frame)
            frame_name, frame_dt = saveFrame(frame_odd,
                                             i,
                                             out_dir,
                                             file_name_saving,
                                             file_format,
                                             ff_dt,
                                             fps,
                                             half_frame=0)
            frame_name_time_list.append([frame_name, frame_dt])

            frame_even = deinterlaceEven(frame)
            frame_name, frame_dt = saveFrame(frame_even,
                                             i,
                                             out_dir,
                                             file_name_saving,
                                             file_format,
                                             ff_dt,
                                             fps,
                                             half_frame=1)
            frame_name_time_list.append([frame_name, frame_dt])

        # Even first
        elif deinterlace_mode == 1:

            frame_even = deinterlaceEven(frame)
            frame_name, frame_dt = saveFrame(frame_even,
                                             i,
                                             out_dir,
                                             file_name_saving,
                                             file_format,
                                             ff_dt,
                                             fps,
                                             half_frame=0)
            frame_name_time_list.append([frame_name, frame_dt])

            frame_odd = deinterlaceOdd(frame)
            frame_name, frame_dt = saveFrame(frame_odd,
                                             i,
                                             out_dir,
                                             file_name_saving,
                                             file_format,
                                             ff_dt,
                                             fps,
                                             half_frame=1)
            frame_name_time_list.append([frame_name, frame_dt])

        # No deinterlace
        else:
            frame_name, frame_dt = saveFrame(frame, i - first_frame, out_dir,
                                             file_name_saving, file_format,
                                             ff_dt, fps)
            frame_name_time_list.append([frame_name, frame_dt])

    # If the frames are saved for METAL, the times have to be given in a separate file
    if file_format == 'pngm':

        with open(os.path.join(out_dir, 'frtime.txt'), 'w') as f:

            # Write all frames and times in a file
            for frame_name, frame_dt in frame_name_time_list:
                # 20180117:01:08:29.8342
                f.write('{:s} {:s}\n'.format(
                    frame_name, frame_dt.strftime("%Y%m%d:%H:%M:%S.%f")))

    return frame_name_time_list
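
FFtoFrames gets its base timestamp from filenameToDatetime. A hedged sketch of that parser for the new-style names shown in these examples (e.g. FF_CA0001_20170626_020520_353_0005120.fits); the real implementation may handle more name variants:

import datetime

def filenameToDatetime(file_name):
    """ Extract the UTC timestamp from a new-style FF file name (sketch). """

    # FF_CA0001_20170626_020520_353_0005120.fits -> ['FF', 'CA0001', '20170626', '020520', '353', ...]
    parts = file_name.split('_')

    date_str, time_str, ms_str = parts[2], parts[3], parts[4]

    dt = datetime.datetime.strptime(date_str + '_' + time_str, '%Y%m%d_%H%M%S')

    return dt + datetime.timedelta(milliseconds=int(ms_str))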
Example #11
def generateTimelapse(dir_path, nodel):

    t1 = datetime.datetime.utcnow()

    # Load the font for labeling
    try:
        font = ImageFont.truetype("/usr/share/fonts/dejavu/DejaVuSans.ttf", 18)
    except:
        font = ImageFont.load_default()

    # Create temporary directory
    dir_tmp_path = os.path.join(dir_path, "temp_img_dir")

    if os.path.exists(dir_tmp_path):
        shutil.rmtree(dir_tmp_path)
        print("Deleted directory : " + dir_tmp_path)

    mkdirP(dir_tmp_path)
    print("Created directory : " + dir_tmp_path)

    print("Preparing files for the timelapse...")
    c = 0

    ff_list = [
        ff_name for ff_name in sorted(os.listdir(dir_path))
        if validFFName(ff_name)
    ]

    for file_name in ff_list:

        # Read the FF file
        ff = readFF(dir_path, file_name)

        # Skip the file if it could not be read
        if ff is None:
            continue

        # Get the timestamp from the FF name
        timestamp = filenameToDatetime(file_name).strftime("%Y-%m-%d %H:%M:%S")

        # Get id cam from the file name
        # e.g.  FF499_20170626_020520_353_0005120.bin
        # or FF_CA0001_20170626_020520_353_0005120.fits

        file_split = file_name.split('_')

    # Check the number of list elements; the new FITS-style names have one more underscore
        i = 0
        if len(file_split[0]) == 2:
            i = 1
        camid = file_split[i]

        # Make a filename for the image, continuous count %04d
        img_file_name = 'temp_{:04d}.jpg'.format(c)

        img = ff.maxpixel

        # Draw text to image
        font = cv2.FONT_HERSHEY_SIMPLEX
        text = camid + " " + timestamp + " UTC"
        cv2.putText(img, text, (10, ff.nrows - 6), font, 0.4, (255, 255, 255),
                    1, cv2.LINE_AA)

        # Save the labelled image to disk
        cv2.imwrite(os.path.join(dir_tmp_path, img_file_name), img,
                    [cv2.IMWRITE_JPEG_QUALITY, 100])

        c = c + 1

        # Print elapsed time
        if c % 30 == 0:
            print("{:>5d}/{:>5d}, Elapsed: {:s}".format(c + 1, len(ff_list), \
                str(datetime.datetime.utcnow() - t1)), end="\r")
            sys.stdout.flush()

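    # NOTE: 'fps' is not defined anywhere in this function; it is assumed to come from an
    #   enclosing scope (e.g. a module-level value parsed from the command line)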
    # If running on Linux, use avconv
    if platform.system() == 'Linux':

        # If avconv is not found, try using ffmpeg. In case of using ffmpeg,
        # use parameter -nostdin to avoid it being stuck waiting for user input
        software_name = "avconv"
        nostdin = ""
        print("Checking if avconv is available...")
        if os.system(software_name + " --help > /dev/null"):
            software_name = "ffmpeg"
            nostdin = " -nostdin "

        # Construct the command for avconv
        mp4_path = os.path.join(dir_path, os.path.basename(dir_path) + ".mp4")
        temp_img_path = os.path.basename(
            dir_tmp_path) + os.sep + "temp_%04d.jpg"
        com = "cd " + dir_path + ";" \
            + software_name + nostdin + " -v quiet -r "+ str(fps) +" -y -i " + temp_img_path \
            + " -vcodec libx264 -pix_fmt yuv420p -crf 25 -movflags faststart -g 15 -vf \"hqdn3d=4:3:6:4.5,lutyuv=y=gammaval(0.77)\" " \
            + mp4_path

        print("Creating timelapse using {:s}...".format(software_name))
        print(com)
        subprocess.call([com], shell=True)

    # If running on Windows, use ffmpeg.exe
    elif platform.system() == 'Windows':

        # ffmpeg.exe path
        root = os.path.dirname(__file__)
        ffmpeg_path = os.path.join(root, "ffmpeg.exe")

        # Construct the command for ffmpeg
        mp4_path = os.path.basename(dir_path) + ".mp4"
        temp_img_path = os.path.join(os.path.basename(dir_tmp_path),
                                     "temp_%04d.jpg")
        com = ffmpeg_path + " -v quiet -r " + str(
            fps
        ) + " -i " + temp_img_path + " -c:v libx264 -pix_fmt yuv420p -an -crf 25 -g 15 -vf \"hqdn3d=4:3:6:4.5,lutyuv=y=gammaval(0.77)\" -movflags faststart -y " + mp4_path

        print("Creating timelapse using ffmpeg...")
        print(com)
        subprocess.call(com, shell=True, cwd=dir_path)

    else:
        print("generateTimelapse only works on Linux or Windows; the video could not be encoded")

    # Delete the temporary directory and the files inside
    if os.path.exists(dir_tmp_path) and not nodel:
        shutil.rmtree(dir_tmp_path)
        print("Deleted temporary directory : " + dir_tmp_path)

    print("Total time:", datetime.datetime.utcnow() - t1)
Example #12
def runCapture(config, duration=None, video_file=None, nodetect=False, detect_end=False, upload_manager=None):
    """ Run capture and compression for the given time.given

    Arguments:
        config: [config object] Configuration read from the .config file

    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture 
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.

    Return:
        night_archive_dir: [str] Path to the archive folder of the processed night.

    """

    global STOP_CAPTURE


    # Create a directory for captured files
    night_data_dir_name = str(config.stationID) + '_' + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')

    # Full path to the data directory
    night_data_dir = os.path.join(os.path.abspath(config.data_dir), config.captured_dir, night_data_dir_name)


    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)


    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config, night_data_dir)


    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. We apply a dirty fix here: an extra image row and column are added whenever such
    ### a memory chunk would be created. The compression is performed, and the image is cropped back
    ### to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256*config.width*config.height)%(512*1024) == 0:
        array_pad = 1


    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)
    
    sharedArrayBase2 = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')


    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start
            delay_detection = 120

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors, cores=1, log=log, delay_start=delay_detection, \
            backup_dir=night_data_dir)
        detector.startPool()

    
    # Initialize buffered capture
    bc = BufferedCapture(sharedArray, startTime, sharedArray2, startTime2, config, video_file=video_file)


    # Initialize the live image viewer
    if config.live_maxpixel_enable:
        live_view = LiveViewer(night_data_dir, slideshow=False, banner_text="Live")
        live_view.start()

    else:
        live_view = None

    
    # Initialize compression
    compressor = Compressor(night_data_dir, sharedArray, startTime, sharedArray2, startTime2, config, 
        detector=detector)

    
    # Start buffered capture
    bc.startCapture()

    # Init and start the compression
    compressor.start()

    
    # Capture until Ctrl+C is pressed
    wait(duration, compressor)
        
    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')


    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of late or dropped frames: ' + str(dropped_frames))


    # Stop the compressor
    log.debug('Stopping compression...')
    detector = compressor.stop()

    # Free shared memory after the compressor is done
    try:
        log.debug('Freeing frame buffers...')
        del sharedArrayBase
        del sharedArray
        del sharedArrayBase2
        del sharedArray2

    except Exception as e:
        log.debug('Freeing frame buffers failed with error:' + repr(e))
        log.debug(repr(traceback.format_exception(*sys.exc_info())))

    log.debug('Compression stopped')


    if live_view is not None:

        # Stop the live viewer
        log.debug('Stopping live viewer...')

        live_view.stop()
        live_view.join()
        del live_view
        live_view = None

        log.debug('Live view stopped')



    # If detection should be performed
    if not nodetect:

        try:
            log.info('Finishing up the detection, ' + str(detector.input_queue.qsize()) \
                + ' files to process...')
        except:
            print('Finishing up the detection... error when getting input queue size!')


        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()


        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 2 free
                available_cores = multiprocessing.cpu_count() - 2


                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)


            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')


        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')
                
            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager
                    

            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()


        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE


        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

    else:

        detection_results = []




    # Save detection to disk and archive detection    
    night_archive_dir, archive_name, _ = processNight(night_data_dir, config, \
        detection_results=detection_results, nodetect=nodetect)


    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])
        log.info('File added...')


    # Delete detector backup files
    if detector is not None:
        detector.deleteBackupFiles()


    # If the capture was run for a limited time, run the upload right away
    if (duration is not None) and (upload_manager is not None):
        log.info('Uploading data before exiting...')
        upload_manager.uploadData()


    # Run the external script
    runExternalScript(night_data_dir, night_archive_dir, config)
    

    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()


    return night_archive_dir
Example #13
        sys.exit()



    # Check if the file exists
    if not os.path.isfile(file_path):

        print('The file {:s} does not exist!'.format(file_path))
        sys.exit()


    # Check if the output directory exists, make it if it doesn't
    if not os.path.exists(out_dir):

        print('Making directory: ' + out_dir)
        mkdirP(out_dir)



    # Open the FF file
    dir_path, file_name = os.path.split(file_path)
    ff = readFF(dir_path, file_name)




    # Take the FPS from the FF file, if available
    if hasattr(ff, 'fps'):
        fps = ff.fps

    # Take the FPS from the config file, if it was not given as an argument
Example #14
    t1 = datetime.datetime.utcnow()

    # Load the font for labeling
    try:
        font = ImageFont.truetype("/usr/share/fonts/dejavu/DejaVuSans.ttf", 18)
    except:
        font = ImageFont.load_default()

    # Create temporary directory
    dir_tmp_path = os.path.join(dir_path, "temp_img_dir")

    if os.path.exists(dir_tmp_path):
        shutil.rmtree(dir_tmp_path)
        print("Deleted directory : " + dir_tmp_path)
		
    mkdirP(dir_tmp_path)
    print("Created directory : " + dir_tmp_path)
    
    print("Preparing files for the timelapse...")
    c = 0

    ff_list = [ff_name for ff_name in sorted(os.listdir(dir_path)) if validFFName(ff_name)]

    for file_name in ff_list:

        # Read the FF file
        ff = readFF(dir_path, file_name)

        # Skip the file if it could not be read
        if ff is None:
            continue