Example #1
def imcombine_data(datas, operation="nanmean"):

    # Allocate enough shared memory to load a single OTA from all files. The shared part is
    # important to make communication between the main and the slave processes possible.
    size_x, size_y = datas[0].shape[0], datas[0].shape[1]
    total_pixels = size_x*size_y*len(datas)
    # print "total pixel count",total_pixels
    shmem_buffer = SharedMemory(ctypes.c_float, (size_x, size_y, len(datas)))
    # multiprocessing.RawArray(ctypes.c_float, total_pixels) #size_x*size_y*len(datas))

    # Extract the shared memory buffer as numpy array to make things easier
    buffer = shmem_buffer.to_ndarray()
    # shmem_as_ndarray(shmem_buffer).reshape((size_x, size_y, len(datas)))

    # Set the full buffer to NaN
    buffer[:,:,:] = numpy.nan

    # Now open all the other files, look for the right extension, and copy their image data to buffer
    for data_id in range(len(datas)):
        # stdout_write("copying %d" % (data_id))
        buffer[:,:,data_id] = datas[data_id][:,:]

    sizes = (size_x, size_y, len(datas))
    combined = imcombine_sharedmem_data(shmem_buffer, operation, sizes)
    shmem_buffer.free()

    del shmem_buffer
    return combined
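All of the imcombine examples on this page rely on a project-specific SharedMemory wrapper, constructed as SharedMemory(ctypes.c_float, shape) and offering to_ndarray() and free(). The wrapper itself is not shown here; the following is only a minimal sketch of what such a class could look like, assuming it is backed by multiprocessing.RawArray so that worker processes can access the same buffer:

import ctypes
import multiprocessing
import numpy

class SharedMemory(object):
    # Hypothetical re-implementation of the wrapper assumed by these examples:
    # SharedMemory(ctype, shape), .to_ndarray(), .free()
    def __init__(self, ctype, shape):
        self.shape = shape
        n_items = int(numpy.prod(shape))
        # RawArray lives in shared memory, so processes started via
        # multiprocessing can read and write the same buffer
        self._raw = multiprocessing.RawArray(ctype, n_items)

    def to_ndarray(self):
        # zero-copy numpy view of the shared buffer
        # (dtype fixed to float32 here because the examples only use ctypes.c_float)
        return numpy.frombuffer(self._raw, dtype=numpy.float32).reshape(self.shape)

    def free(self):
        # drop the reference; the memory is released once all views are gone
        self._raw = None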
Example #2
    def __init__(self):
        self.ets2telemetry = None
        self.interval = 25e-3
        self.shared_memory = SharedMemory()

        self.threading_event = threading.Event()
        self.elapsed()
Example #3
def imcombine_data(datas, operation="nanmean"):

    # Allocate enough shared memory to load a single OTA from all files. The shared part is
    # important to make communication between the main and the slave processes possible.
    size_x, size_y = datas[0].shape[0], datas[0].shape[1]
    total_pixels = size_x * size_y * len(datas)
    # print "total pixel count",total_pixels
    shmem_buffer = SharedMemory(ctypes.c_float, (size_x, size_y, len(datas)))
    # multiprocessing.RawArray(ctypes.c_float, total_pixels) #size_x*size_y*len(datas))

    # Extract the shared memory buffer as numpy array to make things easier
    buffer = shmem_buffer.to_ndarray()
    # shmem_as_ndarray(shmem_buffer).reshape((size_x, size_y, len(datas)))

    # Set the full buffer to NaN
    buffer[:, :, :] = numpy.nan

    # Now open all the other files, look for the right extension, and copy their image data to buffer
    for data_id in range(len(datas)):
        # stdout_write("copying %d" % (data_id))
        buffer[:, :, data_id] = datas[data_id][:, :]

    sizes = (size_x, size_y, len(datas))
    combined = imcombine_sharedmem_data(shmem_buffer, operation, sizes)
    shmem_buffer.free()

    del shmem_buffer
    return combined
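A hedged usage sketch for imcombine_data, assuming that operation="nanmean" makes the result the NaN-ignoring per-pixel mean of the stacked frames:

import numpy

# three dummy frames with identical shape (contents are illustrative)
datas = [numpy.random.random((256, 256)).astype(numpy.float32) for _ in range(3)]

combined = imcombine_data(datas, operation="nanmean")
# presumably equivalent to a plain nanmean over the stacking axis
reference = numpy.nanmean(numpy.dstack(datas), axis=2)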
Example #4
class Ets2SdkTelemetry:
    def __init__(self):
        self.ets2telemetry = None
        self.interval = 25e-3
        self.shared_memory = SharedMemory()

        self.threading_event = threading.Event()
        self.elapsed()

    def elapsed(self):
        raw_data = self.shared_memory.update()
        self.ets2telemetry = Ets2Telemetry(raw_data)

        if not self.threading_event.is_set():
            threading.Timer(self.interval, self.elapsed).start()
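Ets2SdkTelemetry.elapsed() re-arms itself with threading.Timer every 25 ms until threading_event is set, so a caller only needs to create the object and read ets2telemetry whenever it wants a fresh snapshot. A minimal usage sketch (the polling pattern is an assumption; the stop mechanism is taken from the code above):

import time

telemetry = Ets2SdkTelemetry()
try:
    for _ in range(10):
        # elapsed() keeps refreshing this attribute in the background
        print(telemetry.ets2telemetry)
        time.sleep(1.0)
finally:
    # setting the event prevents the timer from re-arming itself
    telemetry.threading_event.set()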
Example #5
def imcombine_subprocess(extension, filelist, shape, operation, queue, verbose,
                         subtract=None, scale=None):

    logger = logging.getLogger("ImCombine")

    #
    # Allocate enough shared memory to hold all frames
    #
    size_x, size_y, n_frames = shape[0], shape[1], shape[2]
    shmem_buffer = SharedMemory(ctypes.c_float, (size_x, size_y, n_frames))
    # multiprocessing.RawArray(ctypes.c_float, size_x*size_y*n_frames) #len(filelist))

    # Extract the shared memory buffer as numpy array to make things easier
    # buffer = shmem_as_ndarray(shmem_buffer).reshape((size_x, size_y, n_frames))
    buffer = shmem_buffer.to_ndarray()

    # Set the full buffer to NaN
    buffer[:,:,:] = numpy.nan

    # Now open all files, look for the right extension, and copy their image data to buffer
    cur_frame = 0
    for file_number in range(len(filelist)):
        filename = filelist[file_number]
        hdulist = pyfits.open(filename)
        for i, ext in enumerate(hdulist):
            if (not is_image_extension(ext)):
                continue
            fppos = ext.name #header['EXTNAME']

            if (not fppos == extension):
                continue

            # Get data for the right extension in this frame
            framedata = ext.data[:,:]
            
            # optionally, apply the scaling and subtraction correction
            if (subtract is not None):
                try:
                    framedata -= float(subtract)
                except ValueError:
                    if (subtract in hdulist[0].header):
                        # print "subtracting",hdulist[0].header[subtract]
                        framedata -= hdulist[0].header[subtract]                
            if (scale is not None):
                try:
                    framedata *= float(scale)
                except ValueError:
                    if (scale in hdulist[0].header):
                        framedata *= hdulist[0].header[scale]         

            # store the (corrected) image data for parallel processing
            buffer[:,:,cur_frame] = framedata
            cur_frame += 1
            break

        ext.data = None
        hdulist.close()
        del hdulist
        if (verbose): stdout_write("\n   Added file %s ..." % (filename))

    if (n_frames > 1):
        # stdout_write("\n   Starting imcombine for real ...")
        combined = imcombine_sharedmem_data(shmem_buffer, operation=operation, sizes=(size_x, size_y, n_frames))
    else:
        logger.debug("Only a single frame contributes to this OTA, skipping combine and copying input to output")
        combined = numpy.array(buffer[:,:,0])

    shmem_buffer.free()

    # put the imcombine'd data into the queue to return them to the main process
    queue.put(combined)

    # and kill this process, returning all its memory
    sys.exit(0)
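imcombine_subprocess hands its result back through queue and then exits, so it is presumably launched as a child process by the caller. A hedged sketch of that calling side (the extension name, file list and frame size are illustrative values, not taken from the project):

import multiprocessing

filelist = ["frame1.fits", "frame2.fits", "frame3.fits"]   # hypothetical input files
size_x, size_y = 4096, 4096                                 # illustrative frame size

return_queue = multiprocessing.Queue()
worker_args = ("OTA33.SCI",                                 # extension name, illustrative
               filelist,
               (size_x, size_y, len(filelist)),
               "nanmean",
               return_queue,
               False)                                        # verbose
p = multiprocessing.Process(target=imcombine_subprocess, args=worker_args)
p.start()
combined = return_queue.get()   # read the result before join() to avoid blocking on a full queue
p.join()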
Example #6
def imcombine_sharedmem_data(shmem_buffer, operation, sizes):

    size_x, size_y, n_frames = sizes
    shmem_results = SharedMemory(ctypes.c_float, (size_x,size_y))
    # multiprocessing.RawArray(ctypes.c_float, size_x*size_y)

    logger = logging.getLogger("CombineMgr")

    #
    # Set up the parallel processing environment
    #
    queue = multiprocessing.JoinableQueue()
    return_queue = multiprocessing.Queue()

    # Now compute median/average/sum/etc
    # buffer = shmem_as_ndarray(shmem_buffer).reshape((size_x, size_y, n_frames))
    buffer = shmem_buffer.to_ndarray()
    for line in range(buffer.shape[0]):
        #print "Adding line",line,"to queue"
        queue.put(line)
        
    lines_done = numpy.zeros((buffer.shape[0]), dtype=bool)
    lines_read = 0

    #result_buffer = numpy.zeros(shape=(buffer.shape[0], buffer.shape[1]), dtype=numpy.float32)
    processes = []
    for i in range(number_cpus):
        worker_args = (queue, return_queue,
                       shmem_buffer, shmem_results,
                       size_x, size_y, n_frames, operation)
        p = multiprocessing.Process(target=parallel_compute, args=worker_args)
        p.start()
        processes.append(p)

    while (lines_read < buffer.shape[0] and numpy.sum(lines_done) < buffer.shape[0]):
        try:
            line = return_queue.get(timeout=5)
            lines_read += 1
            try:
                lines_done[line] = True
            except:
                pass
        except Queue.Empty:
            logger.error("Encountered timeout while combinging data")
            # something bad has happened to one of the workers
            # find one of the lines that has not been processed yet
            missing_lines = (numpy.arange(buffer.shape[0]))[~lines_done]
            logger.info("Re-queuing %d lines for processing" % (missing_lines.shape[0]))
            for line in missing_lines:
                queue.put(line)
        except:
            podi_logging.log_exception()

    # Tell all workers to shut down when no more data is left to work on
    logger.debug("telling all workers to shut down!")
    for i in range(number_cpus):
        logger.debug("telling all worker %d to shut down!" % (i))
        queue.put((None))

    # Once all commands are sent out to the workers, join them to speed things up
    logger.debug("Terminating workers!")
    for p in processes:
        p.terminate()
        p.join(timeout=1)

    results = numpy.copy(shmem_results.to_ndarray()) #).reshape((size_x, size_y)))
    shmem_results.free()

    del shmem_results
    del queue
    del buffer

    return results
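The worker function parallel_compute is not shown on this page; from the protocol above (row indices pushed onto queue, None as the shutdown sentinel, finished rows echoed on return_queue, results written into shmem_results), a minimal sketch of what it could look like, assuming operation names a numpy reduction such as "nanmean":

import numpy

def parallel_compute(queue, return_queue, shmem_buffer, shmem_results,
                     size_x, size_y, n_frames, operation):
    # shared views: input cube (size_x, size_y, n_frames) and 2-D result plane
    cube = shmem_buffer.to_ndarray()
    result = shmem_results.to_ndarray()
    combine = getattr(numpy, operation)      # e.g. numpy.nanmean

    while True:
        line = queue.get()
        if line is None:                      # shutdown sentinel from the manager
            queue.task_done()
            break
        # combine all frames for this image row, ignoring NaNs for the nan* operations
        result[line, :] = combine(cube[line, :, :], axis=1)
        queue.task_done()
        return_queue.put(line)                # report the finished row back to the manager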
Example #7
def imcombine_subprocess(extension,
                         filelist,
                         shape,
                         operation,
                         queue,
                         verbose,
                         subtract=None,
                         scale=None):

    logger = logging.getLogger("ImCombine")

    #
    # Allocate enough shared memory to hold all frames
    #
    size_x, size_y, n_frames = shape[0], shape[1], shape[2]
    shmem_buffer = SharedMemory(ctypes.c_float, (size_x, size_y, n_frames))
    # multiprocessing.RawArray(ctypes.c_float, size_x*size_y*n_frames) #len(filelist))

    # Extract the shared memory buffer as numpy array to make things easier
    # buffer = shmem_as_ndarray(shmem_buffer).reshape((size_x, size_y, n_frames))
    buffer = shmem_buffer.to_ndarray()

    # Set the full buffer to NaN
    buffer[:, :, :] = numpy.nan

    # Now open all files, look for the right extension, and copy their image data to buffer
    cur_frame = 0
    for file_number in range(len(filelist)):
        filename = filelist[file_number]
        hdulist = pyfits.open(filename)
        for i, ext in enumerate(hdulist):
            if (not is_image_extension(ext)):
                continue
            fppos = ext.name  #header['EXTNAME']

            if (not fppos == extension):
                continue

            # Get data for the right extension in this frame
            framedata = ext.data[:, :]

            # optionally, apply the scaling and subtraction correction
            if (subtract is not None):
                try:
                    framedata -= float(subtract)
                except ValueError:
                    if (subtract in hdulist[0].header):
                        # print "subtracting",hdulist[0].header[subtract]
                        framedata -= hdulist[0].header[subtract]
            if (scale is not None):
                try:
                    framedata *= float(scale)
                except ValueError:
                    if (scale in hdulist[0].header):
                        framedata *= hdulist[0].header[scale]

            # store the (corrected) image data for parallel processing
            buffer[:, :, cur_frame] = framedata
            cur_frame += 1
            break

        ext.data = None
        hdulist.close()
        del hdulist
        if (verbose): stdout_write("\n   Added file %s ..." % (filename))

    if (n_frames > 1):
        # stdout_write("\n   Starting imcombine for real ...")
        combined = imcombine_sharedmem_data(shmem_buffer,
                                            operation=operation,
                                            sizes=(size_x, size_y, n_frames))
    else:
        logger.debug(
            "Only a single frame contributes to this OTA, skipping combine and copying input to output"
        )
        combined = numpy.array(buffer[:, :, 0])

    shmem_buffer.free()

    # put the imcombine'd data into the queue to return them to the main process
    queue.put(combined)

    # and kill this process, returning all its memory
    sys.exit(0)
Example #8
def imcombine_sharedmem_data(shmem_buffer, operation, sizes):

    size_x, size_y, n_frames = sizes
    shmem_results = SharedMemory(ctypes.c_float, (size_x, size_y))
    # multiprocessing.RawArray(ctypes.c_float, size_x*size_y)

    logger = logging.getLogger("CombineMgr")

    #
    # Set up the parallel processing environment
    #
    queue = multiprocessing.JoinableQueue()
    return_queue = multiprocessing.Queue()

    # Now compute median/average/sum/etc
    # buffer = shmem_as_ndarray(shmem_buffer).reshape((size_x, size_y, n_frames))
    buffer = shmem_buffer.to_ndarray()
    for line in range(buffer.shape[0]):
        #print "Adding line",line,"to queue"
        queue.put(line)

    lines_done = numpy.zeros((buffer.shape[0]), dtype=bool)
    lines_read = 0

    #result_buffer = numpy.zeros(shape=(buffer.shape[0], buffer.shape[1]), dtype=numpy.float32)
    processes = []
    for i in range(number_cpus):
        worker_args = (queue, return_queue, shmem_buffer, shmem_results,
                       size_x, size_y, n_frames, operation)
        p = multiprocessing.Process(target=parallel_compute, args=worker_args)
        p.start()
        processes.append(p)

    while (lines_read < buffer.shape[0]
           and numpy.sum(lines_done) < buffer.shape[0]):
        try:
            line = return_queue.get(timeout=5)
            lines_read += 1
            try:
                lines_done[line] = True
            except:
                pass
        except Queue.Empty:
            logger.error("Encountered timeout while combinging data")
            # something bad has happened to one of the workers
            # find one of the lines that has not been processed yet
            missing_lines = (numpy.arange(buffer.shape[0]))[~lines_done]
            logger.info("Re-queuing %d lines for processing" %
                        (missing_lines.shape[0]))
            for line in missing_lines:
                queue.put(line)
        except:
            podi_logging.log_exception()

    # Tell all workers to shut down when no more data is left to work on
    logger.debug("telling all workers to shut down!")
    for i in range(number_cpus):
        logger.debug("telling all worker %d to shut down!" % (i))
        queue.put((None))

    # Once all commands are sent out to the workers, join them to speed things up
    logger.debug("Terminating workers!")
    for p in processes:
        p.terminate()
        p.join(timeout=1)

    results = numpy.copy(
        shmem_results.to_ndarray())  #).reshape((size_x, size_y)))
    shmem_results.free()

    del shmem_results
    del queue
    del buffer

    return results