Example #1
def publisher(queue, event, port):
    """ Simple method that starts a publisher on the port 5555.

    :param multiprocessing.Queue queue: Queue of messages to be broadcasted
    :param multiprocessing.Event event: Event to stop the publisher
    :param int port: port in which to broadcast data
    .. TODO:: The publisher's port should be determined in a configuration file.
    """
    logger = get_logger(name=__name__)
    port_pub = port
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.bind("tcp://*:%s" % port_pub)
    sleep(1)  # It takes time for subscriptions to propagate to the publisher.
    # Without this sleep the first messages may be lost
    logger.info('Bound socket on {}'.format(port_pub))
    while not event.is_set():
        while not queue.empty():
            data = queue.get()  # Should be a dictionary {'topic': topic, 'data': data}
            logger.debug('Sending {} on {}'.format(data['data'],
                                                   data['topic']))
            socket.send_string(data['topic'], zmq.SNDMORE)
            socket.send_pyobj(data['data'])
            if general_stop_event.is_set():
                break
        sleep(0.05)  # Sleeps 50 milliseconds to be polite with the CPU

    sleep(1)  # Gives the subscribers enough time to update their status
    socket.close()
    logger.info('Stopped the publisher')
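The publisher sends every message as a two-part ZeroMQ envelope: a topic string followed by a pickled Python object. The later examples read these messages through a `subscribe` helper; a minimal sketch of what such a helper could look like is shown here, assuming the publisher runs on localhost (the hostname and the exact implementation are assumptions, not taken from the source):

import zmq


def subscribe(port, topic):
    """ Sketch of a subscriber helper: connects a SUB socket to a local
    publisher and filters on the given topic.
    """
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.connect("tcp://localhost:%s" % port)
    socket.setsockopt_string(zmq.SUBSCRIBE, topic)  # Receive only this topic
    return socket

# Usage: read the two frames in the same order the publisher sends them
# sock = subscribe(5555, 'particle_links')
# topic = sock.recv_string()  # First frame: the topic string
# data = sock.recv_pyobj()    # Second frame: the unpickled payload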
Example #2
    def accumulate_links(self):
        self._accumulate_links_event.clear()
        socket = subscribe(self.publisher.port, 'particle_links')
        while not self._accumulate_links_event.is_set():
            if general_stop_event.is_set():
                break

            topic = socket.recv_string()
            data = socket.recv_pyobj()
            if self.locations.shape[0] == 0:
                self.locations = data[0]
            else:
                self.locations = self.locations.append(data[0])
Example #3
    def accumulate_links(self):
        """ Asynchronous method to store the links in this class. It looked like a good idea to keep this information in
        a single location, regardless of whether another process is listening on the topic. This in principle can be
        used to analyse data retrospectively.

        .. todo:: Still needs to clear the memory after several calls. Need to fit better in the architecture of the
            program
        """
        self._accumulate_links_event.clear()
        socket = subscribe(self.publisher.port, 'particle_links')
        while not self._accumulate_links_event.is_set():
            if general_stop_event.is_set():
                break

            topic = socket.recv_string()  # First frame: the topic (not used here)
            data = socket.recv_pyobj()  # Second frame: the payload; data[0] holds the new locations
            if self.locations.shape[0] == 0:
                self.locations = data[0]
            else:
                self.locations = self.locations.append(data[0])
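The docstring calls this an asynchronous method: it blocks on the subscriber socket until the accumulate event is set, so it is meant to run on its own thread. A hypothetical usage sketch (the `tracker` instance name is an assumption; only `accumulate_links` and `_accumulate_links_event` come from the example):

from threading import Thread

# Run the accumulator in the background so the caller stays responsive
accumulator = Thread(target=tracker.accumulate_links, daemon=True)
accumulator.start()

# ... acquisition and linking happen elsewhere, publishing on 'particle_links' ...

# Signal the loop to stop; it exits after the next message is processed
tracker._accumulate_links_event.set()
accumulator.join()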
Example #4
    def calculate_histogram(self):
        self.calculating_histograms = True
        locations = self.locations.copy()
        t1 = tp.filter_stubs(locations, self.config['process']['min_traj_length'])
        # t2 = t1[((t1['mass'] > self.config['process']['min_mass']) & (t1['size'] < self.config['process']['max_size']) &
        #          (t1['ecc'] < self.config['process']['max_ecc']))]
        im = tp.imsd(t1, self.config['process']['um_pixel'], self.config['process']['fps'])
        self.histogram_values = []
        for pcle in im:
            if general_stop_event.is_set():
                break

            data = im[pcle]
            t = data.index[~np.isnan(data.values)]
            val = data.values[~np.isnan(data.values)]
            try:
                slope, intercept, r, p, stderr = stats.linregress(np.log(t), np.log(val))
                self.histogram_values.append([slope, intercept])
            except Exception:  # Skip particles whose MSD cannot be fitted
                pass
        self.calculating_histograms = False
        self.publisher.publish('histogram', self.histogram_values)
Example #5
    def calculate_histogram(self):
        """ Starts a new thread to calculate the histogram of fit-parameters based on the mean-squared displacement of
        individual particles. It publishes the data on topic `histogram`.

        .. warning:: This method is incredibly expensive. Since it runs on a thread it can block other pieces of code,
        especially the GUI, which runs on the same process.

        .. TODO:: The histogram loops over all the particles. It would be better to skeep particles for which there is
            no new data

        .. TODO:: Make this method able to run on a separate process. So far is not possible because it relies on data
            stored on the class itself (`self.locations`).
        """
        self.calculating_histograms = True
        locations = self.locations.copy()
        t1 = tp.filter_stubs(locations,
                             self.config['process']['min_traj_length'])
        t2 = t1[((t1['mass'] > self.config['process']['min_mass']) &
                 (t1['size'] < self.config['process']['max_size']) &
                 (t1['ecc'] < self.config['process']['max_ecc']))]
        im = tp.imsd(t2, self.config['process']['um_pixel'],
                     self.config['process']['fps'])
        self.histogram_values = []
        for pcle in im:
            if general_stop_event.is_set():
                break

            data = im[pcle]  # MSD of this particle as a function of lag time
            t = data.index[~np.isnan(data.values)]  # Lag times with valid MSD values
            val = data.values[~np.isnan(data.values)]  # Corresponding MSD values
            try:
                slope, intercept, r, p, stderr = stats.linregress(
                    np.log(t), np.log(val))
                self.histogram_values.append([slope, intercept])
            except Exception:  # Skip particles whose MSD cannot be fitted
                pass
        self.calculating_histograms = False
        self.publisher.publish('histogram', self.histogram_values)
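The values that end up in the histogram come from a power-law fit to each particle's mean-squared displacement: fitting MSD(t) on a log-log scale with linregress gives the diffusion exponent as the slope and the logarithm of the generalized diffusion coefficient as the intercept. A self-contained sketch of the same fit on synthetic data (the trajectory parameters and noise model are made up purely for illustration):

import numpy as np
from scipy import stats

# Synthetic MSD of a normally diffusing particle in 2D: MSD(t) = 4*D*t (exponent 1)
fps = 30.0
t = np.arange(1, 100) / fps  # Lag times in seconds
D = 0.5  # Diffusion coefficient in um^2/s, arbitrary
msd = 4 * D * t * np.random.normal(1.0, 0.05, t.size)  # Add a little noise

# Same fit as in calculate_histogram: the slope is the diffusion exponent,
# the intercept is the logarithm of the generalized diffusion coefficient
slope, intercept, r, p, stderr = stats.linregress(np.log(t), np.log(msd))
print(slope, np.exp(intercept))  # Expect slope close to 1 and exp(intercept) close to 4*D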