Exemplo n.º 1
0
def trigger_expose_bulb(camera,
                        bulb,
                        start_time=None,
                        meta=None,
                        download_timeout=3.5):
    """Take a bulb exposure of `bulb` seconds, download the resulting
    image and tag it with debug metadata in its XMP header.

    Parameters:
        camera: camera descriptor providing bulb_begin/bulb_end commands.
        bulb: bulb hold time in seconds.
        start_time: monotonic time at which to open the shutter;
            defaults to "now + 50 ms", computed at call time.
        meta: optional list of (name, value) pairs to record in the XMP
            header in addition to the automatic debug entries.
        download_timeout: seconds to wait for the image download.

    Returns the filename of the downloaded image.
    Raises IOError when the camera response does not contain exactly
    one saved-file line.
    """
    # Compute the default at call time: the original default expression
    # `monotonic_time() + 0.05` was evaluated once at import, freezing a
    # stale timestamp for every subsequent call.
    if start_time is None:
        start_time = monotonic_time() + 0.05
    # Replace the shared mutable default ([] mutated by append below,
    # which made tags accumulate across calls) and copy the caller's
    # list so it is not modified either.
    meta = list(meta) if meta is not None else []

    end_time = start_time + bulb

    monotonic_alarm(start_time)
    gph_cmd(camera.bulb_begin)

    monotonic_alarm(end_time)
    gph_cmd(camera.bulb_end)

    o = gph_cmd('wait-event-and-download %is' % int(floor(download_timeout)),
                timeout=download_timeout)  # Should download the image

    # Materialize: on Python 3 filter() is lazy and has no len().
    s = list(filter(lambda x: x.startswith(_saving_file_response), o))
    if len(s) != 1:
        print("Couldn't retrieve file at the end of bulb exposure.")
        raise IOError

    filename = s[0][len(_saving_file_response):]

    exifd = ImageMetadata(filename)
    exifd.read()

    # Add a piece of debug info to exif header
    meta.append(('BulbHoldTime', bulb))
    meta.append(('TriggerStartTime', start_time))
    for (name, value) in meta:
        tag = 'Xmp.xmp.GPhotolapser.' + name
        exifd[tag] = XmpTag(tag, value=str(value))
    exifd.write()

    return filename
Exemplo n.º 2
0
    def run(self):
        """Continuously pull new interesting Log rows into self.buffer
        until self.closing is set.

        The buffer is capped at BUFFER_LEN entries. When
        self.simulate_time is set, rows are released only up to the
        simulated clock, replaying history at real-time pace.
        """
        session = Session()
        last_id = Log.get_last_id(session, simulate_time=self.simulate_time)
        if last_id is None:
            last_id = 0
        # Step back so the first query refills the buffer with history.
        last_id -= BUFFER_LEN

        # Offset between the real and the simulated clock, used to map
        # "now" back into the replayed time range.
        time_delta = None
        if self.simulate_time:
            time_delta = monotonic_time() - self.simulate_time

        while not self.closing:
            query = session.query(Log).filter(Log.id > last_id).filter(Log.interesting == True).order_by(Log.id)
            if time_delta is not None:
                log2 = session.query(Log).filter(Log.interesting == True).filter(Log.timestamp <= monotonic_time() - time_delta).order_by(Log.timestamp.desc()).first()
                # first() returns None when no row precedes the
                # simulated clock yet; the original dereferenced
                # log2.id unconditionally and crashed in that case.
                if log2 is None:
                    session.rollback()
                    time.sleep(SLEEP_TIME)
                    continue
                query = query.filter(Log.id <= log2.id)
            new_data = query.all()
            if len(new_data) > 0:
                last_id = new_data[-1].id

            self.buffer += new_data
            self.buffer = self.buffer[-BUFFER_LEN:]
            # Detach the fetched objects and release the transaction so
            # the next iteration observes fresh data.
            session.expunge_all()
            session.rollback()
            time.sleep(SLEEP_TIME)

        session.rollback()
        session.close()
Exemplo n.º 3
0
def trigger_capture(camera,
                    shutter,
                    start_time=None,
                    meta=None,
                    download_timeout=5.0):
    """Trigger a normal (non-bulb) capture at `start_time`, download the
    image and tag it with debug metadata in its XMP header.

    Parameters:
        camera: camera descriptor (currently unused beyond dispatch).
        shutter: expected shutter time, added to the download timeout.
        start_time: monotonic time at which to trigger; defaults to
            "now + 50 ms", computed at call time.
        meta: optional list of (name, value) pairs to record in the XMP
            header in addition to the automatic debug entries.
        download_timeout: extra seconds to wait for the image download.

    Returns the filename of the downloaded image.
    Raises IOError when the camera response does not contain exactly
    one saved-file line.
    """
    # Compute the default at call time: the original default expression
    # `monotonic_time() + 0.05` ran once at import, freezing a stale
    # timestamp for every subsequent call.
    if start_time is None:
        start_time = monotonic_time() + 0.05
    # Replace the shared mutable default ([] mutated by append below,
    # which made tags accumulate across calls) and copy the caller's
    # list so it is not modified either.
    meta = list(meta) if meta is not None else []

    monotonic_alarm(start_time)
    o = gph_cmd('capture-image-and-download',
                timeout=shutter + download_timeout)

    # Materialize: on Python 3 filter() is lazy and has no len().
    s = list(filter(lambda x: x.startswith(_saving_file_response), o))
    if len(s) != 1:
        print("Couldn't retrieve file at the end of capture.")
        raise IOError

    filename = s[0][len(_saving_file_response):]

    exifd = ImageMetadata(filename)
    exifd.read()

    # Add a piece of debug info to exif header
    meta.append(('TriggerStartTime', start_time))
    for (name, value) in meta:
        tag = 'Xmp.xmp.GPhotolapser.' + name
        exifd[tag] = XmpTag(tag, value=str(value))
    exifd.write()

    return filename
Exemplo n.º 4
0
def sighup_handler(signum, frame):
    """Reload the configuration file on SIGHUP and, when the cycle
    length changed, reset the module-level cycle reference time."""
    # The assignment below must target the module-level reftime; the
    # original lacked this declaration, so it silently created a local
    # variable and the new reftime was discarded.
    global cycle_reftime

    if not args.cfgfile:
        print('No configuration file to reload (received SIGHUP).')
        # The original fell through here and crashed on .seek(0) below.
        return

    cv = cfgs['cycle']

    args.cfgfile.seek(0)
    cfg_load(fp=args.cfgfile)

    if cv != cfgs['cycle']:
        print('Cycle length changed, updating reftime.')
        cycle_reftime = monotonic_time()
    print('Reloaded configurations.')
Exemplo n.º 5
0
	def _start_generator(self, condition):
		"""Start the generator for `condition`, or just refresh the
		running-condition bookkeeping when it is already running."""
		current_state = self._dbusservice['/State']
		remote_state = self._get_remote_switch_state()

		# Issue an actual start only when stopped, or when the remote
		# switch disagrees with our recorded state; otherwise log a
		# condition change if the triggering condition differs.
		if current_state == States.STOPPED or remote_state != current_state:
			self._dbusservice['/State'] = States.RUNNING
			self._update_remote_switch()
			self._starttime = monotonic_time.monotonic_time().to_seconds_double()
			self.log_info('Starting generator by %s condition' % condition)
		elif self._dbusservice['/RunningByCondition'] != condition:
			self.log_info('Generator previously running by %s condition is now running by %s condition'
						% (self._dbusservice['/RunningByCondition'], condition))

		# Always record which condition is (now) keeping it running.
		self._dbusservice['/RunningByCondition'] = condition
		self._dbusservice['/RunningByConditionCode'] = RunningConditions.lookup(condition)
Exemplo n.º 6
0
    def run(self):
        """Poll the HTTP endpoint in a loop until self.stop is set,
        appending newly received frames to self.frames.

        Each request passes the timestamp of the last frame seen so the
        server only returns newer data.
        """
        first_time = True
        while not self.stop:

            # Wait for some time (but not the first time)
            if not first_time:
                time.sleep(REQUEST_SLEEP)
            else:
                first_time = False

            # Perform the HTTP request
            try:
                response = urllib2.urlopen(REQUEST_URL % {'last_timestamp': self.last_timestamp if self.last_timestamp is not None else 0.0},
                                           timeout=REQUEST_TIMEOUT)
                # Reference time taken right after the request returns;
                # passed to parse_frames below to relate server
                # timestamps to the local clock.
                ref_time = monotonic_time()
            except urllib2.URLError:
                print "Request failed or timed out"
                continue

            # Load JSON data
            try:
                data = json.load(response)
            except:
                # NOTE(review): bare except also swallows
                # KeyboardInterrupt/SystemExit; consider narrowing to
                # ValueError.
                print "Could not parse JSON"
                continue

            # Compute timing data and push data in the queue
            try:
                # A refill means the local frame buffer ran dry.
                queue_refill = len(self.frames) == 0
                self.qle.parse_frames(data['data'], ref_time, self.last_timestamp, queue_refill)
                for frame in data['data']:
                    self.frames.append(frame)
                if len(data['data']) > 0:
                    self.last_timestamp = data['data'][-1]['timestamp']
            except:
                print "Malformed JSON data"
                # NOTE(review): this raise makes the continue below
                # unreachable — it looks like a debugging leftover;
                # confirm which behavior is intended.
                raise
                continue

            print "Last timestamp: %r" % (self.last_timestamp)
Exemplo n.º 7
0
    def actual_run(self):
        """Main capture loop: grab frames from self.cap, timestamp
        them, and push FrameInfo records onto self.queue until
        self.running is cleared or the capture source is exhausted.

        A FrameInfo with a False first field is pushed once to signal
        end-of-stream to the consumer.
        """
        first_timestamp = None

        # If the input is from a camera, wait a bit to allow it to
        # initialize.
        if not self.from_file:
            time.sleep(1)

        start_time = time.time()
        start_monotonic_time = monotonic_time()
        self.last_stats = start_monotonic_time

        while self.running:
            # Retrieve a frame; frame grabbing and actual retrieval
            # are separated in the hope to take timings as precise as
            # possible.
            if self.from_file:
                # Position of the current frame in the file, converted
                # from milliseconds to seconds.
                timestamp = self.cap.get(cv.CV_CAP_PROP_POS_MSEC) / 1000.0
            retval = self.cap.grab()
            current_time = time.time()
            current_monotonic_time = monotonic_time()
            if not retval:
                # Grab failed: signal end-of-stream and terminate.
                self.queue.put(FrameInfo(False, None, None, None))
                return
            retval, frame = self.cap.retrieve()
            if not retval:
                self.queue.put(FrameInfo(False, None, None, None))
                return
            self.count += 1

            # Retrieve timing information: if frame is taken from a
            # file, we have to take timing for the file itself. If we
            # are rate limiting, then this already provides
            # synchronization and we can take the current time as
            # playback time. If not, we arbitrarily set playback time
            # following the timestamp.
            if self.from_file:
                if self.rate_limited:
                    playback_time = current_time
                else:
                    playback_time = start_time + timestamp

            # If the frame is taken from a camera, then there does not
            # appear to be any reliable way to have timing information
            # directly from the framework. We have to measure
            # everything, hoping that the operating system does not
            # introduce too much noise.
            else:
                playback_time = current_time
                timestamp = current_monotonic_time
                # Timestamps are made relative to the first camera frame.
                if first_timestamp is None:
                    first_timestamp = timestamp
                timestamp -= first_timestamp

            # Print statistics every now and then
            if current_monotonic_time > self.last_stats + self.stats_interval:
                self.last_stats = current_monotonic_time
                logger.info("Queue size: %d", self.queue.qsize())
                # TODO: we may want to add more

            # Push the frame to the queue
            # Drop progressively more frames as the queue grows,
            # presumably to bound consumer latency — TODO confirm the
            # intended drop schedule (integer division of qsize by
            # base_size).
            if self.can_drop_frames and self.count % (self.queue.qsize() / self.base_size + 1) != 0:
                logger.debug("Dropped frame with timestamp %f and playback time %f", timestamp, playback_time)
            else:
                logger.debug("Produced frame with timestamp %f and playback time %f", timestamp, playback_time)
                self.queue.put(FrameInfo(True, timestamp, playback_time, frame), block=True)

            # Limit frame rate
            if self.rate_limited:
                # Sleep until the wall-clock moment matching the next
                # frame's position in the file.
                target_timestamp = self.cap.get(cv.CV_CAP_PROP_POS_MSEC) / 1000.0
                target_time = start_monotonic_time + target_timestamp
                if current_monotonic_time < target_time:
                    wait_for = target_time - current_monotonic_time
                    time.sleep(wait_for)
                    logger.debug("Waited for %f seconds.", wait_for)
Exemplo n.º 8
0
    def pick_frame(self):
        """Select and return the frame to display now, discarding queued
        frames that are already in the past.

        Returns None when playback is temporarily suspended (queue
        empty, or not enough buffer accumulated yet).
        """
        # Measure how much time was elapsed
        new_time = monotonic_time()
        if self.real_time is None:
            self.real_time = new_time
        elapsed = new_time - self.real_time
        self.real_time = new_time

        # Estimate the target queue length, compare it with the actual
        # length and decide the time warping factor (FIXME: actual_len
        # is slightly wrong, it implicitly discards one frame)
        target_len = self.qle.get_length_estimate()
        if len(self.queue) > 0:
            actual_len = self.queue[-1]["timestamp"] - self.queue[0]["timestamp"]
        else:
            actual_len = 0.0
        # NOTE(review): this divides by target_len; a zero estimate
        # would raise ZeroDivisionError — confirm get_length_estimate()
        # is always positive.
        warping = 1.0 + FramePicker.WARP_COEFF * float(actual_len - target_len) / target_len
        if warping < 1.0 - FramePicker.MAX_WARP_OFFSET:
            warping = 1.0 - FramePicker.MAX_WARP_OFFSET
        if warping > 1.0 + FramePicker.MAX_WARP_OFFSET:
            warping = 1.0 + FramePicker.MAX_WARP_OFFSET

        # If we have a lock, adjust the frame time and take the
        # appropriate frame
        if self.frame_time is not None:
            self.frame_time += warping * elapsed

        # If not, directly aim to the appropriate queue length
        else:
            # If we have enough buffer, we just cut the excess part;
            # if not, we better wait to receive some, so we leave
            # frame_time to None
            if actual_len >= target_len:
                self.frame_time = self.queue[-1]["timestamp"] - target_len

        # Discard frames until we arrive at frame_time: if we run out
        # of frames we temporarily suspend playback
        ret = None
        if self.frame_time is not None:
            while True:
                try:
                    frame = self.queue[0]
                except IndexError:
                    # Queue drained: drop the lock so the next call
                    # re-aims once enough buffer is available again.
                    self.frame_time = None
                    print "Queue empty!"
                    break

                if self.frame_time < frame["timestamp"]:
                    ret = frame
                    break

                self.queue.popleft()

        # If the selected frame is too much in the future, make a time
        # jump (otherwise there will be a static frame sitting on the
        # screen for a lot of time)
        if ret is not None and ret["timestamp"] > self.frame_time + FramePicker.MAX_SKIP:
            self.frame_time = ret["timestamp"]

        print "Queue length: %f (%d), target length: %f, warping: %f, frame time: %s, frame timestamp: %s, total latency: %s" % (actual_len, len(self.queue), target_len, warping, self.frame_time, ret["timestamp"] if ret is not None else None, time.time() - ret["timestamp"] if ret is not None else None)

        return ret
Exemplo n.º 9
0
    def _evaluate_startstop_conditions(self):
        """Evaluate every start/stop condition and start or stop the
        generator accordingly.

        Handles error-state transitions, daily counter registration,
        runtime accounting, manual start, autostart conditions, the
        stop-on-AC-input rule and the loss-of-communication policy.
        """
        if self.get_error() != Errors.NONE:
            # First evaluation after an error, log it
            if self._errorstate == 0:
                self._errorstate = 1
                self._dbusservice['/State'] = States.ERROR
                self.log_info('Error: #%i - %s, stop controlling remote.' %
                              (self.get_error(),
                               Errors.get_description(self.get_error())))
        elif self._errorstate == 1:
            # Error cleared
            self._errorstate = 0
            self.log_info(
                'Error state cleared, taking control of remote switch.')

        # Conditions will be evaluated in this order
        conditions = [
            'soc', 'acload', 'batterycurrent', 'batteryvoltage',
            'inverterhightemp', 'inverteroverload', 'stoponac1'
        ]
        start = False
        startbycondition = None
        activecondition = self._dbusservice['/RunningByCondition']
        today = calendar.timegm(datetime.date.today().timetuple())
        # NOTE(review): attribute name has a triple 'n' (runnning) —
        # presumably matched elsewhere; confirm before renaming.
        self._timer_runnning = False
        values = self._get_updated_values()
        connection_lost = False

        self._check_quiet_hours()

        # New day, register it
        if self._last_counters_check < today and self._dbusservice[
                '/State'] == States.STOPPED:
            self._last_counters_check = today
            self._update_accumulated_time()

        # Update current and accumulated runtime.
        # By performance reasons, accumulated runtime is only updated
        # once per 60s. When the generator stops is also updated.
        if self._dbusservice['/State'] == States.RUNNING:
            mtime = monotonic_time.monotonic_time().to_seconds_double()
            if (mtime - self._starttime) - self._last_runtime_update >= 60:
                self._dbusservice['/Runtime'] = int(mtime - self._starttime)
                self._update_accumulated_time()
            elif self._last_runtime_update == 0:
                self._dbusservice['/Runtime'] = int(mtime - self._starttime)

        # Manual start overrides everything else.
        if self._evaluate_manual_start():
            startbycondition = 'manual'
            start = True

        # Conditions will only be evaluated if the autostart functionality is enabled
        if self._settings['autostart'] == 1:

            if self._evaluate_testrun_condition():
                startbycondition = 'testrun'
                start = True

            # Evaluate value conditions
            for condition in conditions:
                start = self._evaluate_condition(
                    self._condition_stack[condition],
                    values[condition]) or start
                # Remember the first condition that requested a start.
                startbycondition = condition if start and startbycondition is None else startbycondition
                # Connection lost is set to true if the number of retries of one or more enabled conditions
                # >= RETRIES_ON_ERROR
                # NOTE(review): connection_lost is overwritten on each
                # iteration, so only the last enabled condition's retry
                # count matters — confirm this is intended.
                if self._condition_stack[condition]['enabled']:
                    connection_lost = self._condition_stack[condition][
                        'retries'] >= self.RETRIES_ON_ERROR

            # AC input 1 present cancels any non-manual/non-testrun start.
            if self._condition_stack['stoponac1'][
                    'reached'] and startbycondition not in [
                        'manual', 'testrun'
                    ]:
                start = False
                if self._dbusservice[
                        '/State'] == States.RUNNING and activecondition not in [
                            'manual', 'testrun'
                        ]:
                    self.log_info('AC input 1 available, stopping')

            # If none condition is reached check if connection is lost and start/keep running the generator
            # depending on '/OnLossCommunication' setting
            if not start and connection_lost:
                # Start always
                if self._settings['onlosscommunication'] == 1:
                    start = True
                    startbycondition = 'lossofcommunication'
                # Keep running if generator already started
                if self._dbusservice[
                        '/State'] == States.RUNNING and self._settings[
                            'onlosscommunication'] == 2:
                    start = True
                    startbycondition = 'lossofcommunication'

        # In error state we only ever stop; never issue a start.
        if not start and self._errorstate:
            self._stop_generator()

        if self._errorstate:
            return

        if start:
            self._start_generator(startbycondition)
        elif (self._dbusservice['/Runtime'] >=
              self._settings['minimumruntime'] * 60
              or activecondition == 'manual'):
            # Stop only after the minimum runtime has elapsed, unless
            # the generator was started manually.
            self._stop_generator()
Exemplo n.º 10
0
 def _get_monotonic_seconds(self):
     """Return the current monotonic clock reading as seconds (float)."""
     now = monotonic_time.monotonic_time()
     return now.to_seconds_double()
Exemplo n.º 11
0
 def toc(self, name):
     """Stop the timer previously started with tic(name) and log the
     elapsed time in milliseconds for the current frame."""
     assert name in self.timings
     assert self.timings[name][1] is None
     stop = monotonic_time()
     self.timings[name][1] = stop
     elapsed_ms = 1000.0 * (stop - self.timings[name][0])
     timings_logger.debug("Frame %d, %s took %f msecs", self.frame_num, name, elapsed_ms)
Exemplo n.º 12
0
 def tic(self, name):
     """Start a named timer; pair with toc(name) to measure elapsed time."""
     assert name not in self.timings
     started_at = monotonic_time()
     # The second slot stays None until toc() fills in the stop time.
     self.timings[name] = [started_at, None]
Exemplo n.º 13
0
                )  # Release Full
        gph_cmd('wait-event-and-download 3s',
                timeout=4)  # Should download the image
    except IOError:
        print('IOError on gphoto2 --- camera probably unplugged.')
    gph_close()
    exit(0)


# Install the SIGTERM handler so the daemon terminates cleanly
# (daemon_end closes the gphoto session before exiting).
signal.signal(signal.SIGTERM, daemon_end)

# Reference time for precise cycles. The offset of '+ 5' makes sure that the
# beginning of the first cycle starts sooner than cfgs['cycle'] seconds.
#
cycle_reftime = monotonic_time() + 5


def sighup_handler(signum, frame):
    """Reload the configuration file on SIGHUP and, when the cycle
    length changed, reset the module-level cycle reference time."""
    # The assignment below must target the module-level reftime; the
    # original lacked this declaration, so it silently created a local
    # variable and the new reftime was discarded.
    global cycle_reftime

    if not args.cfgfile:
        print('No configuration file to reload (received SIGHUP).')
        # The original fell through here and crashed on .seek(0) below.
        return

    cv = cfgs['cycle']

    args.cfgfile.seek(0)
    cfg_load(fp=args.cfgfile)

    if cv != cfgs['cycle']:
        print('Cycle length changed, updating reftime.')
        cycle_reftime = monotonic_time()
    print('Reloaded configurations.')