def logs(dt):
    """Return the DateLog entries recorded on the day containing *dt*.

    Day boundaries are taken in *dt*'s own timezone, converted to UTC
    timestamps for the query; results are ordered by time and converted
    back to the configured timezone.
    """
    start_timestamp = dt.floor("day").to("UTC").timestamp
    end_timestamp = dt.ceil("day").to("UTC").timestamp
    conn = _connection()
    with closing(conn.cursor()) as cursor:
        cursor.arraysize = 5
        # bind the timestamps rather than %-formatting them into the SQL —
        # consistent with log_active() and lets sqlite prepare the statement
        cursor.execute(
            "SELECT dt, state, reason FROM log"
            " WHERE dt BETWEEN ? AND ? ORDER BY dt",
            (start_timestamp, end_timestamp),
        )
        result = []
        while True:
            rows = cursor.fetchmany()
            if not rows:
                break
            for row in rows:
                result.append(
                    DateLog(
                        arrow.get(row[0]).to(cfg.time_zone), row[1], row[2]))
                logger.debug(result[-1])
    return result
def consistency_check():
    """Find and repair duplicate rows in the *active* table.

    Rows sharing a start_time are duplicates: exact duplicates (same
    end_time) are deleted outright; of the remaining overlapping ranges
    only the shortest is kept, the rest are deleted.
    """
    conn = _connection()
    logger.info("checking database consistency")
    found_issues = False
    # duplicate start_time
    with closing(conn.cursor()) as cursor:
        cursor.execute(
            "SELECT start_time FROM active GROUP BY start_time HAVING COUNT(*) > 1"
        )
        duplicate_start_times = [r[0] for r in cursor.fetchall()]
    if duplicate_start_times:
        found_issues = True
        logger.warning("found %s with duplicates",
                       plural(len(duplicate_start_times), "time"))
        duplicate_row_ids = []
        for start_time in duplicate_start_times:
            start_dt = arrow.get(start_time).to(cfg.time_zone)
            with closing(conn.cursor()) as cursor:
                # bind start_time rather than %-formatting it into the SQL
                cursor.execute(
                    "SELECT ROWID, end_time FROM active WHERE start_time = ?",
                    (start_time,))
                prev_end_time = None
                inexact_duplicates = []
                for row_id, end_time in cursor.fetchall():
                    end_dt = arrow.get(end_time).to(cfg.time_zone)
                    dtr = DateRange(start_dt, end_dt)
                    logger.debug(dtr.date_time_str)
                    # delete exact duplicates
                    if end_time == prev_end_time:
                        duplicate_row_ids.append(row_id)
                    else:
                        inexact_duplicates.append((row_id, dtr))
                    prev_end_time = end_time
                # keep just the shortest range: sort ascending by length and
                # drop the first (kept) entry from the deletion list
                # (removed a stray debug print() loop that was left in here)
                inexact_duplicates.sort(key=lambda r: r[1].minutes)
                inexact_duplicates.pop(0)
                duplicate_row_ids.extend([r[0] for r in inexact_duplicates])
        if duplicate_row_ids:
            # ROWIDs come from the database as integers, so joining them
            # into the IN (...) list is safe
            conn.execute("DELETE FROM active WHERE ROWID IN (%s)" %
                         ",".join([str(i) for i in duplicate_row_ids]))
            conn.commit()
            logger.info("deleted %s",
                        plural(len(duplicate_row_ids), "identical item"))
    if not found_issues:
        logger.info("no issues found")
def init_schema(conn=None):
    """Create the active/edits/log tables when they don't already exist.

    Falls back to the shared module connection when *conn* is omitted.
    """
    logger.debug("initialising database")
    conn = conn or _connection()
    conn.execute('PRAGMA encoding = "UTF-8"')
    for ddl in (
            """
            CREATE TABLE IF NOT EXISTS active (
                start_time INTEGER NOT NULL,
                end_time INTEGER NOT NULL
            )
            """,
            """
            CREATE TABLE IF NOT EXISTS edits (
                dt INTEGER NOT NULL,
                minutes INTEGER NOT NULL,
                reason TEXT NOT NULL
            )
            """,
            """
            CREATE TABLE IF NOT EXISTS log (
                dt INTEGER NOT NULL,
                state TEXT NOT NULL,
                reason TEXT NOT NULL
            )
            """,
    ):
        conn.execute(ddl)
def _prepare_subprocess_args(command, kwargs):
    """Normalise subprocess arguments and log the equivalent shell command.

    Stringifies every command element and defaults the output encoding to
    UTF-8; returns the adjusted (command, kwargs) pair.
    """
    command = [str(part) for part in command]
    cwd = kwargs.get("cwd", ".")
    if cwd != ".":
        logger.debug("$ cd '%s'", cwd)
    logger.debug("$ %s", _command_shell_str(command))
    kwargs.setdefault("encoding", "UTF-8")
    return command, kwargs
def check_output(command, debug_log_output=True, **kwargs):
    """Run *command* via subprocess.check_output, optionally echoing its
    output line-by-line at debug level."""
    command, kwargs = _prepare_subprocess_args(command, kwargs)
    result = subprocess.check_output(command, **kwargs)
    if debug_log_output and logger.level == logging.DEBUG:
        for text_line in result.splitlines():
            logger.debug("> %s", text_line)
    return result
def set_away(*, away=False, back=False, reason=None):
    """Record an away/back state change: log it to the database, maintain
    the marker files, and launch the matching user hook script if present.
    """
    assert away or back
    assert reason
    if away:
        logger.debug("setting away: %s", reason)
        database.log_state_change("away", reason)
        database.remove_empty_ranges()
        cfg.away_now_file.touch(exist_ok=True)
        cfg.is_away_file.touch(exist_ok=True)
        hook = cfg.on_away_file
    else:
        logger.debug("setting back: %s", reason)
        database.log_state_change("back", reason)
        for marker in (cfg.is_away_file, cfg.away_now_file):
            with suppress(FileNotFoundError):
                marker.unlink()
        hook = cfg.on_back_file
    if hook.exists():
        logger.debug("executing: %s", hook)
        subprocess.Popen([str(hook)], cwd=cfg.dot_path)
def _connection():
    """Return the shared sqlite3 connection, creating it on first use.

    Initialises the schema when the database file did not exist yet, and
    installs an SQL trace callback at debug log level.
    """
    if G.conn:
        return G.conn
    filename = cfg.db_file
    logger.debug("database: %s", filename)
    needs_schema = not filename.exists()
    G.conn = sqlite3.connect(filename)
    G.conn.execute("PRAGMA foreign_keys = off")
    G.conn.execute("PRAGMA temp_store = MEMORY")
    if logger.level == logging.DEBUG:
        G.conn.set_trace_callback(lambda sql: logger.debug("SQL> %s", sql))
    if needs_schema:
        init_schema(G.conn)
    return G.conn
def _load_settings():
    """Read settings.json, filling in defaults for any missing keys."""
    try:
        logger.debug("loading %s", settings_file)
        with open(settings_file) as f:
            settings = json.load(f)
    except FileNotFoundError:
        logger.debug("settings not found")
        settings = {}
    defaults = {
        "work_week": 40,       # hours
        "auto_away_time": 30,  # minutes
    }
    for key, value in defaults.items():
        settings.setdefault(key, value)
    return settings
def _install_state_scripts():
    """Copy the bundled on-away/on-back scripts into ~/.timetracker.

    No-op when the user already has either hook script installed; only
    copies files that don't already exist at the destination.
    """
    import shutil

    import cfg
    from harness import logger

    if cfg.on_away_file.exists() or cfg.on_back_file.exists():
        return
    logger.info("creating on-away and on-back scripts in ~/.timetracker")
    for src_file in (cfg.src_path / "scripts").glob("*"):
        dst_file = cfg.dot_path / src_file.name
        if dst_file.exists():
            continue
        logger.debug("%s -> %s", src_file, dst_file)
        shutil.copy(str(src_file), str(dst_file))
def daemon():
    """Run the update loop forever; refuses to start if another daemon
    already holds the lock file.

    Acquires an exclusive non-blocking lock, records the pid, then invokes
    the "update" command roughly once a minute.
    """
    # handle is kept on G so the lock stays held for the process lifetime
    G.lock_fh = open(cfg.lock_file, "w")
    try:
        fcntl.lockf(G.lock_fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        # EACCES/EAGAIN mean another process holds the lock; anything
        # else is unexpected and propagated as-is
        if e.errno not in (errno.EACCES, errno.EAGAIN):
            raise
        raise Error("daemon already running")
    logger.info("daemon started (pid %s)", os.getpid())
    atexit.register(exit_handler)
    cfg.pid_file.write_text("%s\n" % os.getpid())
    while True:
        start_time = time.time()
        invoke(["update"])
        # target one update per minute, subtracting the time invoke() took
        delay = 60 - (time.time() - start_time)
        if delay > 0:
            logger.debug("sleeping for %ss", delay)
            time.sleep(delay)
def log_active():
    """Record the current minute as active time.

    Extends a recently-ended range when one exists (fuzzy-matched within a
    few minutes), otherwise starts a new single-minute range.
    """
    conn = _connection()
    now = arrow.now(tz=cfg.time_zone).floor("minute").to("UTC")
    # look for an existing range (fuzz to a few minutes)
    window = (now.shift(minutes=-2).timestamp, now.shift(minutes=1).timestamp)
    with closing(conn.cursor()) as cursor:
        cursor.execute(
            "SELECT start_time,end_time FROM active WHERE end_time BETWEEN ? AND ?",
            window,
        )
        existing = cursor.fetchone()
    if not existing:
        # insert new range
        logger.debug("creating new range: %s (%s)", now, now.timestamp)
        conn.execute(
            "INSERT INTO active(start_time, end_time) VALUES (?, ?)",
            (now.timestamp, now.timestamp),
        )
        conn.commit()
        return
    # update existing
    if logger.level == logging.DEBUG:
        start_dt = arrow.get(existing[0])
        end_dt = arrow.get(existing[1])
        logger.debug(
            "existing range: %s - %s (%s - %s)",
            start_dt,
            end_dt,
            start_dt.timestamp,
            end_dt.timestamp,
        )
    if now.timestamp != existing[1]:
        conn.execute(
            "UPDATE active SET end_time=? WHERE start_time=?",
            (now.timestamp, existing[0]),
        )
        conn.commit()
def edits(dt):
    """Return the DateEdit entries for the day *dt*, in insertion order.

    The edits table keys rows on an integer YYYYMMDD date.
    """
    ymd = dt.format("YYYYMMDD")
    conn = _connection()
    with closing(conn.cursor()) as cursor:
        cursor.arraysize = 5
        # bind the date key instead of splicing it into the SQL; bound as
        # int to preserve the original unquoted (numeric) comparison
        cursor.execute(
            "SELECT minutes, reason FROM edits WHERE dt = ? ORDER BY ROWID",
            (int(ymd),))
        result = []
        while True:
            rows = cursor.fetchmany()
            if not rows:
                break
            for row in rows:
                result.append(DateEdit(ymd, row[0], row[1]))
                logger.debug(result[-1])
    return result
def check_outputs(command, **kwargs):
    """Run *command* capturing stdout and stderr separately.

    Returns (stdout, stderr); raises CalledProcessError with the combined
    output when the command exits non-zero.
    """
    assert "stdout" not in kwargs
    assert "stderr" not in kwargs
    command, kwargs = _prepare_subprocess_args(command, kwargs)
    process = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
    stdout, stderr = process.communicate()
    if logger.level == logging.DEBUG:
        for fmt, text in (("out> %s", stdout), ("err> %s", stderr)):
            for line in text.splitlines():
                logger.debug(fmt, line)
    retcode = process.poll()
    if retcode:
        combined = "%s\n%s" % (stdout.rstrip(), stderr.rstrip())
        raise subprocess.CalledProcessError(retcode, command, output=combined)
    return stdout, stderr
import json
from datetime import datetime
from pathlib import Path
from dateutil import tz
from harness import logger

# paths
# per-user state directory; created on first import
dot_path = Path("~/.timetracker").expanduser()
if not dot_path.exists():
    logger.debug("creating %s", dot_path)
    dot_path.mkdir()
# repository root, derived from this file's location
root_path = Path(__file__).parent.parent.resolve()
src_path = root_path / "src"
settings_file = dot_path / "settings.json"
# bundled terminal-notifier binary inside the .app package
terminal_notifier = (src_path / "time-tracker.app/Contents/Resources/"
                     "terminal-notifier.app/Contents/MacOS/terminal-notifier")


def _load_settings():
    # load settings, with fallback to default values
    # NOTE(review): this chunk appears truncated here — the defaults and
    # the return statement are not visible in this view; confirm against
    # the full file before relying on this definition.
    try:
        logger.debug("loading %s", settings_file)
        with open(settings_file) as f:
            settings = json.load(f)
    except FileNotFoundError:
        logger.debug("settings not found")
        settings = {}
def active_ranges(start_dt, end_dt):
    """Return DateRange items for active time overlapping [start_dt, end_dt].

    Ranges are truncated to the requested window, split at midnight so each
    returned range lies within a single day, and sorted.
    """
    conn = _connection()
    start_time = start_dt.to("UTC").timestamp
    end_time = end_dt.to("UTC").timestamp
    logger.debug("%s (%s) - %s (%s)", start_dt, start_time, end_dt, end_time)
    # four overlap cases OR-ed together: range covers the window start and
    # end, covers only the start, covers only the end, or lies fully inside
    # the window (timestamps are integers from arrow, so the %-formatting
    # here cannot inject)
    where = "(%s)" % ") OR (".join([
        " AND ".join([
            "(%s BETWEEN start_time AND end_time)" % start_time,
            "(%s BETWEEN start_time AND end_time)" % end_time,
        ]),
        " AND ".join([
            "(%s BETWEEN start_time AND end_time)" % start_time,
            "(%s > end_time)" % end_time,
        ]),
        " AND ".join([
            "(%s < start_time)" % start_time,
            "(%s BETWEEN start_time AND end_time)" % end_time,
        ]),
        " AND ".join(
            ["(start_time > %s)" % start_time, "(end_time < %s)" % end_time]),
    ])
    with closing(conn.cursor()) as cursor:
        cursor.arraysize = 10
        cursor.execute(
            "SELECT start_time, end_time FROM active WHERE %s ORDER BY start_time"
            % where)
        result = []
        while True:
            rows = cursor.fetchmany()
            if not rows:
                break
            for row in rows:
                row_start_dt = arrow.get(row[0]).to(cfg.time_zone)
                row_end_dt = arrow.get(row[1]).to(cfg.time_zone)
                logger.debug(
                    "%s - %s (%s)",
                    row_start_dt.format("YYYY-MM-DD HH:mm"),
                    row_end_dt.format("YYYY-MM-DD HH:mm"),
                    hms((row_end_dt - row_start_dt).total_seconds()),
                )
                # truncate if the range extends beyond requested start or end
                if row_start_dt < start_dt:
                    logger.debug(
                        "truncating start %s to %s",
                        row_start_dt.format("YYYY-MM-DD HH:mm"),
                        start_dt.format("YYYY-MM-DD HH:mm"),
                    )
                    row_start_dt = start_dt.clone()
                if row_end_dt > end_dt:
                    logger.debug(
                        "trimming end %s to %s",
                        row_end_dt.format("YYYY-MM-DD HH:mm"),
                        end_dt.format("YYYY-MM-DD HH:mm"),
                    )
                    row_end_dt = end_dt.clone()
                result.append(DateRange(row_start_dt, row_end_dt))
    # split ranges which span midnight
    # NOTE: appending to result while iterating it is deliberate — a
    # remainder that still spans another midnight is processed when the
    # for-loop reaches it
    for dtr in result:
        while dtr.start_ymd != dtr.end_ymd:
            new_end = dtr.start_dt.clone().ceil("day")
            # NOTE(review): shifting ceil("day") (23:59:59.999999) by one
            # minute lands at 00:00:59.999999 of the next day, not 00:00 —
            # presumably harmless at minute granularity, but confirm
            new_start = new_end.clone().shift(minutes=1)
            old_end = dtr.end_dt
            dtr.end_dt = new_end
            result.append(DateRange(new_start, old_end))
    result.sort()
    return result
def update():
    """One daemon tick: reconcile system idle time with away/back state.

    Handles, in order: an explicit away-now request (marker file returned
    by state.away_now_file()), auto-back when activity resumes, auto-away
    once the idle threshold is exceeded, and otherwise logging active time.
    """
    idle_time = state.idle_time()
    away_now_file = state.away_now_file()
    is_away = state.is_away()
    logger.debug("idle time: %ss", idle_time)
    logger.debug("is away: %s", is_away)
    # check if there's a request to manually set as away
    if away_now_file:
        logger.debug("found away_now file")
        # if we're already away we can delete the away-now file to enable auto-back
        if is_away:
            if idle_time >= cfg.auto_away_time * 60:
                logger.debug("removing away_now file")
                away_now_file.unlink()
            else:
                logger.debug("nothing to do")
        # otherwise mark as away
        else:
            # note the ordering: notify first, then record the state change
            ui.notify(away=True)
            state.set_away(away=True, reason="away-now requested")
        return
    # auto-back
    if is_away and idle_time <= cfg.idle_check_time:
        logger.debug("state: auto back")
        # must be back
        state.set_away(back=True, reason="idle for %s" % mmss(idle_time))
        ui.notify(back=True)
        database.log_active()
    # auto-away
    elif idle_time >= cfg.auto_away_time * 60:
        logger.debug("state: auto away")
        if not is_away:
            state.set_away(away=True, reason="idle for %s" % mmss(idle_time))
            ui.notify(away=True)
        # away-now file is stale
        # (defensive: the away-now branch above normally returns early)
        if away_now_file:
            away_now_file.unlink()
    # not away
    else:
        logger.debug("state: not away")
        database.log_active()