def enqueue(self, data, job_id=None, file_id=None, md5=None, bytes_total=None):
    """ Enqueue task with specified data.

    :param data: JSON-serializable payload stored in the task row.
    :param job_id: optional owning job ID.
    :param file_id: optional source file ID.
    :param md5: optional MD5 checksum of the file.
    :param bytes_total: optional total byte count for the download.
    :return: the number of rows inserted (always 1).
    """
    jsonified_data = json.dumps(data)
    with self.storage.transaction() as cursor:
        # NOTE: 'unixepoch' is single-quoted: in SQL, double quotes denote
        # identifiers, and SQLite only accepted "unixepoch" here through its
        # deprecated double-quoted-string (DQS) compatibility misfeature.
        apsw_helpers.query(cursor, '''
            INSERT INTO %s
                (created, data, job_id, file_id, md5, bytes_total)
            VALUES
                (datetime(:now, 'unixepoch'), :data, :job_id,
                 :file_id, :md5, :bytes_total)
        ''' % self.table_name,
            now=unix_timestamp(datetime.utcnow()),
            data=jsonified_data,
            job_id=job_id,
            file_id=file_id,
            md5=md5,
            bytes_total=bytes_total)

    # Return the number of rows we inserted.
    return 1
def ping(self):
    """ Notify the queue that this task is still active.

    Raises AlreadyFinished if the task already completed, and
    TaskDoesNotExist if this execution no longer owns a row whose
    lease (last_contact within execution_ttl) is still valid.
    """
    if self.finished is not None:
        raise AlreadyFinished()

    table = self._queue.table_name
    ttl = self._queue.execution_ttl

    # First confirm this execution still owns a live row.
    with self.storage.cursor() as cursor:
        live_row = apsw_helpers.get(cursor, '''
            SELECT * from %s
            WHERE id = :task_id
            AND execution_id = :execution_id
            AND last_contact > datetime(:now, 'unixepoch', '-%s second')
        ''' % (table, ttl),
            now=unix_timestamp(datetime.utcnow()),
            task_id=self.task_id,
            execution_id=self.execution_id)

    if not live_row:
        raise TaskDoesNotExist()

    # Then refresh the lease timestamp and bump the update counter.
    with self.storage.transaction() as cursor:
        apsw_helpers.query(cursor, '''
            UPDATE %s SET
                last_contact=datetime(:now, 'unixepoch'),
                update_count=update_count + 1
            WHERE id = :task_id
            AND execution_id = :execution_id
            AND last_contact > datetime(:now, 'unixepoch', '-%s second')
        ''' % (table, ttl),
            now=unix_timestamp(datetime.utcnow()),
            task_id=self.task_id,
            execution_id=self.execution_id)
def bulk_finish(self, result='cancelled', extra_predicate=None):
    """ Finish every unfinished task matching the optional predicate.

    :param result: result string written to the finished rows.
    :param extra_predicate: optional (sql, args) pair further narrowing
        which rows are touched.
    :return: the number of rows that were finished.
    """
    predicate_sql, predicate_args = (
        self._build_extra_predicate(extra_predicate))

    with self.storage.transaction() as cursor:
        stamp = unix_timestamp(datetime.utcnow())

        # Snapshot the rows we are about to finish so we can report a count.
        pending = apsw_helpers.query(cursor, '''
            SELECT * from %s
            WHERE finished IS NULL %s
        ''' % (self.table_name, predicate_sql), **predicate_args)

        apsw_helpers.query(cursor, '''
            UPDATE %s SET
                execution_id = 0,
                last_contact = datetime(:now, 'unixepoch'),
                update_count = update_count + 1,
                steps = '[]',
                started = datetime(:now, 'unixepoch'),
                finished = datetime(:now, 'unixepoch'),
                result = :result
            WHERE finished IS NULL %s
        ''' % (self.table_name, predicate_sql),
            now=stamp,
            result=result,
            **predicate_args)

        return len(pending)
def _save(self, finished=None, steps=None, result=None, data=None):
    """ Persist this task's mutable fields to its queue row.

    Any argument left as None falls back to the current in-memory value
    (self.finished / self.steps / self.result / self.data).  Raises
    TaskDoesNotExist if this execution no longer owns a live row.
    """
    # Resolve the effective finished timestamp before issuing SQL.
    finished = finished if finished is not None else self.finished
    with self.storage.transaction() as cursor:
        # Write all fields; the WHERE clause only matches if our
        # execution still holds an unexpired lease on the row.
        apsw_helpers.query(cursor, '''
            UPDATE %s SET
                last_contact=datetime(:now, 'unixepoch'),
                update_count=update_count + 1,
                steps=:steps,
                finished=datetime(:finished, 'unixepoch'),
                result=:result,
                bytes_downloaded=:bytes_downloaded,
                download_rate=:download_rate,
                data=:data
            WHERE id = :task_id
            AND execution_id = :execution_id
            AND last_contact > datetime(:now, 'unixepoch', '-%s second')
        ''' % (self._queue.table_name, self._queue.execution_ttl),
            now=unix_timestamp(datetime.utcnow()),
            task_id=self.task_id,
            execution_id=self.execution_id,
            steps=json.dumps(steps if steps is not None else self.steps),
            # datetime(NULL, 'unixepoch') yields NULL, so an unfinished
            # task keeps a NULL finished column.
            finished=unix_timestamp(finished) if finished else None,
            result=result if result is not None else self.result,
            bytes_downloaded=self.bytes_downloaded,
            download_rate=self.download_rate,
            data=json.dumps(data if data is not None else self.data))
        # Verify the UPDATE actually matched: a successful UPDATE set
        # last_contact to :now, so this SELECT finds the row; if the
        # lease was lost the UPDATE was a no-op and this returns nothing.
        affected_row = apsw_helpers.get(cursor, '''
            SELECT * from %s
            WHERE id = :task_id
            AND execution_id = :execution_id
            AND last_contact > datetime(:now, 'unixepoch', '-%s second')
        ''' % (self._queue.table_name, self._queue.execution_ttl),
            now=unix_timestamp(datetime.utcnow()),
            task_id=self.task_id,
            execution_id=self.execution_id)
    if not affected_row:
        raise TaskDoesNotExist()
    else:
        # Mirror the persisted values into the in-memory attributes,
        # but only for fields the caller explicitly supplied.
        if steps is not None:
            self.steps = steps
        if finished is not None:
            self.finished = finished
        if result is not None:
            self.result = result
        if data is not None:
            self.data = data
def _dequeue_task(self, extra_predicate=None):
    """ Claim one queued task and return a handler for it.

    Returns None when nothing is queued; otherwise returns a
    TaskHandlerClass bound to the freshly generated execution_id.
    """
    # Unique lease token for this claim attempt.
    execution_id = uuid.uuid1().hex
    extra_predicate_sql, extra_predicate_args = (
        self._build_extra_predicate(extra_predicate))
    task_id = None
    with self.storage.transaction() as cursor:
        while task_id is None:
            possible_tasks = self._query_queued(
                cursor, 'id, created, data', limit=5,
                extra_predicate=extra_predicate)
            if not possible_tasks:
                # nothing to dequeue
                return None
            for possible_task in possible_tasks:
                # attempt to claim the task
                now = unix_timestamp(datetime.utcnow())
                apsw_helpers.query(cursor, '''
                    UPDATE %s SET
                        execution_id = :execution_id,
                        last_contact = datetime(:now, 'unixepoch'),
                        update_count = update_count + 1,
                        started = datetime(:now, 'unixepoch'),
                        steps = '[]'
                    WHERE id = :task_id
                    AND finished IS NULL
                    AND (
                        execution_id IS NULL
                        OR last_contact <= datetime(:now, 'unixepoch', '-%s second')
                    ) %s
                ''' % (self.table_name, self.execution_ttl, extra_predicate_sql),
                    now=now,
                    execution_id=execution_id,
                    task_id=possible_task.id,
                    **extra_predicate_args)
                # NOTE(review): the claim is assumed to have succeeded —
                # the row count affected by the UPDATE is never checked,
                # so task_id is set unconditionally.  Because of that,
                # the while-loop can never iterate a second time and the
                # 5-row candidate fetch only ever uses its first row.
                # Presumably safe only if the enclosing transaction
                # serializes all dequeuers — confirm.
                task_id = possible_task.id
                break
    return self.TaskHandlerClass(execution_id=execution_id,
                                 task_id=task_id,
                                 queue=self)
def run(self): self.logger = log.get_logger('CancelJob') self.tasks = Tasks() rows_affected = 0 if self.options.multiple: rows_affected = self.tasks.bulk_finish(extra_predicate=("job_id LIKE :job_id", { 'job_id': self.options.job_id + '%%' })) else: loader_storage = LoaderStorage() with loader_storage.transaction() as cursor: jobs = apsw_helpers.query(cursor, ''' SELECT id FROM jobs WHERE id LIKE :job_id ''', job_id=self.options.job_id + '%') if len(jobs) > 1: print len(jobs), 'jobs match this job ID:' print '\n'.join([ row.id for row in jobs ]) print 'Please use a more specific prefix or specify the `--multiple` flag if you' print 'would like to cancel more than one job.' sys.exit(1) elif len(jobs) == 0: print '0 jobs match this job ID.' sys.exit(1) else: rows_affected = self.tasks.bulk_finish(extra_predicate=("job_id = :job_id", { 'job_id': jobs[0].id })) job_suffix = '(s)' if self.options.multiple else '' task_suffix = 's' if not rows_affected == 1 else '' print CANCEL_JOB_MESSAGE % (job_suffix, self.options.job_id, rows_affected, task_suffix)
def ready(self):
    """ Returns True if the tables have been setup, False otherwise """
    with self.storage.cursor() as cursor:
        # 'table' is single-quoted: double quotes denote identifiers in
        # SQL, and SQLite only accepted "table" here via its deprecated
        # double-quoted-string (DQS) compatibility misfeature.
        rows = apsw_helpers.query(
            cursor,
            "SELECT name FROM sqlite_master WHERE type = 'table'")
    tables = [row.name for row in rows]
    return all(table_name in tables for table_name in self._tables)
def check_bootstrapped():
    """ Return True when every model's table exists in loader storage.

    Checks sqlite_master for one table per entry in MODELS.
    """
    loader_storage = storage.LoaderStorage()
    with loader_storage.cursor() as cursor:
        # 'table' is single-quoted: double quotes are identifier quotes
        # in SQL; SQLite only tolerated "table" through the deprecated
        # double-quoted-string (DQS) misfeature.
        rows = apsw_helpers.query(
            cursor,
            "SELECT name FROM sqlite_master WHERE type = 'table'")
    tables = [row.name for row in rows]
    return all(model in tables for model in MODELS)
def _query_queued(self, cursor, projection, limit=None, extra_predicate=None):
    """ Return queued task rows, oldest first.

    A task counts as queued when it is unfinished and either unclaimed
    or held by an execution whose lease has expired.
    """
    predicate_sql, predicate_args = (
        self._build_extra_predicate(extra_predicate))

    sql = '''
        SELECT %s FROM %s
        WHERE finished IS NULL
        AND (
            execution_id IS NULL
            OR last_contact <= datetime(:now, 'unixepoch', '-%s second')
        ) %s
        ORDER BY created ASC
        LIMIT :limit
    ''' % (projection, self.table_name, self.execution_ttl, predicate_sql)

    # No caller-supplied limit means "effectively unbounded".
    effective_limit = sys.maxsize if limit is None else limit
    return apsw_helpers.query(
        cursor, sql,
        now=unix_timestamp(datetime.utcnow()),
        limit=effective_limit,
        **predicate_args)
def requeue(self):
    """ Return this task to the queue so another worker can claim it.

    Raises StepRunning if any step is still executing, AlreadyFinished
    if the task completed, and TaskDoesNotExist if this execution no
    longer owns a live row.
    """
    if self._running_steps() != 0:
        raise StepRunning()
    if self.finished is not None:
        raise AlreadyFinished()

    # Work on a copy of the payload; 'time_left' is per-execution
    # progress that should not survive a requeue.
    data = copy.deepcopy(self.data)
    data.pop('time_left', None)

    with self._queue.storage.transaction() as cursor:
        affected_row = apsw_helpers.get(cursor, '''
            SELECT * from %s
            WHERE id = :task_id
            AND execution_id = :execution_id
            AND last_contact > datetime(:now, 'unixepoch', '-%s second')
        ''' % (self._queue.table_name, self._queue.execution_ttl),
            now=unix_timestamp(datetime.utcnow()),
            task_id=self.task_id,
            execution_id=self.execution_id)
        if not affected_row:
            raise TaskDoesNotExist()
        apsw_helpers.query(cursor, '''
            UPDATE %s SET
                last_contact=NULL,
                update_count=update_count + 1,
                started=NULL,
                steps=NULL,
                execution_id=NULL,
                finished=NULL,
                data=:data,
                result=NULL
            WHERE id = :task_id
            AND execution_id = :execution_id
            AND last_contact > datetime(:now, 'unixepoch', '-%s second')
        ''' % (self._queue.table_name, self._queue.execution_ttl),
            data=json.dumps(data),
            now=unix_timestamp(datetime.utcnow()),
            task_id=self.task_id,
            execution_id=self.execution_id)

    # Clear per-execution progress counters only after the requeue
    # succeeded.  Previously these were reset before the existence
    # check, so a TaskDoesNotExist failure corrupted in-memory state.
    self.bytes_downloaded = None
    self.download_rate = None
def online_servers(self):
    """ Return server row(s) seen within the last SERVER_TTL seconds.

    NOTE(review): despite the plural name, LIMIT 1 means at most one
    row (the most recently seen) is returned — confirm intent.
    """
    sql = '''
        SELECT pid from servers
        WHERE last_contact >= DATETIME('now', '-%s second')
        ORDER BY last_contact DESC
        LIMIT 1
    ''' % (SERVER_TTL,)
    with self.storage.cursor() as cursor:
        return apsw_helpers.query(cursor, sql)
def bulk_finish(self, result='cancelled', extra_predicate=None):
    """ Finish every *queued* task matching the optional predicate.

    Only unfinished tasks that are unclaimed, or whose execution lease
    has expired, are touched.  Returns the number of rows finished.
    """
    predicate_sql, predicate_args = (
        self._build_extra_predicate(extra_predicate))

    # Both statements below must agree on what "queued" means:
    # unfinished, and either never claimed or lease-expired.
    queued_filter = '''
        WHERE finished IS NULL
        AND (
            execution_id IS NULL
            OR last_contact <= datetime(:now, 'unixepoch', '-%s second')
        ) %s
    ''' % (self.execution_ttl, predicate_sql)

    with self.storage.transaction() as cursor:
        stamp = unix_timestamp(datetime.utcnow())

        # Snapshot the queued rows so we can report how many we touch.
        queued_rows = apsw_helpers.query(
            cursor,
            ('SELECT * from %s ' % self.table_name) + queued_filter,
            now=stamp, **predicate_args)

        apsw_helpers.query(
            cursor,
            ('''
            UPDATE %s SET
                execution_id = 0,
                last_contact = datetime(:now, 'unixepoch'),
                update_count = update_count + 1,
                steps = '[]',
                started = datetime(:now, 'unixepoch'),
                finished = datetime(:now, 'unixepoch'),
                result = :result
            ''' % self.table_name) + queued_filter,
            now=stamp, result=result, **predicate_args)

        return len(queued_rows)
def _dequeue_task(self, extra_predicate=None):
    """ Claim one queued task and return a handler for it.

    Returns None when nothing is queued; otherwise returns a
    TaskHandlerClass bound to the freshly generated execution_id.
    """
    # Unique lease token for this claim attempt.
    execution_id = uuid.uuid1().hex
    extra_predicate_sql, extra_predicate_args = (
        self._build_extra_predicate(extra_predicate))
    task_id = None
    with self.storage.transaction() as cursor:
        while task_id is None:
            possible_tasks = self._query_queued(cursor,
                                                'id, created, data',
                                                limit=5,
                                                extra_predicate=extra_predicate)
            if not possible_tasks:
                # nothing to dequeue
                return None
            for possible_task in possible_tasks:
                # attempt to claim the task
                now = unix_timestamp(datetime.utcnow())
                apsw_helpers.query(cursor, '''
                    UPDATE %s SET
                        execution_id = :execution_id,
                        last_contact = datetime(:now, 'unixepoch'),
                        update_count = update_count + 1,
                        started = datetime(:now, 'unixepoch'),
                        steps = '[]'
                    WHERE id = :task_id
                    AND finished IS NULL
                    AND (
                        execution_id IS NULL
                        OR last_contact <= datetime(:now, 'unixepoch', '-%s second')
                    ) %s
                ''' % (self.table_name, self.execution_ttl, extra_predicate_sql),
                    now=now,
                    execution_id=execution_id,
                    task_id=possible_task.id,
                    **extra_predicate_args)
                # NOTE(review): task_id is set without checking whether
                # the UPDATE matched a row, so the while-loop never
                # retries and only the first of the five fetched
                # candidates is ever used.  Presumably safe only if the
                # transaction serializes all dequeuers — confirm.
                task_id = possible_task.id
                break
    return self.TaskHandlerClass(execution_id=execution_id,
                                 task_id=task_id,
                                 queue=self)
def requeue(self):
    """ Return this task to the queue so another worker can claim it.

    Raises StepRunning if any step is still executing, AlreadyFinished
    if the task completed, and TaskDoesNotExist if this execution no
    longer owns a live row.
    """
    if self._running_steps() != 0:
        raise StepRunning()
    if self.finished is not None:
        raise AlreadyFinished()
    # Confirm this execution still holds an unexpired lease on the row.
    with self.storage.cursor() as cursor:
        affected_row = apsw_helpers.get(cursor, '''
            SELECT * from %s
            WHERE id = :task_id
            AND execution_id = :execution_id
            AND last_contact > datetime(:now, 'unixepoch', '-%s second')
        ''' % (self._queue.table_name, self._queue.execution_ttl),
            now=unix_timestamp(datetime.utcnow()),
            task_id=self.task_id,
            execution_id=self.execution_id)
    if affected_row is None:
        raise TaskDoesNotExist()
    # NOTE(review): the check above and the update below run in separate
    # transactions, and the UPDATE filters on id alone (no execution_id
    # guard), so another worker claiming the task in between could have
    # its claim wiped here — confirm whether that race is acceptable.
    with self.storage.transaction() as cursor:
        apsw_helpers.query(cursor, '''
            UPDATE %s SET
                last_contact=NULL,
                update_count=update_count + 1,
                started=NULL,
                steps=NULL,
                execution_id=NULL,
                finished=NULL,
                result=NULL
            WHERE id = :task_id
        ''' % self._queue.table_name,
            task_id=self.task_id)
def query_target(self, host, port, database, table):
    """ Return Job objects whose spec points at the given target.

    Matches on connection host/port plus target database/table.
    """
    with self.storage.cursor() as cursor:
        rows = apsw_helpers.query(cursor, 'SELECT id, spec FROM jobs')

        matches = []
        for row in rows:
            spec = json.loads(row.spec)
            # Short-circuit comparison: later keys are only read when
            # the earlier ones matched.
            if spec['connection']['host'] == host and \
               spec['connection']['port'] == port and \
               spec['target']['database'] == database and \
               spec['target']['table'] == table:
                matches.append(Job(spec, row.id))
        return matches
def _query_queued(self, cursor, projection, limit=None, extra_predicate=None):
    """ Fetch queued task rows, oldest first.

    "Queued" means unfinished and either never claimed or claimed by an
    execution whose lease has expired.
    """
    predicate_sql, predicate_args = (
        self._build_extra_predicate(extra_predicate))

    query = '''
        SELECT %s FROM %s
        WHERE finished IS NULL
        AND (
            execution_id IS NULL
            OR last_contact <= datetime(:now, 'unixepoch', '-%s second')
        ) %s
        ORDER BY created ASC
        LIMIT :limit
    ''' % (projection, self.table_name, self.execution_ttl, predicate_sql)

    rows = apsw_helpers.query(
        cursor, query,
        now=unix_timestamp(datetime.utcnow()),
        # sys.maxsize stands in for "no limit".
        limit=limit if limit is not None else sys.maxsize,
        **predicate_args)
    return rows
def get_tasks_in_state(self, state, extra_predicate=None):
    """ Return loaded task rows whose state is in the given collection.

    :param state: iterable of state values to match.
    :param extra_predicate: optional (sql, args) pair further narrowing
        the result set.
    :return: list of tasks built by api.shared.task_load_row.
    """
    extra_predicate_sql, extra_predicate_args = (
        self._build_extra_predicate(extra_predicate))

    query_params = api.shared.TaskState.projection_params()
    query_params.update(extra_predicate_args)

    # Bind each state value as a named parameter instead of splicing
    # str()-formatted literals into the SQL, which depended on Python's
    # tuple repr for quoting and was injection-prone.
    state_names = ['state_%d' % i for i in range(len(state))]
    for name, value in zip(state_names, state):
        query_params[name] = str(value)
    state_list = '(%s)' % ', '.join(':' + name for name in state_names)

    with self.storage.cursor() as cursor:
        rows = apsw_helpers.query(cursor, '''
            SELECT * FROM %s
            WHERE %s IN %s %s
            ORDER BY id ASC
        ''' % (self.table_name, api.shared.TaskState.PROJECTION,
               state_list, extra_predicate_sql),
            **query_params)

    return [api.shared.task_load_row(row) for row in rows]
def run(self): self.logger = log.get_logger('CancelJob') self.tasks = Tasks() rows_affected = 0 if self.options.multiple: rows_affected = self.tasks.bulk_finish( extra_predicate=("job_id LIKE :job_id", { 'job_id': self.options.job_id + '%%' })) else: loader_storage = LoaderStorage() with loader_storage.transaction() as cursor: jobs = apsw_helpers.query(cursor, ''' SELECT id FROM jobs WHERE id LIKE :job_id ''', job_id=self.options.job_id + '%') if len(jobs) > 1: print len(jobs), 'jobs match this job ID:' print '\n'.join([row.id for row in jobs]) print 'Please use a more specific prefix or specify the `--multiple` flag if you' print 'would like to cancel more than one job.' sys.exit(1) elif len(jobs) == 0: print '0 jobs match this job ID.' sys.exit(1) else: rows_affected = self.tasks.bulk_finish( extra_predicate=("job_id = :job_id", { 'job_id': jobs[0].id })) job_suffix = '(s)' if self.options.multiple else '' task_suffix = 's' if not rows_affected == 1 else '' print CANCEL_JOB_MESSAGE % (job_suffix, self.options.job_id, rows_affected, task_suffix)
def _db_query(self, *args, **kwargs):
    """ Run a query through the serialized DB caller, passing all
    arguments straight to apsw_helpers.query. """
    def run_query(db_cursor):
        return apsw_helpers.query(db_cursor, *args, **kwargs)
    return self.__db_caller(run_query)
def all(self):
    """ Return every stored job as a Job object, oldest first. """
    with self.storage.cursor() as cursor:
        rows = apsw_helpers.query(
            cursor, 'SELECT id, spec FROM jobs ORDER BY created ASC')
        jobs = []
        for row in rows:
            jobs.append(Job(json.loads(row.spec), row.id))
        return jobs
def ready(self):
    """ Returns True if the tables have been setup, False otherwise """
    with self.storage.cursor() as cursor:
        # 'table' is single-quoted: double quotes are identifier quotes
        # in SQL; SQLite only accepted "table" here via the deprecated
        # double-quoted-string (DQS) compatibility misfeature.
        rows = apsw_helpers.query(
            cursor,
            "SELECT name FROM sqlite_master WHERE type = 'table'")
    tables = [row.name for row in rows]
    return all(table_name in tables for table_name in self._tables)
def _db_query(self, *args, **kwargs):
    """ Forward a query through the serialized DB caller;
    all arguments go to apsw_helpers.query unchanged. """
    caller = self.__db_caller
    return caller(lambda cur: apsw_helpers.query(cur, *args, **kwargs))