def push(self, *objects, args=(), kwargs=None, **options):
    """Push objects onto the async stack.

    Arguments:
        objects {*args of objects} - This can be several objects as parameters into this method.
        options {**kwargs of options} - Additional options for async driver
    """
    from config.database import DB as schema

    # Avoid a mutable {} default argument.
    kwargs = kwargs or {}

    callback = options.get("callback", "handle")
    wait = options.get("wait", None)
    connection = options.get("connection", None)

    if connection:
        schema = schema.connection(connection)

    if wait:
        # Convert a human-readable offset (e.g. "10 minutes") into a datetime string.
        wait = parse_human_time(wait).to_datetime_string()

    for job in objects:
        if schema.get_schema_builder().has_table("queue_jobs"):
            payload = pickle.dumps(
                {"obj": job, "args": args, "kwargs": kwargs, "callback": callback}
            )

            schema.table("queue_jobs").insert(
                {
                    "name": str(job),
                    "serialized": payload,
                    "created_at": pendulum.now().to_datetime_string(),
                    "attempts": 0,
                    "ran_at": None,
                    "wait_until": wait,
                }
            )
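# --- Usage sketch (illustrative, not part of this module) ---
# A hedged example of a call site. `SomeJob` and the `driver` variable are
# assumed names for a job class and this driver instance; `wait` takes a
# human-readable offset and `callback` names the method the consumer will
# invoke (defaulting to "handle"):
#
#     driver.push(
#         SomeJob,                     # classes are resolved from the container at consume time
#         args=("user@example.com",),  # positional arguments forwarded to the callback
#         wait="10 minutes",           # converted by parse_human_time into `wait_until`
#         callback="handle",
#     )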
def run_failed_jobs(self):
    from config.database import DB as schema

    try:
        self.success("Attempting to send failed jobs back to the queue ...")

        for job in schema.table("failed_jobs").get():
            payload = pickle.loads(job.payload)
            schema.table("failed_jobs").where("payload", job.payload).delete()
            self.push(
                payload["obj"], args=payload["args"], callback=payload["callback"]
            )
    except Exception:
        self.danger("Could not get the failed_jobs table")
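# --- Payload contract (illustrative) ---
# push() pickles a plain dict and run_failed_jobs()/consume() unpickle it.
# A self-contained round trip of that contract; send_welcome_email is a
# hypothetical callable standing in for a real job:
import pickle

def send_welcome_email(address):
    print("emailing", address)

payload = pickle.dumps(
    {"obj": send_welcome_email, "args": ("user@example.com",), "callback": "handle"}
)
unserialized = pickle.loads(payload)
# A plain function has no .handle attribute, so consume() falls back to obj(*args).
unserialized["obj"](*unserialized["args"])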
def assertDatabaseNotHas(self, schema, value):
    from config.database import DB

    table = schema.split(".")[0]
    column = schema.split(".")[1]

    self.assertFalse(DB.table(table).where(column, value).first())
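# --- Usage sketch (illustrative) ---
# The "table.column" string is split on the dot; a hedged example from a
# hypothetical test case:
#
#     def test_user_was_not_created(self):
#         self.assertDatabaseNotHas("users.email", "joe@example.com")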
def foreign_data(self, table_name):
    row = self.get_model_row_by_table_name(table_name)

    try:
        return (
            DB.table(table_name)
            .select("id", row["foreign_display"] + " as data")
            .get()
            .serialize()
        )
    except Exception:
        # Fall back to an empty list when the table or display column
        # cannot be queried.
        return []
def add_to_failed_queue_table(self, payload, driver="amqp"):
    from config.database import DB as schema
    from config import queue

    if "amqp" in queue.DRIVERS:
        listening_channel = queue.DRIVERS["amqp"]["channel"]
    else:
        listening_channel = "default"

    if schema.get_schema_builder().has_table("failed_jobs"):
        schema.table("failed_jobs").insert(
            {
                "driver": driver,
                "channel": listening_channel,
                "payload": pickle.dumps(payload),
                # Store a datetime string, matching the other timestamp columns.
                "failed_at": pendulum.now().to_datetime_string(),
            }
        )
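# --- Assumed schema (illustrative) ---
# A minimal Orator-style migration matching the columns this insert writes.
# Column types are assumptions inferred from the payload, not taken from a
# shipped migration:
from orator.migrations import Migration

class CreateFailedJobsTable(Migration):
    def up(self):
        with self.schema.create("failed_jobs") as table:
            table.increments("id")
            table.string("driver")
            table.string("channel")
            table.binary("payload")      # pickled job payload
            table.timestamp("failed_at")

    def down(self):
        self.schema.drop("failed_jobs")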
def consume(self, channel, **options):  # skipcq
    from config.database import DB as schema, DATABASES
    from wsgi import container

    if not channel or channel == "default":
        channel = DATABASES["default"]

    self.info(
        '[*] Waiting to process jobs from the "queue_jobs" table on the "{}" connection. To exit press CTRL + C'.format(
            channel
        )
    )

    schema = schema.connection(channel)

    while True:
        builder = schema.table("queue_jobs")

        # Fetch one unprocessed job that either has no delay or whose
        # wait_until has already passed.
        jobs = (
            builder.where_null("ran_at")
            .where(
                schema.table("queue_jobs")
                .where_null("wait_until")
                .or_where("wait_until", "<=", pendulum.now().to_datetime_string())
            )
            .limit(1)
            .get()
        )

        if not jobs.count():
            time.sleep(5)

        for job in jobs:
            # Claim the job so other workers skip it.
            builder.where("id", job["id"]).update(
                {"ran_at": pendulum.now().to_datetime_string()}
            )

            unserialized = pickle.loads(job.serialized)
            obj = unserialized["obj"]
            args = unserialized["args"]
            callback = unserialized["callback"]
            ran = job.attempts

            try:
                try:
                    # Classes are resolved through the container; instances and
                    # plain callables are used as-is.
                    if inspect.isclass(obj):
                        obj = container.resolve(obj)

                    getattr(obj, callback)(*args)
                except AttributeError:
                    obj(*args)

                try:
                    builder.where("id", job["id"]).update(
                        {
                            "ran_at": pendulum.now().to_datetime_string(),
                            "attempts": job["attempts"] + 1,
                        }
                    )
                    self.success("[\u2713] Job Successfully Processed")
                except UnicodeEncodeError:
                    self.success("[Y] Job Successfully Processed")
            except Exception as e:  # skipcq
                self.danger("Job Failed: {}".format(str(e)))

                if not obj.run_again_on_fail:
                    # Fail immediately instead of retrying.
                    builder.where("id", job["id"]).update(
                        {
                            "ran_at": pendulum.now().to_datetime_string(),
                            "failed": 1,
                            "attempts": job["attempts"] + 1,
                        }
                    )

                if ran < obj.run_times and isinstance(obj, Queueable):
                    # Retry after a short pause.
                    time.sleep(1)
                    builder.where("id", job["id"]).update(
                        {"attempts": job["attempts"] + 1}
                    )
                    continue
                else:
                    # Out of retries: mark failed, fire the failed() hook and
                    # archive the payload.
                    builder.where("id", job["id"]).update(
                        {
                            "attempts": job["attempts"] + 1,
                            "ran_at": pendulum.now().to_datetime_string(),
                            "failed": 1,
                        }
                    )

                    if hasattr(obj, "failed"):
                        getattr(obj, "failed")(unserialized, str(e))

                    self.add_to_failed_queue_table(unserialized, driver="database")

        time.sleep(5)
def consume(self, channel, fair=False, **options):
    # Variant that fetches all pending jobs and filters wait_until in Python
    # rather than in the query.
    from config.database import DB as schema, DATABASES
    from wsgi import container

    if not channel or channel == "default":
        channel = DATABASES["default"]

    self.info(
        '[*] Waiting to process jobs from the "queue_jobs" table on the "{}" connection. To exit press CTRL + C'.format(
            channel
        )
    )

    schema = schema.connection(channel)

    while True:
        jobs = schema.table("queue_jobs").where("ran_at", None).get()

        if not jobs.count():
            time.sleep(5)

        for job in jobs:
            unserialized = pickle.loads(job.serialized)
            obj = unserialized["obj"]
            args = unserialized["args"]
            callback = unserialized["callback"]
            ran = job.attempts

            # Skip jobs whose delay has not elapsed yet. wait_until may be
            # stored as a string or a datetime, or be absent entirely.
            if job["wait_until"]:
                wait_time = job["wait_until"]
                if isinstance(wait_time, str):
                    wait_time = pendulum.parse(wait_time)
                else:
                    wait_time = pendulum.instance(wait_time)

                if wait_time.is_future():
                    continue

            try:
                try:
                    if inspect.isclass(obj):
                        obj = container.resolve(obj)

                    getattr(obj, callback)(*args)
                except AttributeError:
                    obj(*args)

                try:
                    schema.table("queue_jobs").where("id", job["id"]).update(
                        {
                            "ran_at": pendulum.now().to_datetime_string(),
                            "attempts": job["attempts"] + 1,
                        }
                    )
                    self.success("[\u2713] Job Successfully Processed")
                except UnicodeEncodeError:
                    self.success("[Y] Job Successfully Processed")
            except Exception as e:
                self.danger("Job Failed: {}".format(str(e)))

                if not obj.run_again_on_fail:
                    # Fail immediately instead of retrying.
                    schema.table("queue_jobs").where("id", job["id"]).update(
                        {
                            "ran_at": pendulum.now().to_datetime_string(),
                            "failed": 1,
                            "attempts": job["attempts"] + 1,
                        }
                    )

                if ran < obj.run_times and isinstance(obj, Queueable):
                    # Retry after a short pause.
                    time.sleep(1)
                    schema.table("queue_jobs").where("id", job["id"]).update(
                        {"attempts": job["attempts"] + 1}
                    )
                    continue
                else:
                    # Out of retries: mark failed, fire the failed() hook and
                    # archive the payload.
                    schema.table("queue_jobs").where("id", job["id"]).update(
                        {
                            "attempts": job["attempts"] + 1,
                            "ran_at": pendulum.now().to_datetime_string(),
                            "failed": 1,
                        }
                    )

                    if hasattr(obj, "failed"):
                        getattr(obj, "failed")(unserialized, str(e))

                    self.add_to_failed_queue_table(unserialized, driver="database")

        time.sleep(5)
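# --- Job shape expected by consume() (illustrative) ---
# consume() reads `run_again_on_fail`, `run_times`, the callback (default
# "handle"), and an optional `failed` hook from the job object. A minimal
# sketch, assuming Queueable is importable from masonite.queues and that
# run_times of 3 is a reasonable retry budget:
from masonite.queues import Queueable

class SendWelcomeEmail(Queueable):
    run_again_on_fail = True  # retry instead of failing on the first error
    run_times = 3             # maximum attempts before the job is archived

    def handle(self, address):
        # Do the actual work; raising here triggers the retry/failure path.
        print("emailing", address)

    def failed(self, payload, error):
        # Called once retries are exhausted, before add_to_failed_queue_table.
        print("giving up:", error)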