def main():
    """Start a Faktory worker consuming the 'default' and 'busy' queues.

    Fix: `concurrency` was computed from multiprocessing.cpu_count() but the
    Worker was constructed with a hard-coded concurrency=1, leaving the
    variable unused.  The computed value is now actually passed through.
    """
    concurrency = multiprocessing.cpu_count()
    w = Worker(faktory=URL_FACTORY, queues=['default', 'busy'],
               concurrency=concurrency)
    # Register each Faktory job type with its handler.
    w.register('live_see_all', live_see_all)
    w.register('parse_profile', parse_profile)
    w.register('parse_posts', parse_posts)
    # Blocks until interrupted or shut down from the Faktory web UI.
    w.run()
def main(faktory_url='tcp://localhost:7419', queues=None, concurrency=1):
    """Run a Faktory worker that executes 'docker' jobs.

    Args:
        faktory_url: TCP URL of the Faktory server to connect to.
        queues: Queue names to consume; defaults to ['default'].
        concurrency: Number of jobs processed concurrently.
    """
    # Sentinel instead of a mutable default argument: a list literal in the
    # signature would be a single object shared across all calls.
    if queues is None:
        queues = ['default']
    # Lazy %-style args defer formatting until the record is emitted;
    # rendered output is identical to the previous .format() calls.
    logging.info('Faktory Instance: %s', faktory_url)
    logging.info('Concurrency: %s', concurrency)
    logging.info('Queues: %s', queues)
    w = Worker(
        faktory=faktory_url,
        queues=queues,
        concurrency=concurrency
    )
    w.register('docker', docker_runner)
    # Blocks until interrupted or shut down from the Faktory web UI.
    w.run()
import logging

# Configure logging before the faktory import so library records are visible.
logging.basicConfig(level=logging.INFO)

from faktory import Worker


def your_function(x, y):
    """Demo task handler: return the sum of *x* and *y*."""
    return x + y


# Single-threaded worker on the default queue of a local Faktory server.
w = Worker(faktory="tcp://localhost:7419", queues=["default"], concurrency=1)
w.register("test", your_function)

# Runs until control-c or worker shutdown from the Faktory web UI.
# Check examples/producer.py for how to submit tasks to be run by faktory.
w.run()
from faktory import Worker
from jobs.master_environments import environments_sync
from jobs.master_facts import facts_sync
from jobs.master_nodes import nodes_sync
from jobs.master_classes import classes_sync
from jobs.matching_nodes import matching_nodes_sync

# Single-threaded worker on the default queue.
w = Worker(queues=["default"], concurrency=1)

# Faktory job type -> handler; dict insertion order preserves the
# original registration sequence.
_handlers = {
    "master-environments": environments_sync,
    "master-facts": facts_sync,
    "master-nodes": nodes_sync,
    "master-classes": classes_sync,
    "group-nodes": matching_nodes_sync,
}
for job_type, handler in _handlers.items():
    w.register(job_type, handler)

w.run()
import logging
import sys

from faktory import Worker
from utils import process_job
from constants import QUEUE_NAME, QUEUE_JOB_TYPE, WORKER_CONCURRENCY

# Root logger at INFO, emitting to stdout.  (The original fetched the root
# logger twice — once anonymously for setLevel, once for the handler — this
# does the same configuration through a single reference.)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream = logging.StreamHandler(sys.stdout)
stream.setLevel(logging.INFO)
logger.addHandler(stream)

if __name__ == "__main__":
    # One worker on QUEUE_NAME; jobs of QUEUE_JOB_TYPE go to process_job.
    w = Worker(queues=[QUEUE_NAME], concurrency=WORKER_CONCURRENCY)
    w.register(QUEUE_JOB_TYPE, process_job)
    # Fix: "Sucessfully" -> "Successfully" in the operator-facing log line.
    logger.info('[Switch_Consumer] Successfully registered consumer on task queue, waiting for new jobs ....')
    # Blocks until interrupted or shut down from the Faktory web UI.
    w.run()
username="******", content="The update for {} ({}) by {} succeded!".format( title, wsid, author)).execute() def workshop_queued(wsid, title): DiscordWebhook(url=env.str("DISCORD_WEBHOOK"), username="******", content="[Bulk Update] Queued {} ({}).".format( title, wsid)).execute() def workshop_update_bulk(args): with faktory.connection() as client: for data in workshop.search('[Photon]'): # client.queue("WorkshopUpdateQueued", args=(data["publishedfileid"], data["title"])) client.queue("UpdateWorkshop", args=({ "wsid": data["publishedfileid"], "force": "force" in args and args["force"] }, )) w = Worker(concurrency=5) w.register("UpdateWorkshop", workshop_update) w.register("WorkshopUpdateFailed", workshop_results_failed) w.register("WorkshopUpdateComplete", workshop_results_success) w.register("WorkshopUpdateQueued", workshop_queued) w.register("UpdateAllWorkshop", workshop_update_bulk) w.run()
# NOTE(review): collapsed chunk; it begins mid-function (inside a try block
# whose opening and enclosing `def` are outside this view) — left byte-identical
# rather than guessed at.  Visible behavior: a "start_checkin" Faktory job is
# scheduled at `five_minutes_before_checkin`; a GraphQL mutation records the
# check-in as "SCHEDULED", or "FAILED" if the enqueue or the reservation lookup
# (errors.FailedCheckin) fails.  Then a Worker (concurrency=16, default queue)
# registers start_checkin/schedule_checkin and runs until shutdown.
success = client.queue( "start_checkin", at=five_minutes_before_checkin, args=( checkin_id, checkin_time_utc.timestamp(), reservation_number, first_name, last_name, ), ) params = {"id": checkin_id, "status": "SCHEDULED"} if not success: params["status"] = "FAILED" gql_client.execute(query, variable_values=params) except errors.FailedCheckin: logger.error("Could not look up requested reservation") params = {"id": checkin_id, "status": "FAILED"} gql_client.execute(query, variable_values=params) return w = Worker(faktory=faktory_url, queues=["default"], concurrency=16) w.register("start_checkin", start_checkin) w.register("schedule_checkin", schedule_checkin) w.run() # runs until control-c or worker shutdown from Faktory web UI