def index_prices():

    # Enqueue both jobs; distinct names keep each job handle accessible
    populate_job = q.enqueue_call(func=populate_tickers,
                                  retry=Retry(3),
                                  result_ttl=5000)

    prices_job = q.enqueue_call(func=get_index_prices,
                                retry=Retry(3),
                                result_ttl=5000)
Example #2
def links():
    url = request.form.get("url")
    url_found = db.search(Query().url == url)
    if request.method == 'POST' and 'delete' not in request.form:
        if url_found:  # Avoid duplicates
            msg = "URL already exists"
            return return_with_url_list_msg(msg)

        elif not validators.url(url):  # Validate the URL
            msg = "The URL is not valid"
            return return_with_url_list_msg(msg)

        else:
            user_input = {'url': url}
            db.insert(user_input)
            msg = 'New URL registered'
            result = q.enqueue(worker_func,
                               url,
                               retry=Retry(max=3, interval=[10, 30, 60]))
            return return_with_url_list_msg(msg)

    if request.method == 'POST' and 'delete' in request.form:
        db.remove(Query().url == request.form['delete'])
        msg = "URL deleted"
        return return_with_url_list_msg(msg)

    return return_with_url_list_msg()
Example #3
def refresh_price():
    """Refresh stock info"""

    tickers = si.get_all_tickers()

    for symbol in tickers:
        q.enqueue_call(func=si.get_price,
                       args=(symbol, ),
                       retry=Retry(3),
                       result_ttl=5000)
    # After all price updates are queued, enqueue the bad-price check
    q.enqueue_call(func=si.find_bad_updates,
                   args=None,
                   retry=Retry(3),
                   result_ttl=5000)
Example #4
File: cli.py  Project: luisbc92/rq
def enqueue(cli_config, queue, timeout, result_ttl, ttl, failure_ttl,
            description, depends_on, job_id, at_front, retry_max,
            retry_interval, schedule_in, schedule_at, quiet, serializer,
            function, arguments, **options):
    """Enqueues a job from the command line"""
    args, kwargs = parse_function_args(arguments)
    function_string = get_call_string(function, args, kwargs)
    description = description or function_string

    retry = None
    if retry_max > 0:
        retry = Retry(retry_max, retry_interval)

    schedule = parse_schedule(schedule_in, schedule_at)

    with Connection(cli_config.connection):
        queue = cli_config.queue_class(queue, serializer=serializer)

        if schedule is None:
            job = queue.enqueue_call(function, args, kwargs, timeout,
                                     result_ttl, ttl, failure_ttl, description,
                                     depends_on, job_id, at_front, None, retry)
        else:
            job = queue.create_job(function, args, kwargs, timeout, result_ttl,
                                   ttl, failure_ttl, description, depends_on,
                                   job_id, None, JobStatus.SCHEDULED, retry)
            queue.schedule_job(job, schedule)

    if not quiet:
        click.echo('Enqueued %s with job-id \'%s\'.' %
                   (blue(function_string), job.id))
Example #5
def upload_image():
    if request.method == 'POST':
        if request.files:
            # Push the image's byte string into the queue;
            # retry up to 5 times before the job is dropped
            job = q.enqueue(
                process_img, request.files['image'].read(),
                retry=Retry(max=5))
    return jsonify({'message': 'Image is uploaded successfully!'})
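A hedged client-side sketch for exercising this endpoint with the `requests` library; the host, route, and file name are assumptions, not taken from the original project:

import requests

# POST an image file; the server reads it via request.files['image']
with open("cat.jpg", "rb") as f:
    resp = requests.post("http://localhost:5000/upload", files={"image": f})
print(resp.json())  # {'message': 'Image is uploaded successfully!'}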
Example #6
    def test_enqueue_with_retry(self):
        """Enqueueing with retry_strategy works"""
        queue = Queue('example', connection=self.testconn)
        job = queue.enqueue(say_hello, retry=Retry(max=3, interval=5))

        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.retries_left, 3)
        self.assertEqual(job.retry_intervals, [5])
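When `interval` is a list, each retry waits the corresponding number of seconds before the job is re-enqueued. A short extension of the test above illustrating this:

        job = queue.enqueue(say_hello, retry=Retry(max=3, interval=[10, 30, 60]))

        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.retries_left, 3)
        self.assertEqual(job.retry_intervals, [10, 30, 60])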
Example #7
def save_video(url, languages):
    client = coreapi.Client()
    normalized_url = query_string_remove(url)
    schema = client.get(normalized_url)

    soup = BeautifulSoup(schema, "html.parser")
    video_meta_unprocessed = soup.find("div",
                                       attrs={
                                           "itemscope":
                                           True,
                                           "itemtype":
                                           "https://schema.org/VideoObject"
                                       })
    video_meta = BeautifulSoup(str(video_meta_unprocessed), "html.parser")

    duration = video_meta.find("meta", attrs={"itemprop":
                                              "duration"})["content"]
    license_url = video_meta.find("link", attrs={"itemprop":
                                                 "license"})["href"]
    title = video_meta.find("meta", attrs={"itemprop": "name"})["content"]
    description = video_meta.find("meta", attrs={"itemprop":
                                                 "description"})["content"]

    script_unprocessed = str(soup.find("script", attrs={"data-spec": "q"}))
    open_index = script_unprocessed.index('{')
    close_index = script_unprocessed.rindex('}')

    json_substring = script_unprocessed[open_index:close_index + 1]
    talk_meta = json.loads(json_substring)["__INITIAL_DATA__"]

    video_id = talk_meta["current_talk"]

    url = talk_meta["url"]
    viewed_count = talk_meta["viewed_count"]
    event = talk_meta["event"]
    speakers = []
    for speaker in talk_meta["speakers"]:
        name = construct_name(speaker)
        speakers.append(name)

    video = Video(video_id=video_id,
                  duration=duration,
                  url=url,
                  license_url=license_url,
                  title=title,
                  description=description,
                  speakers=speakers,
                  event=event,
                  viewed_count=viewed_count)
    video.save()

    video_serializer = VideoSerializer(video)

    django_rq.enqueue(func=save_subtitles,
                      args=[video_id, languages],
                      retry=Retry(max=3, interval=[10, 30, 60]))

    print(video_serializer.data)
Example #8
def queue_collection_tickers(tickers, collection):

    for tick in tickers:
        q.enqueue_call(
            func=write_ticker_to_collections,
            args=(tick, collection),
            retry=Retry(3),
            result_ttl=5000
        )
Example #9
    def send_message(self, message):
        """
        Method for sending messages for specific messengers using queues

        :param message: dict object
        format is {"body": "Hello",
                   "send_at": "2020-10-04 08:15:00",
                   "recipients": [
                       {"uuid": 1, "service": "whatsapp"},
                       {"uuid": None, "service": "telegram"}
                   ]}
        """
        started_jobs = []
        scheduled = []
        for messenger in message["recipients"]:
            service = self.messengers.get(messenger.get("service"))
            if not service:
                raise ValueError("Unknown messenger")

            queued = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")

            description = (
                f"Message to user: {messenger.get('uuid')} with body: {message.get('body')} "
                f"Queued at {queued} to messenger: {messenger.get('service')}")

            if message.get("send_at"):
                # enqueue_at expects a datetime object, so parse the
                # string timestamp documented in the docstring
                send_at = datetime.strptime(message["send_at"],
                                            "%Y-%m-%d %H:%M:%S")
                scheduled_send_job = self.scheduled_queue.enqueue_at(
                    send_at,
                    service.send_message,
                    message,
                    retry=Retry(max=RETRY_COUNT, interval=RETRY_INTERVAL),
                    description=description)
                scheduled.append(scheduled_send_job.description)
            else:
                send_job = self.main_queue.enqueue(
                    service.send_message,
                    message,
                    retry=Retry(max=RETRY_COUNT, interval=RETRY_INTERVAL),
                    description=description)

                started_jobs.append(send_job.description)

        result = {"started": started_jobs, "scheduled": scheduled}
        return result
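A hypothetical call using the message format documented in the docstring; the `dispatcher` instance name is illustrative:

message = {
    "body": "Hello",
    "send_at": "2020-10-04 08:15:00",
    "recipients": [
        {"uuid": 1, "service": "whatsapp"},
        {"uuid": None, "service": "telegram"},
    ],
}
# With "send_at" set, every recipient lands in result["scheduled"]
result = dispatcher.send_message(message)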
Example #10
def generate_guidebook_paths(datastack):
    if not SHOW_PATH_TOOL:
        raise GuidebookException("Path tool is not enabled")

    root_id = request.args.get("root_id", None)
    if root_id is not None:
        root_id = int(root_id)
    root_loc = parse_location(request.args.get("root_location", None))
    collapse_soma = request.args.get("collapse_soma") == "True"

    split_loc = parse_location(request.args.get("split_location", None))
    downstream = request.args.get("downstream") == "True"
    root_id_from_point = request.args.get("root_id_from_point") == "True"
    spacing = request.args.get("spacing", 3000)

    num_paths_raw = request.args.get("num_paths", "all")
    num_path_dict = {"all": "all", "5": 5, "10": 10, "15": 15}
    num_paths = num_path_dict.get(num_paths_raw, "all")

    exclude_short = request.args.get("exclude_short", "True") == "True"
    if exclude_short:
        segment_length_thresh = SHORT_SEGMENT_THRESH
    else:
        segment_length_thresh = 0

    root_point_resolution = current_app.config.get(
        "GUIDEBOOK_EXPECTED_RESOLUTION", [4, 4, 40])
    print(f"Resolution: {root_point_resolution}")

    kwargs = {
        "datastack": datastack,
        "server_address": current_app.config.get("GLOBAL_SERVER_ADDRESS"),
        "return_as": "url",
        "root_id": root_id,
        "root_point": root_loc,
        "root_point_resolution": root_point_resolution,
        "n_choice": num_paths,
        "segment_length_thresh": segment_length_thresh,
        "spacing": int(spacing),
        "collapse_soma": collapse_soma,
        "n_parallel": int(current_app.config.get("N_PARALLEL")),
        "invalidation_d": int(current_app.config.get("INVALIDATION_D")),
        "selection_point": split_loc,
        "downstream": downstream,
        "root_id_from_point": root_id_from_point,
        "auth_token_key": current_app.config.get("AUTH_TOKEN_KEY"),
    }
    print(kwargs)
    job = q.enqueue_call(
        generate_lvl2_paths,
        kwargs=kwargs,
        result_ttl=5000,
        timeout=600,
        retry=Retry(max=2, interval=10),
    )
    return redirect(url_for(".show_result_paths", job_key=job.get_id()))
Example #11
def refresh_info():
    """Refresh stock info"""

    tickers = si.get_all_tickers()

    for symbol in tickers:
        q.enqueue_call(func=si.get_info,
                       args=(symbol, ),
                       retry=Retry(3),
                       result_ttl=5000)
Example #12
def handle_remesh(table_id):
    current_app.request_type = "remesh_enque"
    current_app.table_id = table_id
    is_priority = request.args.get('priority', True, type=str2bool)
    is_redisjob = request.args.get('use_redis', False, type=str2bool)
    user_id = str(g.auth_user["id"])
    current_app.user_id = user_id
    new_lvl2_ids = json.loads(request.data)["new_lvl2_ids"]

    if is_redisjob:
        with Connection(redis.from_url(current_app.config["REDIS_URL"])):

            if is_priority:
                retry = Retry(max=3, interval=[1, 10, 60])
                queue_name = "mesh-chunks"
            else:
                retry = Retry(max=3, interval=[60, 60, 60])
                queue_name = "mesh-chunks-low-priority"
            # Retry is a per-job option in RQ, so pass it to enqueue()
            # rather than to the Queue constructor, which ignores it
            q = Queue(queue_name, default_timeout=1200)
            task = q.enqueue(meshing_tasks.remeshing, table_id, new_lvl2_ids,
                             retry=retry)

        response_object = {
            "status": "success",
            "data": {
                "task_id": task.get_id()
            }
        }

        return jsonify(response_object), 202
    else:
        new_lvl2_ids = np.array(new_lvl2_ids, dtype=np.uint64)
        cg = app_utils.get_cg(table_id)

        if len(new_lvl2_ids) > 0:
            t = threading.Thread(target=_remeshing,
                                 args=(cg.get_serialized_info(), new_lvl2_ids))
            t.start()

        return Response(status=202)
Example #13
def add_project_handler(request):
    try:
        validate(instance=request.json, schema=ProjectJsonSchema)
        data = request.json.get('project')

        # Get project name
        project_name = data.get('nama')
        # Get workers
        pekerja_names = data.get('pekerja')
        workers_obj = [types.ProjectWorker(name) for name in pekerja_names]
        # get dates
        start_date = datetime.strptime(
            data.get('start_date'), "%Y-%m-%dT%H:%M:%S.%fZ")
        end_date = datetime.strptime(
            data.get('end_date'), "%Y-%m-%dT%H:%M:%S.%fZ")

        # Get project deliverables
        deliverables_data = data.get('deliverables')
        deliverables = []
        for deliverable in deliverables_data:
            section = deliverable.get('section')
            item = deliverable.get('item')
            subitem = deliverable.get('subitem')
            price = deliverable.get('price')
            quantity = deliverable.get('quantity')
            info = deliverable.get('info')
            unit = deliverable.get('unit')
            workers = deliverable.get('workers', [])
            schedules = deliverable.get('schedules', [])

            new_deliverable = types.ProjectDeliverable(
                project_name, section, item, subitem, info, quantity, price, unit, workers, schedules)
            deliverables.append(new_deliverable)

        # enqueue object
        project = fiano.Project(project_name, deliverables=deliverables,
                                workers=workers_obj, start_date=start_date, end_date=end_date)

        job = worker_queue.enqueue(
            fiano.insert_project, project, retry=Retry(3))
        
        # Pass the project-level workers (workers_obj), not the leaked
        # loop variable from the deliverables loop above
        db_job = worker_queue.enqueue(
            repository.insert_to_db, project, deliverables, workers_obj)

        return "Success enqueuing job for project {} ".format(project_name)

    except ValidationError as e:
        print(e)
        return e.message, 400
Example #14
def get_signal():
    if request.method == 'POST':
        try:
            if not os.path.exists('./app/images'):
                os.makedirs('./app/images')
        except OSError:
            app.logger.error('Error when creating images directory')

        response = request.get_json()
        record_name = response['record_name']
        record_path = f'./app/images/{record_name}'

        # Enqueue the download; retry up to 5 times before the job is dropped
        job = q.enqueue(
            download_and_process_img, record_name, record_path,
            retry=Retry(max=5))

    return jsonify({'message': 'Image is crawled successfully!'})
Example #15
def push_notification(notification):
    if notification.delivery_status == Notification.IN_PROGRESS:
        subscription = Subscription.query.get(notification.subscription_id)
        if subscription.active:
            job = q.enqueue_call(send_notification,
                                 args=(notification, subscription),
                                 result_ttl=500,
                                 retry=Retry(max=7,
                                             interval=[1, 2, 4, 8, 16, 32,
                                                       64]),
                                 timeout='30s',
                                 failure_ttl=300)
        else:
            notification.delivery_status = Notification.CANCELLED
            db.session.add(notification)
            db.session.commit()
Example #16
def generate_guidebook_chunkgraph(datastack):
    root_id = request.args.get("root_id", None)
    if root_id is not None:
        root_id = int(root_id)
    root_loc = parse_location(request.args.get("root_location", None))
    branch_points = request.args.get("branch_points", "True") == "True"
    end_points = request.args.get("end_points", "True") == "True"
    collapse_soma = request.args.get("collapse_soma") == "True"
    segmentation_fallback = request.args.get("segmentation_fallback",
                                             "False") == "True"
    split_loc = parse_location(request.args.get("split_location", None))
    downstream = request.args.get("downstream") == "True"
    root_id_from_point = request.args.get("root_id_from_point") == "True"

    root_point_resolution = current_app.config.get(
        "GUIDEBOOK_EXPECTED_RESOLUTION", [4, 4, 40])
    print(f"Resolution: {root_point_resolution}")

    kwargs = {
        "datastack": datastack,
        "server_address": current_app.config.get("GLOBAL_SERVER_ADDRESS"),
        "return_as": "url",
        "root_id": root_id,
        "root_point": root_loc,
        "refine_branch_points": branch_points,
        "refine_end_points": end_points,
        "collapse_soma": collapse_soma,
        "n_parallel": int(current_app.config.get("N_PARALLEL")),
        "root_point_resolution": root_point_resolution,
        "segmentation_fallback": segmentation_fallback,
        "invalidation_d": int(current_app.config.get("INVALIDATION_D")),
        "selection_point": split_loc,
        "downstream": downstream,
        "root_id_from_point": root_id_from_point,
        "auth_token_key": current_app.config.get("AUTH_TOKEN_KEY"),
    }
    print(kwargs)
    job = q.enqueue_call(
        generate_lvl2_proofreading,
        kwargs=kwargs,
        result_ttl=5000,
        timeout=600,
        retry=Retry(max=2, interval=10),
    )
    return redirect(url_for(".show_result_points", job_key=job.get_id()))
Example #17
def create_job(
        function: callable,
        date_time: datetime,
        utc_hours: int = 0,
        queue_name: str = "email",
        **kwargs,
) -> str:
    """
    Add a new Job to Queue.

    Params:
    ------
    function: callable - The job function
    date_time: datetime - The specific time when the job must be executed
    utc_hours: int - E.g. -5 or +2, the GMT (UTC) offset in hours.
    queue_name: str - The name of the task queue.

    Return:
    ------
    job_id: str - The specific job id
    """

    with Connection(redis.from_url(settings.REDIS_URL)):
        redis_queue = Queue(queue_name)

        # Compute the delay until the requested execution time,
        # adjusted for the caller's UTC offset.
        utc_to_place_time = datetime.utcnow() + timedelta(hours=utc_hours)
        delta = date_time - utc_to_place_time
        # total_seconds() covers deltas longer than a day;
        # timedelta.seconds alone would wrap at 24 hours
        minutes = delta.total_seconds() / 60

        # Enqueue the job.
        job = redis_queue.enqueue_in(
            time_delta=timedelta(minutes=minutes),
            func=function,
            kwargs=kwargs,
            retry=Retry(max=3, interval=[10, 30, 60])
        )

        return job.get_id()
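A hedged usage sketch; `send_welcome_email` and its `recipient` keyword argument are hypothetical stand-ins for a real job function:

from datetime import datetime

job_id = create_job(
    function=send_welcome_email,
    date_time=datetime(2021, 6, 1, 9, 0),  # local time in the GMT-5 zone
    utc_hours=-5,
    queue_name="email",
    recipient="user@example.com",  # forwarded to the job via **kwargs
)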
Example #18
import logging
from datetime import timedelta

import django_rq
from rq import Retry

from .base import BaseBackend
from .utils import _send_notification
from .. import default_settings as settings

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('django_notifs.backends.rq')

retry = None
if settings.NOTIFICATIONS_RETRY:
    retry = Retry(max=settings.NOTIFICATIONS_MAX_RETRIES,
                  interval=settings.NOTIFICATIONS_RETRY_INTERVAL)


class RQBackend(BaseBackend):
    def run(self, countdown):
        for channel_alias in self.notification['channels']:
            queue = django_rq.get_queue(settings.NOTIFICATIONS_QUEUE_NAME)
            queue.enqueue_in(timedelta(seconds=countdown),
                             _send_notification,
                             self.notification,
                             channel_alias,
                             logger,
                             retry=retry)
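A hedged sketch of the Django settings this backend reads; the setting names come from the snippet itself, while the values shown are illustrative:

# settings.py (illustrative values)
NOTIFICATIONS_QUEUE_NAME = 'django_notifs'
NOTIFICATIONS_RETRY = True
NOTIFICATIONS_MAX_RETRIES = 3
NOTIFICATIONS_RETRY_INTERVAL = 60  # seconds between retries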
Example #19
    def _with_retries(self) -> Optional[Retry]:
        ts = self.retry_intervals()
        if ts:
            return Retry(max=len(ts), interval=ts)
        return None
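A hedged sketch of how a helper like this might feed into an enqueue call, written as if inside another method of the same class; `self.queue` and `self.func` are assumptions, not from the original:

retry = self._with_retries()
# A retry of None disables retries entirely, which enqueue() accepts
job = self.queue.enqueue(self.func, retry=retry)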