def get(self):

        # If you have the @ndb.toplevel middleware in place you can just fire and forget:
        tasks.addTask('default', doStuff, "foo")

        # Otherwise it is recommended to call get_result before exiting the request handler:
        tasks.addTask('default', doStuff, "bar").get_result()

        self.response.write("Task enqueued")
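
The fire-and-forget variant mentioned in the comment relies on ndb's @ndb.toplevel decorator, which keeps the handler from returning until all pending async work (including the enqueue RPC) has finished. A minimal sketch of that variant, assuming a webapp2 handler and reusing the tasks/doStuff names from above (EnqueueHandler is an illustrative name):

from google.appengine.ext import ndb
import webapp2


class EnqueueHandler(webapp2.RequestHandler):

    @ndb.toplevel  # drains all outstanding async calls before the response is sent
    def get(self):
        # No get_result() needed: the decorator waits for the enqueue to finish.
        tasks.addTask('default', doStuff, "foo")
        self.response.write("Task enqueued")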
Example #2
  def map_fn(self, items):
    # Re-enqueue the given items for resending, staggered by an increasing
    # countdown, then delete them once the resend tasks have been enqueued.
    tasks.addTask(['worker', 'worker2'],
                  self.mimail_client2.resend,
                  retries=items,
                  _countdown=self.countdown_sec)

    self.countdown_sec += 1
    self.retry_count += len(items)

    for item in items:
      yield item.key.delete_async()
Example #3
def convertFile(filename, format=""):
    # Split once so the output base name is available even when a format is given.
    basename, ext = os.path.splitext(filename)
    if format == "":
        if ext.lower() in [".wav", ".wma"]:
            format = "mp3"
        else:
            format = "psp"

    command = findFFMpeg()
    if command != "":
        args = ["-y", "-i", filename, basename + "." + format]
        tasks.addTask(command, args)
Example #4
  def map_fn(self, entities):
    mimail_client2 = MiMailClient2(self.sendgrid['USERNAME'], self.sendgrid['PASSWORD'])
    for entity in entities:
      self.count += 1
      # Stagger execution: the countdown grows by one second for every
      # QUEUE_CHUNKS_SIZE entities processed so far.
      countdown_sec = self.count // settings.QUEUE_CHUNKS_SIZE

      if not self.is_dry_run:
        r = tasks.addTask(['worker', 'worker2'],
                          mimail_client2.run,
                          schedule=self.schedule_job,
                          content=self.content,
                          recipient_queues=entity,
                          sharding_count_name=self.sharding_count_name,
                          _countdown=countdown_sec).get_result()

        if r:
          self.success_worker += 1
          # The enqueue succeeded; yield an already-resolved dummy future so
          # the generator still hands the caller something to wait on.
          n = ndb.Future('yo dawg')
          n.set_result('yo')
          yield n

        else:
          self.fail_worker += 1
          entity.status = 'fail_worker'
          yield entity.put_async()

      else:
        # Dry run: use the configured fail rate to randomly pick whether this
        # entity follows the success or the failure path.
        r2 = true_false_pick(self.dry_run_fail_rate)
        if r2:
          r = tasks.addTask(['worker', 'worker2'],
                            mimail_client2.run,
                            schedule=self.schedule_job,
                            content=self.content,
                            recipient_queues=entity,
                            sharding_count_name=self.sharding_count_name,
                            _countdown=countdown_sec).get_result()

          if r:
            self.success_worker += 1
            # Same dummy-future bookkeeping as in the non-dry-run branch above.
            n = ndb.Future('yo dawg')
            n.set_result('yo')
            yield n

          else:
            self.fail_worker += 1
            entity.status = 'fail_worker'
            yield entity.put_async()

        else:
          self.fail_worker += 1
          entity.status = 'fail_worker'
          yield entity.put_async()
Example #5
def createTask():

    if request.method == "GET":
        return render_template("newTask.html")

    if request.method == "POST":
        question = request.form["question"]
        answer = request.form["btn"]
        correctAnswer = request.form["correctAnswer"]
        course = request.form["course"]

        if not question or not answer or not correctAnswer:

            return render_template("newTask.html")

        courseId = courses.getId(course)

        tasks.addTask(courseId, question, answer, correctAnswer)

        return redirect("/courseInfo")
Example #6
  def get(self):
    now = Delorean().truncate('minute')
    mtime = self.request.get('mtime')

    logging.info('match schedule_timestamp query = %f' % now.epoch())

    if mtime:
      logging.debug('manual fire schedule %s ' % mtime)
      jobs = Schedule.query(Schedule.schedule_timestamp == float(mtime), Schedule.error == None, Schedule.status == '').fetch()

    elif settings.DEBUG:
      jobs = Schedule.query(Schedule.schedule_timestamp == 1432634400.0, Schedule.error == None, Schedule.status == '').fetch()

    else:
      jobs = Schedule.query(Schedule.schedule_timestamp == now.epoch(), Schedule.error == None, Schedule.status == '').fetch()

    for job in jobs:
      logging.info('job schedule found! category: %s, hour_capacity=%d' % (job.category, job.hour_capacity))

      mailer2_async = Mailer2Async(job.key, ['mailer'])
      tasks.addTask(['mailer'], mailer2_async.run)
Example #7
    def post(self):
        schedule_urlsafe = self.request.get('urlsafe')
        dump_type = self.request.get('dump_type')
        schedule = ndb.Key(urlsafe=schedule_urlsafe).get()

        if schedule:
            tasks_queue_name = ['recipient-queue-data-mapper']

            if dump_type == 'unsend':
                object_name = '{}-{}-unsend-dump.csv'.format(
                    schedule.subject.encode('utf8'),
                    schedule.category.encode('utf8'))
                dumper = FailWorkerDumper(schedule.key, tasks_queue_name,
                                          object_name)
                tasks.addTask(tasks_queue_name, dumper.run)

            elif dump_type == 'send':
                object_name = '{}-{}-logemail-dump.csv'.format(
                    schedule.subject.encode('utf8'),
                    schedule.category.encode('utf8'))
                dumper = LogEmailDumper(schedule.key, tasks_queue_name,
                                        object_name)
                tasks.addTask(tasks_queue_name, dumper.run)
Example #8
  def post(self):
    """
    delete schedule job. change schedule stat first and start
    to delete the following entities.

    schedule job:
      -> RecipientQueueData
      -> LogEmail
      -> FailLogEmail
      -> Retry

    cron job to check if other entities has been delete
    then delete schedule itself.
    """

    schedule_urlsafe = self.request.get('urlsafe')
    logging.info('schedule_urlsafe: %s' % schedule_urlsafe)
    schedule = ndb.Key(urlsafe=schedule_urlsafe).get()

    if schedule:
      schedule.status = 'deleting'
      schedule.put()

      # delete sharding count
      if schedule.sharding_count_name:
        config = ndb.Key(GeneralCounterShardConfig, schedule.sharding_count_name).get()
        if config:
          shard_key_strings = [SHARD_KEY_TEMPLATE.format(schedule.sharding_count_name, index)
                               for index in range(config.num_shards)]

          ndb.delete_multi([ndb.Key(GeneralCounterShard, shard_key_string) for shard_key_string in shard_key_strings])
          config.key.delete()

        mem_sharding_count = memcache.get(schedule.sharding_count_name)
        if mem_sharding_count:
          memcache.delete(schedule.sharding_count_name)

      mapper_RecipientQueueData = RecipientQueueDataDeleteMapper(schedule.key, ['schedule-delete-mapper'])
      mapper_logEmail = LogEmailDeleteMapper(schedule.key, ['schedule-delete-mapper'])
      mapper_FailLogEmail = LogFailEmailDeleteMapper(schedule.key, ['schedule-delete-mapper'])
      mapper_Retry = RetryDeleteMapper(schedule.key, ['schedule-delete-mapper'])

      tasks.addTask(['schedule-delete-mapper'], mapper_RecipientQueueData.run)
      tasks.addTask(['schedule-delete-mapper'], mapper_logEmail.run)
      tasks.addTask(['schedule-delete-mapper'], mapper_FailLogEmail.run)
      tasks.addTask(['schedule-delete-mapper'], mapper_Retry.run)
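
The docstring above refers to a cron job that removes the schedule once its child entities are gone; that handler is not part of these examples. A hedged sketch of what it could look like, assuming the child kinds are ancestor-keyed under the schedule (the handler name, entity names, and the ancestor layout are assumptions):

class ScheduleDeleteCheckCron(webapp2.RequestHandler):

  def get(self):
    for schedule in Schedule.query(Schedule.status == 'deleting').fetch():
      # Assumed: child entities live under the schedule key as their ancestor.
      still_has_children = (
          RecipientQueueData.query(ancestor=schedule.key).get(keys_only=True) or
          LogEmail.query(ancestor=schedule.key).get(keys_only=True) or
          LogFailEmail.query(ancestor=schedule.key).get(keys_only=True) or
          ReTry.query(ancestor=schedule.key).get(keys_only=True))

      if not still_has_children:
        schedule.key.delete()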
Example #9
  def post(self):
    parameters = pickle.loads(self.request.get('parameters'))

    parser = Parser()
    r = tasks.addTask(['parsecsv'], parser.run, parameters, settings.MAX_TASKSQUEUE_EXECUTED_TIME).get_result()

    if not r:
      new_schedule = Schedule()
      new_schedule.sendgrid_account = parameters.get('sendgrid_account')
      new_schedule.subject = parameters.get('subject')
      new_schedule.sender_name = parameters.get('sender_name')
      new_schedule.sender_email = parameters.get('sender_email')

      new_schedule.category = parameters.get('category')
      new_schedule.reply_to = parameters.get('reply_to')
      new_schedule.type = parameters.get('type')

      new_schedule.txt_object_name = parameters.get('txt_object_name')
      new_schedule.edm_object_name = parameters.get('edm_object_name')
      new_schedule.bucket_name = parameters.get('bucket_name')
      new_schedule.replace_edm_csv_property = parameters.get('replace_edm_csv_property')

      new_schedule.schedule_duration = int(parameters.get('schedule_duration'))
      new_schedule.ip_counts = int(parameters.get('ip_counts'))

      new_schedule.recipient_skip = int(parameters.get('recipient_skip'))
      new_schedule.hour_rate = int(parameters.get('hour_rate'))
      new_schedule.start_time = parameters.get('start_time')
      new_schedule.daily_capacity = int(parameters.get('daily_capacity'))

      new_schedule.error = 'add schedule job taskqueue fail. retry later.'

      new_schedule.is_dry_run = parameters.get('is_dry_run')
      new_schedule.dry_run_fail_rate = parameters.get('dry_run_fail_rate')

      new_schedule.put()
Example #10
 def get(self):
     schedule_mapper = ClearRecipientQueueDataMapper(
         ['schedule-delete-mapper'])
     tasks.addTask(['schedule-delete-mapper'], schedule_mapper.run)
Example #11
 def enqueue(self, next_cursor):
   # new_mapper = Mailer2Async(self.schedule_key, self.tasks_queue)
   # tasks.addTask(self.tasks_queue, new_mapper._continue, c)
   tasks.addTask(self.tasks_queue, self._continue, next_cursor)
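
This enqueue() pattern (also used in the following examples) hands the next datastore cursor to a fresh task so each task stays short; the surrounding mapper class is not shown on this page. A minimal sketch of such a cursor-driven mapper, assuming an ndb query and illustrative build_query/map hooks:

class Mapper(object):
  BATCH_SIZE = 100  # assumed batch size

  def __init__(self, tasks_queue):
    self.tasks_queue = tasks_queue

  def run(self):
    self._continue(None)

  def _continue(self, cursor):
    # Fetch one page, process it, and re-enqueue ourselves if more remain.
    entities, next_cursor, more = self.build_query().fetch_page(
        self.BATCH_SIZE, start_cursor=cursor)
    self.map(entities)
    if more:
      self.enqueue(next_cursor)

  def enqueue(self, next_cursor):
    tasks.addTask(self.tasks_queue, self._continue, next_cursor)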
Example #12
 def enqueue(self, next_cursor):
   # new_mapper = RecipientQueueDataDeleteMapper(self.schedule_key, self.tasks_queue)
   # tasks.addTask(self.tasks_queue, new_mapper._continue)
   tasks.addTask(self.tasks_queue, self._continue, next_cursor)
Example #13
 def enqueue(self, next_cursor):
   # new_mapper = LogFailEmailDeleteMapper(self.schedule_key, self.tasks_queue)
   # tasks.addTask(self.tasks_queue, new_mapper._continue)
   tasks.addTask(self.tasks_queue, self._continue, next_cursor)
Example #14
 def enqueue(self):
     new_mapper = ClearReTryMapper(self.tasks_queue)
     tasks.addTask(self.tasks_queue, new_mapper._continue)
Example #15
 def enqueue(self):
     new_mapper = ClearRecipientQueueDataMapper(self.tasks_queue)
     tasks.addTask(self.tasks_queue, new_mapper._continue)
Example #16
 def get(self):
     clear_retry_mapper = ClearReTryMapper(['schedule-delete-mapper'])
     tasks.addTask(['schedule-delete-mapper'], clear_retry_mapper.run)
Example #17
 def enqueue(self, next_cursor):
     self.writer = None
     tasks.addTask(self.tasks_queue, self._continue, next_cursor)
Example #18
 def enqueue(self, next_cursor):
   tasks.addTask(self.tasks_queue, self._continue, next_cursor)
Example #19
 def get(self):
   if ReTry.query().get() is not None:
     tasks_queue = ['retry-resend']
     retry_send_async_mapper = RetrySendAsyncMapper(tasks_queue)
     tasks.addTask(tasks_queue, retry_send_async_mapper.run)
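
The tasks.addTask helper itself is not shown in these examples. From the call sites above, it takes a queue name (or a list of names to choose from), a callable, positional and keyword arguments for that callable, plus task options such as _countdown, and it returns an async RPC whose get_result() completes the enqueue. A hedged sketch of such a helper built on the deferred payload format (everything below is an assumption, not this project's actual code):

import random

from google.appengine.api import taskqueue
from google.appengine.ext import deferred


def addTask(queues, func, *args, **kwargs):
    """Enqueue func(*args, **kwargs) on one of the given push queues."""
    if isinstance(queues, basestring):
        queues = [queues]

    countdown = kwargs.pop('_countdown', None)  # task option used in the examples above

    # Reuse deferred's pickle format and handler URL so the stock
    # /_ah/queue/deferred endpoint can execute the call.
    payload = deferred.serialize(func, *args, **kwargs)
    task = taskqueue.Task(url='/_ah/queue/deferred',
                          payload=payload,
                          headers={'Content-Type': 'application/octet-stream'},
                          countdown=countdown)

    # add_async returns an RPC; callers can wait with .get_result() or, under
    # @ndb.toplevel, simply fire and forget.
    return taskqueue.Queue(random.choice(queues)).add_async(task)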