Example no. 1
    def test_page_obj_change_data_from_template_tags(self):
        from django import template

        superuser = self.get_superuser()
        with self.login_user_context(superuser):
            page_data = self.get_new_page_data()
            change_user = str(superuser)
            # some databases don't store microseconds, so move the start marker back by one second
            before_change = tz_now()+datetime.timedelta(seconds=-1)
            self.client.post(URL_CMS_PAGE_ADD, page_data)
            page = Page.objects.get(title_set__slug=page_data['slug'], publisher_is_draft=True)
            self.client.post('/en/admin/cms/page/%s/' % page.id, page_data)
            t = template.Template("{% load cms_tags %}{% page_attribute changed_by %} changed on {% page_attribute changed_date as page_change %}{{ page_change|date:'Y-m-d\TH:i:s' }}")
            req = HttpRequest()
            page.save()
            page.publish('en')
            after_change = tz_now()
            req.current_page = page
            req.REQUEST = {}

            actual_result = t.render(template.Context({"request": req}))
            desired_result = "{0} changed on {1}".format(change_user, actual_result[-19:])
            save_time = make_aware(datetime.datetime.strptime(actual_result[-19:], "%Y-%m-%dT%H:%M:%S"), get_current_timezone())

            self.assertEqual(actual_result, desired_result)
            # direct time comparisons are flaky, so we just check if the page's changed_date is within the time range taken by this test
            self.assertTrue(before_change <= save_time)
            self.assertTrue(save_time <= after_change)
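Every snippet in this collection calls tz_now() without showing its import; a minimal sketch of the assumed setup (tz_now taken to be an alias for Django's timezone-aware now()) is:

# Assumed alias, not shown in the snippets themselves: django.utils.timezone.now
# returns an aware datetime when USE_TZ = True and a naive one otherwise.
from django.utils.timezone import now as tz_now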
Example no. 2
    def run(self, save=True):
        """
        Runs this ``Job``.  If ``save`` is ``True`` the dates (``last_run`` and ``next_run``)
        are updated.  If ``save`` is ``False`` the job simply gets run and nothing changes.

        A ``Log`` will be created if there is any output from either stdout or stderr.
        """
        success = False
        run_date = tz_now()
        self.is_running = True
        self.pid = os.getpid()
        self.host = socket.gethostname()
        self.next_run = self.rrule.after(run_date)
        self.last_run = run_date
        self.save()

        stdout_str, stderr_str = "", ""

        try:
            if self.shell_command:
                stdout_str, stderr_str, success = self.run_shell_command()
            else:
                stdout_str, stderr_str, success = self.run_management_command()
        finally:
            # since jobs can be long running, reload the object to pick up
            # any updates to the object since the job started
            self = self.__class__.objects.get(id=self.id)
            # If stderr was written the job is not successful
            self.last_run_successful = success
            self.is_running = False
            self.pid = None
            self.host = None
            self.adhoc_run = False
            self.save()

        end_date = tz_now()

        # Create a log entry no matter what to see the last time the Job ran:
        log = Log.objects.create(
            job=self,
            hostname=socket.gethostname(),
            run_date=run_date,
            end_date=end_date,
            stdout=stdout_str,
            stderr=stderr_str,
            success=success,
        )

        # If there was any output to stderr, e-mail it to any error (default) subscribers.
        # We'll assume that if there was any error output, even if there was also info output,
        # an error exists and needs to be dealt with.
        if stderr_str:
            log.email_subscribers()

        # Otherwise - if there was only output to stdout, e-mail it to any info subscribers
        elif stdout_str:
            log.email_subscribers(is_info=True)
Example no. 3
 def save(self, *args, **kwargs):
     if not self.pk:
         self.creation_date = tz_now()
     else:
         # There are some strange creation_date entries in the dump.
         # To ensure that we always have a creation date, we set it here.
         if not self.creation_date:
             self.creation_date = tz_now()
             
     super(CreationDateMixin, self).save(*args, **kwargs)
Example no. 4
 def test_06_event_is_finished(self):
     """
     Test if the event is finished and if the title is set correctly
     with the tag [FINISHED]
     """
     event = Event()
     event.date_start = tz_now() - datetime.timedelta(minutes=20)
     event.date_end = tz_now() - datetime.timedelta(minutes=10)
     event.title = 'My Title'
     event.summary = ''
     event.category = event.MAINTENANCE
     event.save()
     event = Event.objects.all()[0]
     title = self.rss.item_title(event)
     self.assertEqual(title, '[FINISHED] My Title')
Example no. 5
    def save(self, *args, **kwargs):
        daymodel = self.cleaned_data['daymodel']
        with_content = self.cleaned_data['with_content']
        daymodel_delta = daymodel.stop - daymodel.start
        # Build a datetime for each selected day, using the datebook period as the base date
        daydates = [self.daydate.replace(day=int(item)) for item in self.cleaned_data['days']]
        
        # Compute the DST offset between the winter and summer seasons for the current timezone
        self.current_tz = get_current_timezone()
        self.winter_offset = make_aware(datetime.datetime(daymodel.start.year, 1, 1), self.current_tz).utcoffset()
        self.summer_offset = make_aware(datetime.datetime(daymodel.start.year, 7, 1), self.current_tz).utcoffset()
        self.seasons_offset = (self.summer_offset-self.winter_offset)
        
        # Fill existing entries
        for entry in self.datebook.dayentry_set.filter(activity_date__in=daydates).order_by('activity_date'):
            # Get the start/stop datetimes
            goto_start, goto_stop = self.combine_day_and_daymodel_time(entry.start, daymodel.start, daymodel_delta)
            
            # Fill object attribute using the daymodel
            entry.start = goto_start
            entry.stop = goto_stop
            entry.pause = daymodel.pause
            entry.overtime = daymodel.overtime
            if with_content:
                entry.content = daymodel.content
            entry.vacation = False  # Always remove the vacation flag
            entry.save()
            # Remove the day number from remaining selected days
            i = self.cleaned_data['days'].index(str(entry.activity_date.day))
            self.cleaned_data['days'].pop(i)
        
        # Create remaining selected days
        new_days = []
        for day_no in self.cleaned_data['days']:
            activity_date = self.datebook.period.replace(day=int(day_no))

            goto_start, goto_stop = self.combine_day_and_daymodel_time(activity_date, daymodel.start, daymodel_delta)
            
            content = ""
            if with_content:
                content = daymodel.content
            
            new_days.append(DayEntry(
                datebook=self.datebook,
                activity_date=activity_date,
                start=goto_start,
                stop=goto_stop,
                pause=daymodel.pause,
                overtime=daymodel.overtime,
                content=content,
                vacation=False,
            ))
        # Bulk create all new days
        if new_days:
            DayEntry.objects.bulk_create(new_days)
        # Update the datebook because the model save() method is not triggered by bulk_create
        self.datebook.modified = tz_now()
        self.datebook.save()
        
        return None
Example no. 6
def upload_to(instance, filename):
    now = tz_now()
    filename_base, filename_ext = os.path.splitext(filename)
    return "editorial/%s%s" % (
        now.strftime("%Y/%m/%Y%m%d%H%M%S"),
        filename_ext.lower(),
    )
Example no. 7
    def update_on_save(self, sender, instance, created, **kwargs):
        current, updated = getattr(instance, self.get_cache_name())

        if updated is None:
            return None

        queryset = self.get_collection(instance).exclude(pk=instance.pk)

        updates = {}
        if self.auto_now_fields:
            now = tz_now()
            for field in self.auto_now_fields:
                updates[field.name] = now

        if created:
            # increment positions gte updated
            queryset = queryset.filter(**{'%s__gte' % self.name: updated})
            updates[self.name] = models.F(self.name) + 1
        elif updated > current:
            # decrement positions gt current and lte updated
            queryset = queryset.filter(**{'%s__gt' % self.name: current, '%s__lte' % self.name: updated})
            updates[self.name] = models.F(self.name) - 1
        else:
            # increment positions lt current and gte updated
            queryset = queryset.filter(**{'%s__lt' % self.name: current, '%s__gte' % self.name: updated})
            updates[self.name] = models.F(self.name) + 1

        queryset.update(**updates)
        setattr(instance, self.get_cache_name(), (updated, None))
Example no. 8
 def get_queryset(self):
     conditions = []
     now = tz_now()
     conditions.append(models.Q(
         published_from=None,
         published_till=None,
         ))
     conditions.append(models.Q(
         published_from__lt=now,
         published_till=None,
         ))
     conditions.append(models.Q(
         published_from=None,
         published_till__gt=now,
         ))
     conditions.append(models.Q(
         published_from__lt=now,
         published_till__gt=now,
         ))
     return super(
         PublishingMixinPublishedManager,
         self,
         ).get_queryset().filter(
             reduce(operator.or_, conditions),
             ).filter(status__exact=STATUS_CODE_PUBLISHED)
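For readability, the four OR'd Q objects above can also be written as a conjunction of two disjunctions; a hedged sketch assuming the same field names and the same now value (expanding the AND over the ORs reproduces exactly the four explicit combinations):

# Hypothetical equivalent filter: each bound is either unset (open-ended) or
# must bracket "now"; a null value means no restriction on that side.
published_q = (
    (models.Q(published_from=None) | models.Q(published_from__lt=now))
    & (models.Q(published_till=None) | models.Q(published_till__gt=now))
)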
Example no. 9
    def update_global(self, label,
                      lozenge=None, icons=None):
        """POST global update to the glance (all users, rooms).

        Args:
            label: string, the glance label text

        Kwargs:
            lozenge: a Lozenge tuple, if this update is altering the lozenge
            icons: an Icon tuple, if this update is altering the icon

        Returns a GlanceUpdate object.

        """
        url = "https://api.hipchat.com/v2/addon/ui"
        update = GlanceUpdate(
            glance=self,
            label_value=label,
            lozenge=lozenge or Lozenge(LOZENGE_EMPTY, ''),
            icons=icons or Icon('', '')
        )
        token = (
            Install.objects
            .filter(app=self.app)
            .filter(expires_at__gt=tz_now())
            .first()
        )
        if token is None:
            raise NoValidAccessToken()
        return self._update(url, token.access_token, update)
Example no. 10
    def test_05_event_finished(self):
        now = tz_now().replace(microsecond=0) - datetime.timedelta(minutes=20)
        end = now + datetime.timedelta(minutes=10)
        event = self._create_event(now, end)

        cron_social_network()
        self.assertEqual(SnEvent.objects.count(), 0)
Example no. 11
    def get_timeuntil(self):
        """
        Returns a string representing the time until the next
        time this Job will be run.
        """
        if self.adhoc_run:
            return _('ASAP')
        elif self.disabled:
            return _('never (disabled)')
        elif not self.next_run:
            return "-"

        delta = self.next_run - tz_now()
        if delta.days < 0:
            # The job is past due and should be run as soon as possible
            return _('due')
        elif delta.seconds < 60:
            # Adapted from django.utils.timesince
            count = lambda n: ungettext('second', 'seconds', n)
            return ugettext('%(number)d %(type)s') % {
                'number': delta.seconds,
                'type': count(delta.seconds)
            }

        return timeuntil(self.next_run)
Example no. 12
    def test_06_postpone_with_date_end(self):
        event = Event()
        now = tz_now().replace(microsecond=0)
        end = now + datetime.timedelta(minutes=10)
        event.date_start = now
        event.duration = 10
        event.title = 'My Title'
        event.summary = ''
        event.category = 1
        event.save()
        event.date_end = end
        event.save()

        event = Event.objects.get(title='My Title')
        self.assertEqual(event.date_start, now)
        self.assertEqual(event.date_end, end)
        self.assertEqual(event.estimate_date_end, end)
        self.assertEqual(event.duration, 10)
        event.date_start = end
        event.date_end = event.date_start + datetime.timedelta(minutes=12)
        event.save()

        event = Event.objects.get(title='My Title')
        self.assertEqual(event.date_start, end)
        self.assertEqual(event.date_end, end + datetime.timedelta(minutes=12))
        self.assertEqual(event.estimate_date_end, event.date_end)
        self.assertEqual(event.duration, 12)
Example no. 13
 def test_01_rss_order(self):
     """
     Test that the RSS feed is generated in the correct order (pubdate descending)
     """
     def getKey(last_update):
         return last_update
     event = Event()
     order_expected = sorted(
         [event.last_update for event in Event.objects.all()[:50]], key=getKey, reverse=True)
     items_order = [event.last_update for event in self.rss.items()]
     self.assertListEqual(items_order, order_expected)
     event.date_start = tz_now() + datetime.timedelta(minutes=10)
     event.duration = 120
     event.category = event.MAINTENANCE
     event.summary = ''
     event.save()
     order_expected = sorted(
         [event.last_update for event in Event.objects.all()[:50]], key=getKey, reverse=True)
     items_order = [event.last_update for event in self.rss.items()]
     self.assertListEqual(items_order, order_expected)
     eventlog = EventLog(
         event=event,
         comment='',
         user_id=1)
     eventlog.save()
     order_expected = sorted(
         [event.last_update for event in Event.objects.all()[:50]], key=getKey, reverse=True)
     items_order = [event.last_update for event in self.rss.items()]
     self.assertListEqual(items_order, order_expected)
Example no. 14
    def test_06_event_maintenance_future(self):
        self.assertEqual(SnEvent.objects.count(), 0)

        now = tz_now().replace(microsecond=0) + datetime.timedelta(minutes=10)
        end = now + datetime.timedelta(minutes=10)
        event = self._create_event(now, end, Event.MAINTENANCE)

        self.assertEqual(SnEvent.objects.count(), 0)
Example no. 15
 def due(self):
     """
     Returns a ``QuerySet`` of all jobs waiting to be run.
     """
     return self.filter(
         Q(next_run__lte=tz_now(), disabled=False, is_running=False)
         | Q(adhoc_run=True, is_running=False)
     )
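Taken together with the Job.run() method shown in Example no. 2, a scheduler entry point would presumably drain this queryset; a minimal sketch, assuming the method above is exposed as Job.objects.due():

# Hypothetical cron entry point: run every job the manager reports as due.
def run_due_jobs():
    for job in Job.objects.due():
        job.run()  # updates last_run/next_run and writes a Log entry (see Example no. 2)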
Example no. 16
 def update_from_pypi(self):
     """Call get_latest_version and then save the object."""
     info  = self.get_info()
     self.licence = package_licence(info)
     self.latest_version = package_version(info)
     self.diff_status = version_diff(self.current_version, self.latest_version)
     self.checked_pypi_at = tz_now()
     self.save()
     return self
Example no. 17
 def save(self, *args, **kwargs):
     if not self.author:
         self.author = get_current_user()
         
     # publishing date save logic.
     if not self.published_from:
         self.published_from = tz_now()
               
     super(PublishingMixin, self).save(*args, **kwargs)
Example no. 18
 def update_on_delete(self, sender, instance, **kwargs):
     current = getattr(instance, self.get_cache_name())[0]
     queryset = self.get_collection(instance)
     updates = {self.name: models.F(self.name) - 1}
     if self.auto_now_fields:
         now = tz_now()
         for field in self.auto_now_fields:
             updates[field.name] = now
     queryset.filter(**{'%s__gt' % self.name: current}).update(**updates)
Example no. 19
 def test_04_event_started_without_end(self):
     now = tz_now().replace(microsecond=0) - datetime.timedelta(minutes=10)
     event = self._create_event(now)
     cron_social_network()
     self.assertEqual(SnEvent.objects.count(), 1)
     sn_event = SnEvent.objects.get(id=1)
     self.assertEqual(sn_event.event, event)
     self.assertEqual(sn_event.name, MockSN.name)
     self.assertEqual(sn_event.sn_id, '1')
Example no. 20
 def test_03_title_event_maintenance_is_started(self):
     """
     Test that the event has been started and that the title is set correctly
     with the tag [STARTED]
     """
     event = Event()
     event.date_start = tz_now() - datetime.timedelta(minutes=10)
     event.duration = 120
     event.title = 'My Title'
     event.summary = ''
     event.category = event.MAINTENANCE
     event.save()
     event = Event.objects.all()[0]
     title = self.rss.item_title(event)
     self.assertEqual(title, '[STARTED] My Title')
     event.date_end = tz_now() + datetime.timedelta(minutes=20)
     event.save()
     title = self.rss.item_title(event)
     self.assertEqual(title, '[STARTED] My Title')
Example no. 21
 def test_sitemap_includes_last_modification_date(self):
     one_day_ago = tz_now() - datetime.timedelta(days=1)
     page = create_page("page", "nav_playground.html", "en", published=True, publication_date=one_day_ago)
     page.creation_date = one_day_ago
     page.save()
     page.publish('en')
     sitemap = CMSSitemap()
     self.assertEqual(sitemap.items().count(), 1)
     actual_last_modification_time = sitemap.lastmod(sitemap.items()[0])
     self.assertTrue(actual_last_modification_time > one_day_ago)
Example no. 22
    def public(self, *args, **kwargs):
        """
        Returns everything that is active, has not expired, and has already been published.
        """

        now = tz_now()
        qs = self.active(*args, **kwargs)
        return qs.filter(models.Q(expiration_date__gte=now) | models.Q(expiration_date__isnull=True), 
                publication_date__lte=now,
                status__is_public=True)
Example no. 23
    def save(self, *args, **kwargs):
        if not self.disabled:
            if not self.last_run:
                self.last_run = tz_now()
            if not self.next_run:
                self.next_run = self.rrule.after(self.last_run)
        else:
            self.next_run = None

        super(Job, self).save(*args, **kwargs)
Example no. 24
 def handle(self, *args, **options):
     running_jobs = Job.objects.filter(is_running=True)
     now = tz_now()
     for job in running_jobs:
         if now > (job.last_run + dt.timedelta(seconds=job.timeout or 60*10)):
             print('setting job with pid {} as not running (no pid found in os)'.format(job.pid))
             job.is_running = False
             job.pid = None
             job.host = None
             job.save(update_fields=['is_running', 'pid', 'host'])
Example no. 25
 def update_from_pypi(self):
     """Call get_latest_version and then save the object."""
     package = pypi.Package(self.package_name)
     self.licence = package.licence()
     self.latest_version = package.latest_version()
     self.next_version = package.next_version(self.current_version)
     self.diff_status = pypi.version_diff(self.current_version, self.latest_version)
     self.checked_pypi_at = tz_now()
     self.save()
     return self
Example no. 26
 def latest_updated(self):
     """
     Returns the latest item's updated or the current time if there are
     no items.
     """
     updates = [item['updated'] for item in self.items]
     if len(updates) > 0:
         updates.sort()
         return updates[-1]
     else:
         return tz_now()  # @@@ really we should allow a feed to define
Example no. 27
    def days_since_user_score(self, user):
        """Return the number of days since the User last submitted a score.

        Returns -1 if the user has never been asked.

        """
        score = self.most_recent_user_score(user)
        if score is None:
            return -1
        else:
            return (tz_now().date() - score.timestamp.date()).days
Example no. 28
 def test_page_already_expired(self):
     """
     Test that a page which has an end date in the past gives a 404, not a
     500.
     """
     yesterday = tz_now() - datetime.timedelta(days=1)
     with SettingsOverride(CMS_PERMISSION=False):
         page = create_page('page', 'nav_playground.html', 'en',
                            publication_end_date=yesterday, published=True)
         resp = self.client.get(page.get_absolute_url('en'))
         self.assertEqual(resp.status_code, 404)
Example no. 29
 def save(self, *args, **kwargs):
     """
     Fill 'created' and 'modified' attributes on first create
     """
     if self.created is None:
         self.created = tz_now()
     
     if self.modified is None:
         self.modified = self.created
         
     super(Thread, self).save(*args, **kwargs)
Example no. 30
 def test_sitemap_uses_publication_date_when_later_than_modification(self):
     now = tz_now()
     now -= datetime.timedelta(microseconds=now.microsecond)
     one_day_ago = now - datetime.timedelta(days=1)
     page = create_page("page", "nav_playground.html", "en", published=True, publication_date=now)
     title = page.get_title_obj('en')
     page.creation_date = one_day_ago
     page.changed_date = one_day_ago
     sitemap = CMSSitemap()
     actual_last_modification_time = sitemap.lastmod(title)
     self.assertEqual(actual_last_modification_time.date(), now.date())
Example no. 31
def get_past_event_cutoff():
    return (tz_now() - datetime.timedelta(days=1)).date()
Example no. 32
 def save(self, **kwargs):
     self.date_updated = tz_now()
     if not self.date_created:
         self.date_created = tz_now()
     return super(AbstractFeedSubmission, self).save(**kwargs)
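The manual timestamping above is close to what Django's auto_now and auto_now_add field options do; a sketch of the field declarations this save() presumably pairs with (names taken from the code, the declarations themselves are assumptions):

 # Assumed field declarations; the explicit save() keeps the timestamps
 # settable in tests and bulk operations, which auto_now/auto_now_add would not allow.
 date_created = models.DateTimeField(blank=True, null=True)
 date_updated = models.DateTimeField(blank=True, null=True)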
Example no. 33
    def process_pending_tasks(self, pending_tasks):
        running_workflow_templates = set([
            wf.unified_job_template_id
            for wf in self.get_running_workflow_jobs()
        ])
        tasks_to_update_job_explanation = []
        for task in pending_tasks:
            if self.start_task_limit <= 0:
                break
            blocked_by = self.job_blocked_by(task)
            if blocked_by:
                task.log_lifecycle("blocked", blocked_by=blocked_by)
                job_explanation = gettext_noop(
                    f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish"
                )
                if task.job_explanation != job_explanation:
                    if task.created < (tz_now() -
                                       self.time_delta_job_explanation):
                        task.job_explanation = job_explanation
                        tasks_to_update_job_explanation.append(task)
                continue
            preferred_instance_groups = task.preferred_instance_groups

            found_acceptable_queue = False
            if isinstance(task, WorkflowJob):
                if task.unified_job_template_id in running_workflow_templates:
                    if not task.allow_simultaneous:
                        logger.debug(
                            "{} is blocked from running, workflow already running"
                            .format(task.log_format))
                        continue
                else:
                    running_workflow_templates.add(
                        task.unified_job_template_id)
                self.start_task(task, None, task.get_jobs_fail_chain(), None)
                continue

            for rampart_group in preferred_instance_groups:
                if task.capacity_type == 'execution' and rampart_group.is_container_group:
                    self.graph[rampart_group.name]['graph'].add_job(task)
                    self.start_task(task, rampart_group,
                                    task.get_jobs_fail_chain(), None)
                    found_acceptable_queue = True
                    break

                # TODO: remove this after we have confidence that OCP control nodes are reporting node_type=control
                if settings.IS_K8S and task.capacity_type == 'execution':
                    logger.debug(
                        "Skipping group {}, task cannot run on control plane".
                        format(rampart_group.name))
                    continue

                remaining_capacity = self.get_remaining_capacity(
                    rampart_group.name, capacity_type=task.capacity_type)
                if task.task_impact > 0 and remaining_capacity <= 0:
                    logger.debug(
                        "Skipping group {}, remaining_capacity {} <= 0".format(
                            rampart_group.name, remaining_capacity))
                    continue

                execution_instance = InstanceGroup.fit_task_to_most_remaining_capacity_instance(
                    task, self.graph[rampart_group.name]
                    ['instances']) or InstanceGroup.find_largest_idle_instance(
                        self.graph[rampart_group.name]['instances'],
                        capacity_type=task.capacity_type)

                if execution_instance or rampart_group.is_container_group:
                    if not rampart_group.is_container_group:
                        execution_instance.remaining_capacity = max(
                            0, execution_instance.remaining_capacity -
                            task.task_impact)
                        execution_instance.jobs_running += 1
                        logger.debug(
                            "Starting {} in group {} instance {} (remaining_capacity={})"
                            .format(task.log_format, rampart_group.name,
                                    execution_instance.hostname,
                                    remaining_capacity))

                    if execution_instance:
                        execution_instance = self.real_instances[
                            execution_instance.hostname]
                    self.graph[rampart_group.name]['graph'].add_job(task)
                    self.start_task(task, rampart_group,
                                    task.get_jobs_fail_chain(),
                                    execution_instance)
                    found_acceptable_queue = True
                    break
                else:
                    logger.debug(
                        "No instance available in group {} to run job {} w/ capacity requirement {}"
                        .format(rampart_group.name, task.log_format,
                                task.task_impact))
            if not found_acceptable_queue:
                task.log_lifecycle("needs_capacity")
                job_explanation = gettext_noop(
                    "This job is not ready to start because there is not enough available capacity."
                )
                if task.job_explanation != job_explanation:
                    if task.created < (tz_now() -
                                       self.time_delta_job_explanation):
                        # Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds.
                        # Therefore we should only update the job_explanation after some time has elapsed to
                        # prevent excessive task saves.
                        task.job_explanation = job_explanation
                        tasks_to_update_job_explanation.append(task)
                logger.debug(
                    "{} couldn't be scheduled on graph, waiting for next cycle"
                    .format(task.log_format))
        UnifiedJob.objects.bulk_update(tasks_to_update_job_explanation,
                                       ['job_explanation'])
Example no. 34
def gen_unique_id():
    return hashlib.sha1(u'{0}:{1}'.format(
        get_random_string(ID_LENGTH), tz_now()).encode('utf-8')).hexdigest()
Example no. 35
    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        timeout = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        connection = connections[db]
        quote_name = connection.ops.quote_name
        table = quote_name(self._table)

        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            now = tz_now()
            now = now.replace(microsecond=0)
            if timeout is None:
                exp = datetime.max
            else:
                tz = timezone.utc if settings.USE_TZ else None
                exp = datetime.fromtimestamp(timeout, tz=tz)
            exp = exp.replace(microsecond=0)
            if num > self._max_entries:
                self._cull(db, cursor, now, num)
            pickled = pickle.dumps(value, self.pickle_protocol)
            # The DB column is expecting a string, so make sure the value is a
            # string, not bytes. Refs #19274.
            b64encoded = base64.b64encode(pickled).decode("latin1")
            try:
                # Note: typecasting for datetimes is needed by some 3rd party
                # database backends. All core backends work without typecasting,
                # so be careful about changes here - test suite will NOT pick
                # regressions.
                with transaction.atomic(using=db):
                    cursor.execute(
                        "SELECT %s, %s FROM %s WHERE %s = %%s" % (
                            quote_name("cache_key"),
                            quote_name("expires"),
                            table,
                            quote_name("cache_key"),
                        ),
                        [key],
                    )
                    result = cursor.fetchone()

                    if result:
                        current_expires = result[1]
                        expression = models.Expression(
                            output_field=models.DateTimeField())
                        for converter in connection.ops.get_db_converters(
                                expression) + expression.get_db_converters(
                                    connection):
                            current_expires = converter(
                                current_expires, expression, connection)

                    exp = connection.ops.adapt_datetimefield_value(exp)
                    if result and mode == "touch":
                        cursor.execute(
                            "UPDATE %s SET %s = %%s WHERE %s = %%s" %
                            (table, quote_name("expires"),
                             quote_name("cache_key")),
                            [exp, key],
                        )
                    elif result and (mode == "set" or
                                     (mode == "add"
                                      and current_expires < now)):
                        cursor.execute(
                            "UPDATE %s SET %s = %%s, %s = %%s WHERE %s = %%s" %
                            (
                                table,
                                quote_name("value"),
                                quote_name("expires"),
                                quote_name("cache_key"),
                            ),
                            [b64encoded, exp, key],
                        )
                    elif mode != "touch":
                        cursor.execute(
                            "INSERT INTO %s (%s, %s, %s) VALUES (%%s, %%s, %%s)"
                            % (
                                table,
                                quote_name("cache_key"),
                                quote_name("value"),
                                quote_name("expires"),
                            ),
                            [key, b64encoded, exp],
                        )
                    else:
                        return False  # touch failed.
            except DatabaseError:
                # To be threadsafe, updates/inserts are allowed to fail silently
                return False
            else:
                return True
Example no. 36
 def _support_admin_time(self):
     end = time(settings.SUPPORT_ADMIN_END_HOUR)
     start = time(settings.SUPPORT_ADMIN_START_HOUR)
     return end >= tz_now().time() >= start
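One caveat worth noting about this pattern (an observation, not something stated in the snippet): with USE_TZ enabled, tz_now() is UTC-based, so .time() yields the UTC wall-clock time. If SUPPORT_ADMIN_START_HOUR and SUPPORT_ADMIN_END_HOUR are meant as local hours, a sketch that converts to the current timezone first might look like this:

 # Hedged variant: compare against local wall-clock time instead of UTC.
 def _support_admin_time(self):
     from django.utils.timezone import localtime
     end = time(settings.SUPPORT_ADMIN_END_HOUR)
     start = time(settings.SUPPORT_ADMIN_START_HOUR)
     return start <= localtime(tz_now()).time() <= end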
Example no. 37
def upload_to(instance, filename):
    now = tz_now()
    filename_base, filename_ext = os.path.splitext(filename)
    return "editorial/%s%s" % (
        now.strftime("%Y/%m/%Y%m%d%H%M%S"),
        filename_ext.lower())
Example no. 38
    def cleanup_inconsistent_celery_tasks(self):
        '''
        Rectify tower db <-> celery inconsistent view of jobs state
        '''
        last_cleanup = cache.get(
            'last_celery_task_cleanup') or datetime.min.replace(tzinfo=utc)
        if (tz_now() - last_cleanup
            ).seconds < settings.AWX_INCONSISTENT_TASK_INTERVAL:
            return

        logger.debug("Failing inconsistent running jobs.")
        celery_task_start_time = tz_now()
        active_task_queues, active_queues = self.get_active_tasks()
        cache.set('last_celery_task_cleanup', tz_now())

        if active_queues is None:
            logger.error('Failed to retrieve active tasks from celery')
            return None
        '''
        Only consider failing tasks on instances for which we obtained a task
        list from celery.
        '''
        running_tasks, waiting_tasks = self.get_running_tasks()
        all_celery_task_ids = []
        for node, node_jobs in active_queues.iteritems():
            all_celery_task_ids.extend(node_jobs)

        self.fail_jobs_if_not_in_celery(waiting_tasks, all_celery_task_ids,
                                        celery_task_start_time)

        for node, node_jobs in running_tasks.iteritems():
            isolated = False
            if node in active_queues:
                active_tasks = active_queues[node]
            else:
                '''
                Node task list not found in celery. We may branch into cases:
                 - instance is unknown to tower, system is improperly configured
                 - instance is reported as down, then fail all jobs on the node
                 - instance is an isolated node, then check running tasks
                   among all allowed controller nodes for management process
                 - valid healthy instance not included in celery task list
                   probably a netsplit case, leave it alone
                '''
                instance = Instance.objects.filter(hostname=node).first()

                if instance is None:
                    logger.error(
                        "Execution node Instance {} not found in database. "
                        "The node is currently executing jobs {}".format(
                            node, [j.log_format for j in node_jobs]))
                    active_tasks = []
                elif instance.capacity == 0:
                    active_tasks = []
                elif instance.rampart_groups.filter(
                        controller__isnull=False).exists():
                    active_tasks = all_celery_task_ids
                    isolated = True
                else:
                    continue

            self.fail_jobs_if_not_in_celery(node_jobs,
                                            active_tasks,
                                            celery_task_start_time,
                                            isolated=isolated)
Example no. 39
 def expire(self) -> None:
     """Mark the token as expired immediately, effectively killing the token."""
     self.expiration_time = tz_now() - datetime.timedelta(microseconds=1)
     self.save()
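A minimal usage sketch (the token variable is hypothetical; the point is that after expire() the token no longer passes an expiry filter like the expires_at__gt=tz_now() lookup in Example no. 9, a different model but the same idea):

 # Hypothetical usage: force-expire a token and verify it now reads as expired.
 token.expire()
 assert token.expiration_time < tz_now()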
Example no. 40
 def save(self, *args: Any, **kwargs: Any) -> RequestTokenLog:
     if "update_fields" not in kwargs:
         self.timestamp = self.timestamp or tz_now()
     super(RequestTokenLog, self).save(*args, **kwargs)
     return self
Example no. 41
 def get_expired_workflow_approvals(self):
     # timeout of 0 indicates that it never expires
     qs = WorkflowApproval.objects.filter(status='pending').exclude(
         timeout=0).filter(expires__lt=tz_now())
     return qs
Example no. 42
    def process_pending_tasks(self, pending_tasks):
        tasks_to_update_job_explanation = []
        for task in pending_tasks:
            if self.start_task_limit <= 0:
                break
            if self.timed_out():
                logger.warning(
                    "Task manager has reached time out while processing pending jobs, exiting loop early"
                )
                break
            blocked_by = self.job_blocked_by(task)
            if blocked_by:
                self.subsystem_metrics.inc(f"{self.prefix}_tasks_blocked", 1)
                task.log_lifecycle("blocked", blocked_by=blocked_by)
                job_explanation = gettext_noop(
                    f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish"
                )
                if task.job_explanation != job_explanation:
                    if task.created < (tz_now() -
                                       self.time_delta_job_explanation):
                        task.job_explanation = job_explanation
                        tasks_to_update_job_explanation.append(task)
                continue

            if isinstance(task, WorkflowJob):
                # Previously we were tracking allow_simultaneous blocking both here and in DependencyGraph.
                # Double check that using just the DependencyGraph works for Workflows and Sliced Jobs.
                self.start_task(task, None, task.get_jobs_fail_chain(), None)
                continue

            found_acceptable_queue = False

            preferred_instance_groups = self.instance_groups.get_instance_groups_from_task_cache(
                task)

            # Determine if there is control capacity for the task
            if task.capacity_type == 'control':
                control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
            else:
                control_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
            control_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
                task,
                instance_group_name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME,
                impact=control_impact,
                capacity_type='control')
            if not control_instance:
                self.task_needs_capacity(task, tasks_to_update_job_explanation)
                logger.debug(
                    f"Skipping task {task.log_format} in pending, not enough capacity left on controlplane to control new tasks"
                )
                continue

            task.controller_node = control_instance.hostname

            # All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
            if task.capacity_type == 'control':
                task.execution_node = control_instance.hostname
                execution_instance = self.instances[
                    control_instance.hostname].obj
                task.log_lifecycle("controller_node_chosen")
                task.log_lifecycle("execution_node_chosen")
                self.start_task(task, self.controlplane_ig,
                                task.get_jobs_fail_chain(), execution_instance)
                found_acceptable_queue = True
                continue

            for instance_group in preferred_instance_groups:
                if instance_group.is_container_group:
                    self.start_task(task, instance_group,
                                    task.get_jobs_fail_chain(), None)
                    found_acceptable_queue = True
                    break

                # TODO: remove this after we have confidence that OCP control nodes are reporting node_type=control
                if settings.IS_K8S and task.capacity_type == 'execution':
                    logger.debug(
                        "Skipping group {}, task cannot run on control plane".
                        format(instance_group.name))
                    continue
                # at this point we know the instance group is NOT a container group
                # because if it was, it would have started the task and broke out of the loop.
                execution_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
                    task,
                    instance_group_name=instance_group.name,
                    add_hybrid_control_cost=True
                ) or self.instance_groups.find_largest_idle_instance(
                    instance_group_name=instance_group.name,
                    capacity_type=task.capacity_type)

                if execution_instance:
                    task.execution_node = execution_instance.hostname
                    # If our execution instance is a hybrid, prefer to do control tasks there as well.
                    if execution_instance.node_type == 'hybrid':
                        control_instance = execution_instance
                        task.controller_node = execution_instance.hostname

                    task.log_lifecycle("controller_node_chosen")
                    task.log_lifecycle("execution_node_chosen")
                    logger.debug(
                        "Starting {} in group {} instance {} (remaining_capacity={})"
                        .format(task.log_format, instance_group.name,
                                execution_instance.hostname,
                                execution_instance.remaining_capacity))
                    execution_instance = self.instances[
                        execution_instance.hostname].obj
                    self.start_task(task, instance_group,
                                    task.get_jobs_fail_chain(),
                                    execution_instance)
                    found_acceptable_queue = True
                    break
                else:
                    logger.debug(
                        "No instance available in group {} to run job {} w/ capacity requirement {}"
                        .format(instance_group.name, task.log_format,
                                task.task_impact))
            if not found_acceptable_queue:
                self.task_needs_capacity(task, tasks_to_update_job_explanation)
        UnifiedJob.objects.bulk_update(tasks_to_update_job_explanation,
                                       ['job_explanation'])
Example no. 43
    def process_pending_tasks(self, pending_tasks):
        running_workflow_templates = {
            wf.unified_job_template_id
            for wf in self.get_running_workflow_jobs()
        }
        tasks_to_update_job_explanation = []
        for task in pending_tasks:
            if self.start_task_limit <= 0:
                break
            blocked_by = self.job_blocked_by(task)
            if blocked_by:
                task.log_lifecycle("blocked", blocked_by=blocked_by)
                job_explanation = gettext_noop(
                    f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish"
                )
                if task.job_explanation != job_explanation:
                    if task.created < (tz_now() -
                                       self.time_delta_job_explanation):
                        task.job_explanation = job_explanation
                        tasks_to_update_job_explanation.append(task)
                continue

            found_acceptable_queue = False
            preferred_instance_groups = task.preferred_instance_groups

            if isinstance(task, WorkflowJob):
                if task.unified_job_template_id in running_workflow_templates:
                    if not task.allow_simultaneous:
                        logger.debug(
                            "{} is blocked from running, workflow already running"
                            .format(task.log_format))
                        continue
                else:
                    running_workflow_templates.add(
                        task.unified_job_template_id)
                self.start_task(task, None, task.get_jobs_fail_chain(), None)
                continue

            # Determine if there is control capacity for the task
            if task.capacity_type == 'control':
                control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
            else:
                control_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
            control_instance = InstanceGroup.fit_task_to_most_remaining_capacity_instance(
                task,
                self.graph[
                    settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]['instances'],
                impact=control_impact,
                capacity_type='control')
            if not control_instance:
                self.task_needs_capacity(task, tasks_to_update_job_explanation)
                logger.debug(
                    f"Skipping task {task.log_format} in pending, not enough capacity left on controlplane to control new tasks"
                )
                continue

            task.controller_node = control_instance.hostname

            # All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
            if task.capacity_type == 'control':
                task.execution_node = control_instance.hostname
                control_instance.remaining_capacity = max(
                    0, control_instance.remaining_capacity - control_impact)
                control_instance.jobs_running += 1
                self.dependency_graph.add_job(task)
                execution_instance = self.real_instances[
                    control_instance.hostname]
                self.start_task(task, self.controlplane_ig,
                                task.get_jobs_fail_chain(), execution_instance)
                found_acceptable_queue = True
                continue

            for rampart_group in preferred_instance_groups:
                if rampart_group.is_container_group:
                    control_instance.jobs_running += 1
                    self.dependency_graph.add_job(task)
                    self.start_task(task, rampart_group,
                                    task.get_jobs_fail_chain(), None)
                    found_acceptable_queue = True
                    break

                # TODO: remove this after we have confidence that OCP control nodes are reporting node_type=control
                if settings.IS_K8S and task.capacity_type == 'execution':
                    logger.debug(
                        "Skipping group {}, task cannot run on control plane".
                        format(rampart_group.name))
                    continue
                # at this point we know the instance group is NOT a container group
                # because if it was, it would have started the task and broke out of the loop.
                execution_instance = InstanceGroup.fit_task_to_most_remaining_capacity_instance(
                    task,
                    self.graph[rampart_group.name]['instances'],
                    add_hybrid_control_cost=True
                ) or InstanceGroup.find_largest_idle_instance(
                    self.graph[rampart_group.name]['instances'],
                    capacity_type=task.capacity_type)

                if execution_instance:
                    task.execution_node = execution_instance.hostname
                    # If our execution instance is a hybrid, prefer to do control tasks there as well.
                    if execution_instance.node_type == 'hybrid':
                        control_instance = execution_instance
                        task.controller_node = execution_instance.hostname

                    control_instance.remaining_capacity = max(
                        0, control_instance.remaining_capacity -
                        settings.AWX_CONTROL_NODE_TASK_IMPACT)
                    task.log_lifecycle("controller_node_chosen")
                    if control_instance != execution_instance:
                        control_instance.jobs_running += 1
                    execution_instance.remaining_capacity = max(
                        0, execution_instance.remaining_capacity -
                        task.task_impact)
                    execution_instance.jobs_running += 1
                    task.log_lifecycle("execution_node_chosen")
                    logger.debug(
                        "Starting {} in group {} instance {} (remaining_capacity={})"
                        .format(task.log_format, rampart_group.name,
                                execution_instance.hostname,
                                execution_instance.remaining_capacity))
                    execution_instance = self.real_instances[
                        execution_instance.hostname]
                    self.dependency_graph.add_job(task)
                    self.start_task(task, rampart_group,
                                    task.get_jobs_fail_chain(),
                                    execution_instance)
                    found_acceptable_queue = True
                    break
                else:
                    logger.debug(
                        "No instance available in group {} to run job {} w/ capacity requirement {}"
                        .format(rampart_group.name, task.log_format,
                                task.task_impact))
            if not found_acceptable_queue:
                self.task_needs_capacity(task, tasks_to_update_job_explanation)
        UnifiedJob.objects.bulk_update(tasks_to_update_job_explanation,
                                       ['job_explanation'])
Example no. 44
 def filter_queryset(self, request, queryset, view):
     return queryset.filter(status=AppointmentState.Confirmed.value,
                            date_time__lt=tz_now())
Example no. 45
 def filter_queryset(self, request, queryset, view):
     return queryset.filter(date_time__lte=tz_now())
Example no. 46
def _generate_build_path():
    """Generate a unique build path to avoid concurrent builds clashing"""
    return os.path.join(BUILD_ROOT_DIR, tz_now().isoformat())
Example no. 47
 def upcoming(self):
     return self.get_queryset().order_by('timeslot__start_time').filter(timeslot__end_time__gte=tz_now())
Example no. 48
 def create_applicant(self, username):
     """Create new Applicant and user."""
     data = {'id': username, 'created_at': tz_now().isoformat()}
     user = get_user_model().objects.create_user(username)
     applicant = Applicant.objects.create_applicant(user, raw=data)
     return applicant
Example no. 49
 def flush(self, force=False):
     now = tz_now()
     if force or (time.time() - self.last_flush
                  ) > settings.JOB_EVENT_BUFFER_SECONDS or any([
                      len(events) >= 1000 for events in self.buff.values()
                  ]):
         metrics_bulk_events_saved = 0
         metrics_singular_events_saved = 0
         metrics_events_batch_save_errors = 0
         metrics_events_broadcast = 0
         metrics_events_missing_created = 0
         metrics_total_job_event_processing_seconds = datetime.timedelta(
             seconds=0)
         for cls, events in self.buff.items():
             logger.debug(
                 f'{cls.__name__}.objects.bulk_create({len(events)})')
             for e in events:
                 e.modified = now  # this can be set before created because now is set above on line 149
                 if not e.created:
                     e.created = now
                     metrics_events_missing_created += 1
                 else:  # only calculate the seconds if the created time already has been set
                     metrics_total_job_event_processing_seconds += e.modified - e.created
             metrics_duration_to_save = time.perf_counter()
             try:
                 cls.objects.bulk_create(events)
                 metrics_bulk_events_saved += len(events)
             except Exception as exc:
                 logger.warning(
                     f'Error in events bulk_create, will try individually up to 5 errors, error {str(exc)}'
                 )
                 # if an exception occurs, we should re-attempt to save the
                 # events one-by-one, because something in the list is
                 # broken/stale
                 consecutive_errors = 0
                 events_saved = 0
                 metrics_events_batch_save_errors += 1
                 for e in events:
                     try:
                         e.save()
                         events_saved += 1
                         consecutive_errors = 0
                     except Exception as exc_indv:
                         consecutive_errors += 1
                         logger.info(
                             f'Database Error Saving individual Job Event, error {str(exc_indv)}'
                         )
                     if consecutive_errors >= 5:
                         raise
                 metrics_singular_events_saved += events_saved
                 if events_saved == 0:
                     raise
             metrics_duration_to_save = time.perf_counter(
             ) - metrics_duration_to_save
             for e in events:
                 if not getattr(e, '_skip_websocket_message', False):
                     metrics_events_broadcast += 1
                     emit_event_detail(e)
                 if getattr(e, '_notification_trigger_event', False):
                     job_stats_wrapup(getattr(e, e.JOB_REFERENCE), event=e)
         self.buff = {}
         self.last_flush = time.time()
         # only update metrics if we saved events
         if (metrics_bulk_events_saved + metrics_singular_events_saved) > 0:
             self.subsystem_metrics.inc(
                 'callback_receiver_batch_events_errors',
                 metrics_events_batch_save_errors)
             self.subsystem_metrics.inc(
                 'callback_receiver_events_insert_db_seconds',
                 metrics_duration_to_save)
             self.subsystem_metrics.inc(
                 'callback_receiver_events_insert_db',
                 metrics_bulk_events_saved + metrics_singular_events_saved)
             self.subsystem_metrics.observe(
                 'callback_receiver_batch_events_insert_db',
                 metrics_bulk_events_saved)
             self.subsystem_metrics.inc(
                 'callback_receiver_events_in_memory',
                 -(metrics_bulk_events_saved +
                   metrics_singular_events_saved))
             self.subsystem_metrics.inc(
                 'callback_receiver_events_broadcast',
                 metrics_events_broadcast)
             self.subsystem_metrics.set(
                 'callback_receiver_event_processing_avg_seconds',
                 metrics_total_job_event_processing_seconds.total_seconds()
                 /
                 (metrics_bulk_events_saved + metrics_singular_events_saved
                  - metrics_events_missing_created),
             )
         if self.subsystem_metrics.should_pipe_execute() is True:
             self.subsystem_metrics.pipe_execute()
Example no. 50
 def is_expired(self, now=None):
     if not now:
         now = tz_now()
     return bool(self.expires < now)
Example no. 51
 def get_now(self):
     return tz_now()
Example no. 52
 def save(self, *args, **kwargs):
     self.date_updated = tz_now()
     return super(AbstractFulfillmentOrder, self).save(*args, **kwargs)
Example no. 53
def get_past_event_cutoff():
    # A safe datetime that defines when 'past' has happened
    # so we don't stop showing events too soon
    return (tz_now() - datetime.timedelta(days=1)).date()
Example no. 54
        assert "awx.main.tests.functional.test_dispatch.add: Queue value required and may not be None" == e.value.args[0]

    def test_queue_defined_in_task_decorator(self):
        message, queue = multiply.apply_async([2, 2])
        assert queue == 'hard-math'

    def test_queue_overridden_from_task_decorator(self):
        message, queue = multiply.apply_async([2, 2], queue='not-so-hard')
        assert queue == 'not-so-hard'

    def test_apply_with_callable_queuename(self):
        message, queue = add.apply_async([2, 2], queue=lambda: 'called')
        assert queue == 'called'


yesterday = tz_now() - datetime.timedelta(days=1)


@pytest.mark.django_db
class TestJobReaper(object):

    @pytest.mark.parametrize('status, execution_node, controller_node, modified, fail', [
        ('running', '', '', None, False),        # running, not assigned to the instance
        ('running', 'awx', '', None, True),      # running, has the instance as its execution_node
        ('running', '', 'awx', None, True),      # running, has the instance as its controller_node
        ('waiting', '', '', None, False),        # waiting, not assigned to the instance
        ('waiting', 'awx', '', None, False),     # waiting, was edited less than a minute ago
        ('waiting', '', 'awx', None, False),     # waiting, was edited less than a minute ago
        ('waiting', 'awx', '', yesterday, True), # waiting, assigned to the execution_node, stale
        ('waiting', '', 'awx', yesterday, True), # waiting, assigned to the controller_node, stale
    ])