Example 1
class Subscription(models.Model):
    class STATES(object):
        ACTIVE = 'active'
        INACTIVE = 'inactive'
        CANCELED = 'canceled'
        ENDED = 'ended'

    STATE_CHOICES = Choices(
        (STATES.ACTIVE, _('Active')), (STATES.INACTIVE, _('Inactive')),
        (STATES.CANCELED, _('Canceled')), (STATES.ENDED, _('Ended')))

    class CANCEL_OPTIONS(object):
        NOW = 'now'
        END_OF_BILLING_CYCLE = 'end_of_billing_cycle'

    _INTERVALS_CODES = {
        'year': rrule.YEARLY,
        'month': rrule.MONTHLY,
        'week': rrule.WEEKLY,
        'day': rrule.DAILY
    }

    plan = models.ForeignKey(
        'Plan',
        on_delete=models.CASCADE,
        help_text='The plan the customer is subscribed to.')
    description = models.CharField(max_length=1024, blank=True, null=True)
    customer = models.ForeignKey(
        'Customer',
        related_name='subscriptions',
        on_delete=models.CASCADE,
        help_text='The customer who is subscribed to the plan.')
    trial_end = models.DateField(
        blank=True,
        null=True,
        help_text='The date at which the trial ends. '
        'If set, overrides the computed trial end date from the plan.')
    start_date = models.DateField(
        blank=True,
        null=True,
        help_text='The starting date for the subscription.')
    cancel_date = models.DateField(
        blank=True,
        null=True,
        help_text='The date when the subscription was canceled.')
    ended_at = models.DateField(
        blank=True,
        null=True,
        help_text='The date when the subscription ended.')
    reference = models.CharField(
        max_length=128,
        blank=True,
        null=True,
        validators=[validate_reference],
        help_text="The subscription's reference in an external system.")
    state = FSMField(choices=STATE_CHOICES,
                     max_length=12,
                     default=STATES.INACTIVE,
                     help_text='The state the subscription is in.')
    meta = JSONField(blank=True,
                     null=True,
                     default=dict,
                     encoder=DjangoJSONEncoder)

    def clean(self):
        errors = dict()
        if self.start_date and self.trial_end:
            if self.trial_end < self.start_date:
                errors.update({
                    'trial_end':
                    "The trial end date cannot be older than "
                    "the subscription's start date."
                })
        if self.ended_at:
            if self.state not in [self.STATES.CANCELED, self.STATES.ENDED]:
                errors.update({
                    'ended_at':
                    'The ended at date cannot be set if the '
                    'subscription is not canceled or ended.'
                })
            elif self.ended_at < self.start_date:
                errors.update({
                    'ended_at':
                    "The ended at date cannot be older than the"
                    "subscription's start date."
                })

        if errors:
            raise ValidationError(errors)

    @property
    def provider(self):
        return self.plan.provider

    def _get_aligned_start_date_after_date(self,
                                           reference_date,
                                           interval_type,
                                           bymonth=None,
                                           byweekday=None,
                                           bymonthday=None):
        return list(
            rrule.rrule(
                interval_type,
                count=1,  # align the cycle to the given rules as quickly as possible
                bymonth=bymonth,
                bymonthday=bymonthday,
                byweekday=byweekday,
                dtstart=reference_date))[-1].date()

    def _get_last_start_date_within_range(self,
                                          range_start,
                                          range_end,
                                          interval_type,
                                          interval_count,
                                          bymonth=None,
                                          byweekday=None,
                                          bymonthday=None):
        # we try to obtain a start date aligned to the given rules
        aligned_start_date = self._get_aligned_start_date_after_date(
            reference_date=range_start,
            interval_type=interval_type,
            bymonth=bymonth,
            bymonthday=bymonthday,
            byweekday=byweekday,
        )

        relative_start_date = range_start if aligned_start_date > range_end else aligned_start_date

        dates = list(
            rrule.rrule(interval_type,
                        dtstart=relative_start_date,
                        interval=interval_count,
                        until=range_end))

        return aligned_start_date if not dates else dates[-1].date()

    def _cycle_start_date(self,
                          reference_date=None,
                          ignore_trial=None,
                          granulate=None):
        ignore_trial_default = False
        granulate_default = False

        ignore_trial = ignore_trial_default or ignore_trial
        granulate = granulate_default or granulate

        if reference_date is None:
            reference_date = timezone.now().date()

        if not self.start_date or reference_date < self.start_date:
            return None

        rules = {
            'interval_type': self._INTERVALS_CODES[self.plan.interval],
            'interval_count': 1 if granulate else self.plan.interval_count,
        }
        if self.plan.interval == self.plan.INTERVALS.MONTH:
            rules['bymonthday'] = 1  # first day of the month
        elif self.plan.interval == self.plan.INTERVALS.WEEK:
            rules['byweekday'] = 0  # first day of the week (Monday)
        elif self.plan.interval == self.plan.INTERVALS.YEAR:
            # first day of the first month (1 Jan)
            rules['bymonth'] = 1
            rules['bymonthday'] = 1

        start_date_ignoring_trial = self._get_last_start_date_within_range(
            range_start=self.start_date, range_end=reference_date, **rules)

        if ignore_trial or not self.trial_end:
            return start_date_ignoring_trial
        else:  # Trial period is considered
            if self.trial_end < reference_date:  # Trial period ended
                # The day after the trial ended can be a start date (once, right after trial ended)
                date_after_trial_end = self.trial_end + ONE_DAY

                return max(date_after_trial_end, start_date_ignoring_trial)
            else:  # Trial is still ongoing
                if granulate or self.separate_cycles_during_trial:
                    # The trial period is split into cycles according to the rules defined above
                    return start_date_ignoring_trial
                else:
                    # Otherwise, the start date of the trial period is the subscription start date
                    return self.start_date

    def _cycle_end_date(self,
                        reference_date=None,
                        ignore_trial=None,
                        granulate=None):
        ignore_trial_default = False
        granulate_default = False

        ignore_trial = ignore_trial or ignore_trial_default
        granulate = granulate or granulate_default

        if reference_date is None:
            reference_date = timezone.now().date()

        real_cycle_start_date = self._cycle_start_date(reference_date,
                                                       ignore_trial, granulate)

        # we need a current start date in order to compute a current end date
        if not real_cycle_start_date:
            return None

        # during trial and trial cycle is not separated into intervals
        if self.on_trial(reference_date) and not (
                self.separate_cycles_during_trial or granulate):
            return min(self.trial_end, (self.ended_at or datetime.max.date()))

        if self.plan.interval == self.plan.INTERVALS.YEAR:
            relative_delta = {'years': self.plan.interval_count}
        elif self.plan.interval == self.plan.INTERVALS.MONTH:
            relative_delta = {'months': self.plan.interval_count}
        elif self.plan.interval == self.plan.INTERVALS.WEEK:
            relative_delta = {'weeks': self.plan.interval_count}
        else:  # plan.INTERVALS.DAY
            relative_delta = {'days': self.plan.interval_count}

        maximum_cycle_end_date = real_cycle_start_date + relativedelta(
            **relative_delta) - ONE_DAY

        # We know that the cycle end_date is the day before the next cycle start_date,
        # therefore we check if the cycle start_date for our maximum cycle end_date is the same
        # as the initial cycle start_date.
        while True:
            reference_cycle_start_date = self._cycle_start_date(
                maximum_cycle_end_date, ignore_trial, granulate)
            # it means the cycle end_date we got is the right one
            if reference_cycle_start_date == real_cycle_start_date:
                return min(maximum_cycle_end_date,
                           (self.ended_at or datetime.max.date()))
            elif reference_cycle_start_date < real_cycle_start_date:
                # This should never happen in normal conditions, but it may stop infinite looping
                return None

            maximum_cycle_end_date = reference_cycle_start_date - ONE_DAY

    @property
    def prebill_plan(self):
        if self.plan.prebill_plan is not None:
            return self.plan.prebill_plan

        return self.provider.prebill_plan

    @property
    def cycle_billing_duration(self):
        if self.plan.cycle_billing_duration is not None:
            return self.plan.cycle_billing_duration

        return self.provider.cycle_billing_duration

    @property
    def separate_cycles_during_trial(self):
        if self.plan.separate_cycles_during_trial is not None:
            return self.plan.separate_cycles_during_trial

        return self.provider.separate_cycles_during_trial

    @property
    def generate_documents_on_trial_end(self):
        if self.plan.generate_documents_on_trial_end is not None:
            return self.plan.generate_documents_on_trial_end

        return self.provider.generate_documents_on_trial_end

    @property
    def _ignore_trial_end(self):
        return not self.generate_documents_on_trial_end

    def cycle_start_date(self, reference_date=None):
        return self._cycle_start_date(ignore_trial=self._ignore_trial_end,
                                      granulate=False,
                                      reference_date=reference_date)

    def cycle_end_date(self, reference_date=None):
        return self._cycle_end_date(ignore_trial=self._ignore_trial_end,
                                    granulate=False,
                                    reference_date=reference_date)

    def bucket_start_date(self, reference_date=None):
        return self._cycle_start_date(reference_date=reference_date,
                                      ignore_trial=False,
                                      granulate=True)

    def bucket_end_date(self, reference_date=None):
        return self._cycle_end_date(reference_date=reference_date,
                                    ignore_trial=False,
                                    granulate=True)

    def updateable_buckets(self):
        buckets = []

        if self.state in [self.STATES.ENDED, self.STATES.INACTIVE]:
            return buckets

        start_date = self.bucket_start_date()
        end_date = self.bucket_end_date()

        if start_date is None or end_date is None:
            return buckets

        if self.state == self.STATES.CANCELED:
            if self.cancel_date < start_date:
                return buckets

        buckets.append({'start_date': start_date, 'end_date': end_date})

        generate_after = timedelta(seconds=self.plan.generate_after)
        while (timezone.now() - generate_after < datetime.combine(
                start_date, datetime.min.time()).replace(
                    tzinfo=timezone.get_current_timezone())):
            end_date = start_date - ONE_DAY
            start_date = self.bucket_start_date(end_date)
            if start_date is None:
                return buckets
            buckets.append({'start_date': start_date, 'end_date': end_date})

        return buckets

    @property
    def is_on_trial(self):
        """
        Tells if the subscription is currently on trial.

        :rtype: bool
        """

        if self.state == self.STATES.ACTIVE and self.trial_end:
            return timezone.now().date() <= self.trial_end
        return False

    def on_trial(self, date):
        """
        Tells if the subscription was on trial at the date passed as argument.

        :param date: the date for which the check is made.
        :type date: datetime.date
        :rtype: bool
        """

        if self.trial_end:
            return date <= self.trial_end
        return False

    def _log_should_be_billed_result(self, billing_date, interval_end):
        logger.debug(
            'should_be_billed result: %s', {
                'subscription': self.id,
                'billing_date': billing_date.strftime('%Y-%m-%d'),
                'interval_end': interval_end.strftime('%Y-%m-%d')
            })

    @property
    def billed_up_to_dates(self):
        last_billing_log = self.last_billing_log

        return {
            'metered_features_billed_up_to':
            last_billing_log.metered_features_billed_up_to,
            'plan_billed_up_to': last_billing_log.plan_billed_up_to
        } if last_billing_log else {
            'metered_features_billed_up_to': self.start_date - ONE_DAY,
            'plan_billed_up_to': self.start_date - ONE_DAY
        }

    def should_be_billed(self, billing_date, generate_documents_datetime=None):
        if self.state not in [self.STATES.ACTIVE, self.STATES.CANCELED]:
            return False

        if not generate_documents_datetime:
            generate_documents_datetime = timezone.now()

        if self.cycle_billing_duration:
            if self.start_date > first_day_of_month(
                    billing_date) + self.cycle_billing_duration:
                # There was nothing to bill on the last day of the first cycle billing duration
                return False

            # We need the full cycle here (ignoring trial ends)
            cycle_start_datetime_ignoring_trial = self._cycle_start_date(
                billing_date, ignore_trial=True)
            latest_possible_billing_datetime = (
                cycle_start_datetime_ignoring_trial +
                self.cycle_billing_duration)

            billing_date = min(billing_date, latest_possible_billing_datetime)

        if billing_date > generate_documents_datetime.date():
            return False

        cycle_start_date = self.cycle_start_date(billing_date)

        if not cycle_start_date:
            return False

        if self.state == self.STATES.CANCELED:
            if billing_date <= self.cancel_date:
                return False

            cycle_start_date = self.cancel_date + ONE_DAY

        cycle_start_datetime = datetime.combine(
            cycle_start_date, datetime.min.time()).replace(tzinfo=utc)

        generate_after = timedelta(seconds=self.plan.generate_after)

        if generate_documents_datetime < cycle_start_datetime + generate_after:
            return False

        billed_up_to_dates = self.billed_up_to_dates
        plan_billed_up_to = billed_up_to_dates['plan_billed_up_to']
        metered_features_billed_up_to = billed_up_to_dates[
            'metered_features_billed_up_to']

        # We want to bill the subscription if the plan hasn't been billed for this cycle or
        # if the subscription has been canceled and the plan won't be billed for this cycle.
        if self.prebill_plan or self.state == self.STATES.CANCELED:
            plan_should_be_billed = plan_billed_up_to < cycle_start_date

            if self.state == self.STATES.CANCELED:
                return metered_features_billed_up_to < cycle_start_date or plan_should_be_billed

            return plan_should_be_billed

        # wait until the cycle that is going to be billed ends:
        billed_cycle_end_date = self.cycle_end_date(plan_billed_up_to +
                                                    ONE_DAY)
        return billed_cycle_end_date < cycle_start_date

    @property
    def _has_existing_customer_with_consolidated_billing(self):
        # TODO: move to Customer
        return (self.customer.consolidated_billing
                and self.customer.subscriptions.filter(
                    state=self.STATES.ACTIVE).count() > 1)

    @property
    def is_billed_first_time(self):
        return self.billing_logs.all().count() == 0

    @property
    def last_billing_log(self):
        return self.billing_logs.order_by('billing_date').last()

    @property
    def last_billing_date(self):
        # ToDo: Improve this when dropping Django 1.8 support
        try:
            return self.billing_logs.all()[0].billing_date
        except (BillingLog.DoesNotExist, IndexError):
            # It should never get here.
            return None

    def _should_activate_with_free_trial(self):
        return Subscription.objects.filter(plan__provider=self.plan.provider,
                                           customer=self.customer,
                                           state__in=[
                                               Subscription.STATES.ACTIVE,
                                               Subscription.STATES.CANCELED,
                                               Subscription.STATES.ENDED
                                           ]).count() == 0

    ##########################################################################
    # STATE MACHINE TRANSITIONS
    ##########################################################################
    @transition(field=state,
                source=[STATES.INACTIVE, STATES.CANCELED],
                target=STATES.ACTIVE)
    def activate(self, start_date=None, trial_end_date=None):
        if start_date:
            self.start_date = min(timezone.now().date(), start_date)
        else:
            if self.start_date:
                self.start_date = min(timezone.now().date(), self.start_date)
            else:
                self.start_date = timezone.now().date()

        if self._should_activate_with_free_trial():
            if trial_end_date:
                self.trial_end = max(self.start_date, trial_end_date)
            else:
                if self.trial_end:
                    if self.trial_end < self.start_date:
                        self.trial_end = None
                elif self.plan.trial_period_days:
                    self.trial_end = self.start_date + timedelta(
                        days=self.plan.trial_period_days - 1)

    @transition(field=state, source=STATES.ACTIVE, target=STATES.CANCELED)
    def cancel(self, when):
        now = timezone.now().date()
        bsd = self.bucket_start_date()
        bed = self.bucket_end_date()

        if when == self.CANCEL_OPTIONS.END_OF_BILLING_CYCLE:
            if self.is_on_trial:
                self.cancel_date = self.bucket_end_date(
                    reference_date=self.trial_end)
            else:
                self.cancel_date = self.cycle_end_date()
        elif when == self.CANCEL_OPTIONS.NOW:
            for metered_feature in self.plan.metered_features.all():
                log = MeteredFeatureUnitsLog.objects.filter(
                    start_date=bsd,
                    end_date=bed,
                    metered_feature=metered_feature.pk,
                    subscription=self.pk).first()
                if log:
                    log.end_date = now
                    log.save()
            if self.on_trial(now):
                self.trial_end = now
            self.cancel_date = now

        self.save()

    @transition(field=state, source=STATES.CANCELED, target=STATES.ENDED)
    def end(self):
        self.ended_at = timezone.now().date()

    ##########################################################################

    def _cancel_now(self):
        self.cancel(when=self.CANCEL_OPTIONS.NOW)

    def _cancel_at_end_of_billing_cycle(self):
        self.cancel(when=self.CANCEL_OPTIONS.END_OF_BILLING_CYCLE)

    def _add_trial_value(self,
                         start_date,
                         end_date,
                         invoice=None,
                         proforma=None):
        self._add_plan_trial(start_date=start_date,
                             end_date=end_date,
                             invoice=invoice,
                             proforma=proforma)
        self._add_mfs_for_trial(start_date=start_date,
                                end_date=end_date,
                                invoice=invoice,
                                proforma=proforma)

    def _get_interval_end_date(self, date=None):
        """
        :returns: the end date of the interval that should be billed. The
            returned value is a function f(subscription_state, date)
        :rtype: datetime.date
        """

        if self.state == self.STATES.ACTIVE:
            end_date = self.bucket_end_date(reference_date=date)
        elif self.state == self.STATES.CANCELED:
            if self.trial_end and date <= self.trial_end:
                if self.trial_end <= self.cancel_date:
                    end_date = self.trial_end
                else:
                    end_date = self.cancel_date
            else:
                end_date = self.cancel_date
        return end_date

    def _log_value_state(self, value_state):
        logger.debug('Adding value: %s', {
            'subscription': self.id,
            'value_state': value_state
        })

    def _add_plan_trial(self,
                        start_date,
                        end_date,
                        invoice=None,
                        proforma=None):
        """
        Adds the plan trial to the document, by adding an entry with positive
        prorated value and one with prorated, negative value which represents
        the discount for the trial period.
        """

        prorated, percent = self._get_proration_status_and_percent(
            start_date, end_date)
        plan_price = self.plan.amount * percent

        context = self._build_entry_context({
            'name': self.plan.name,
            'unit': self.plan.interval,
            'product_code': self.plan.product_code,
            'start_date': start_date,
            'end_date': end_date,
            'prorated': prorated,
            'proration_percentage': percent,
            'context': 'plan-trial'
        })

        unit = self._entry_unit(context)

        description = self._entry_description(context)

        # Add plan with positive value
        DocumentEntry.objects.create(invoice=invoice,
                                     proforma=proforma,
                                     description=description,
                                     unit=unit,
                                     unit_price=plan_price,
                                     quantity=Decimal('1.00'),
                                     product_code=self.plan.product_code,
                                     prorated=prorated,
                                     start_date=start_date,
                                     end_date=end_date)

        context.update({'context': 'plan-trial-discount'})

        description = self._entry_description(context)

        # Add plan with negative value
        DocumentEntry.objects.create(invoice=invoice,
                                     proforma=proforma,
                                     description=description,
                                     unit=unit,
                                     unit_price=-plan_price,
                                     quantity=Decimal('1.00'),
                                     product_code=self.plan.product_code,
                                     prorated=prorated,
                                     start_date=start_date,
                                     end_date=end_date)

        return Decimal("0.00")

    def _get_consumed_units_from_total_included_in_trial(
            self, metered_feature, consumed_units):
        """
        :returns: (consumed_units, free_units)
        """

        if metered_feature.included_units_during_trial:
            included_units_during_trial = metered_feature.included_units_during_trial
            if consumed_units > included_units_during_trial:
                extra_consumed = consumed_units - included_units_during_trial
                return extra_consumed, included_units_during_trial
            else:
                return 0, consumed_units
        elif metered_feature.included_units_during_trial == Decimal('0.0000'):
            return consumed_units, 0
        elif metered_feature.included_units_during_trial is None:
            return 0, consumed_units

    def _get_extra_consumed_units_during_trial(self, metered_feature,
                                               consumed_units):
        """
        :returns: (extra_consumed, free_units)
            extra_consumed - units consumed extra during trial that will be
                billed
            free_units - the units included in trial
        """

        if self.is_billed_first_time:
            # It's on trial and is billed first time
            return self._get_consumed_units_from_total_included_in_trial(
                metered_feature, consumed_units)
        else:
            # It's still on trial but has been billed before
            # The following part tries to handle the case when the trial
            # spans over 2 months and the subscription has been already billed
            # once => this month it is still on trial but it only
            # has remaining = consumed_last_cycle - included_during_trial
            last_log_entry = self.billing_logs.all()[0]
            if last_log_entry.invoice:
                qs = last_log_entry.invoice.invoice_entries.filter(
                    product_code=metered_feature.product_code)
            elif last_log_entry.proforma:
                qs = last_log_entry.proforma.proforma_entries.filter(
                    product_code=metered_feature.product_code)
            else:
                qs = DocumentEntry.objects.none()

            if not qs.exists():
                return self._get_consumed_units_from_total_included_in_trial(
                    metered_feature, consumed_units)

            consumed = [
                qs_item.quantity for qs_item in qs if qs_item.unit_price >= 0
            ]
            consumed_in_last_billing_cycle = sum(consumed)

            if metered_feature.included_units_during_trial:
                included_during_trial = metered_feature.included_units_during_trial
                if consumed_in_last_billing_cycle > included_during_trial:
                    return consumed_units, 0
                else:
                    remaining = included_during_trial - consumed_in_last_billing_cycle
                    if consumed_units > remaining:
                        return consumed_units - remaining, remaining
                    elif consumed_units <= remaining:
                        return 0, consumed_units
            return 0, consumed_units

    def _add_mfs_for_trial(self,
                           start_date,
                           end_date,
                           invoice=None,
                           proforma=None):
        prorated, percent = self._get_proration_status_and_percent(
            start_date, end_date)
        context = self._build_entry_context({
            'product_code': self.plan.product_code,
            'start_date': start_date,
            'end_date': end_date,
            'prorated': prorated,
            'proration_percentage': percent,
            'context': 'metered-feature-trial'
        })

        total = Decimal("0.00")

        # Add all the metered features consumed during the trial period
        for metered_feature in self.plan.metered_features.all():
            context.update({
                'metered_feature': metered_feature,
                'unit': metered_feature.unit,
                'name': metered_feature.name,
                'product_code': metered_feature.product_code
            })

            unit = self._entry_unit(context)

            qs = self.mf_log_entries.filter(metered_feature=metered_feature,
                                            start_date__gte=start_date,
                                            end_date__lte=end_date)
            log = [qs_item.consumed_units for qs_item in qs]
            total_consumed_units = sum(log)

            extra_consumed, free = self._get_extra_consumed_units_during_trial(
                metered_feature, total_consumed_units)

            if extra_consumed > 0:
                charged_units = extra_consumed
                free_units = free
            else:
                free_units = total_consumed_units
                charged_units = 0

            if free_units > 0:
                description = self._entry_description(context)

                # Positive value for the consumed items.
                DocumentEntry.objects.create(
                    invoice=invoice,
                    proforma=proforma,
                    description=description,
                    unit=unit,
                    quantity=free_units,
                    unit_price=metered_feature.price_per_unit,
                    product_code=metered_feature.product_code,
                    start_date=start_date,
                    end_date=end_date,
                    prorated=prorated)

                context.update({'context': 'metered-feature-trial-discount'})

                description = self._entry_description(context)

                # Negative value for the consumed items.
                DocumentEntry.objects.create(
                    invoice=invoice,
                    proforma=proforma,
                    description=description,
                    unit=unit,
                    quantity=free_units,
                    unit_price=-metered_feature.price_per_unit,
                    product_code=metered_feature.product_code,
                    start_date=start_date,
                    end_date=end_date,
                    prorated=prorated)

            # Extra consumed items that are not included
            if charged_units > 0:
                context.update(
                    {'context': 'metered-feature-trial-not-discounted'})

                description_template_path = field_template_path(
                    field='entry_description',
                    provider=self.plan.provider.slug)
                description = render_to_string(description_template_path,
                                               context)

                total += DocumentEntry.objects.create(
                    invoice=invoice,
                    proforma=proforma,
                    description=description,
                    unit=unit,
                    quantity=charged_units,
                    prorated=prorated,
                    unit_price=metered_feature.price_per_unit,
                    product_code=metered_feature.product_code,
                    start_date=start_date,
                    end_date=end_date).total

        return total

    def _add_plan_value(self,
                        start_date,
                        end_date,
                        invoice=None,
                        proforma=None):
        """
        Adds to the document the value of the plan.
        """

        prorated, percent = self._get_proration_status_and_percent(
            start_date, end_date)

        context = self._build_entry_context({
            'name': self.plan.name,
            'unit': self.plan.interval,
            'product_code': self.plan.product_code,
            'start_date': start_date,
            'end_date': end_date,
            'prorated': prorated,
            'proration_percentage': percent,
            'context': 'plan'
        })
        description = self._entry_description(context)

        # Get the plan's prorated value
        plan_price = self.plan.amount * percent

        unit = self._entry_unit(context)

        return DocumentEntry.objects.create(
            invoice=invoice,
            proforma=proforma,
            description=description,
            unit=unit,
            unit_price=plan_price,
            quantity=Decimal('1.00'),
            product_code=self.plan.product_code,
            prorated=prorated,
            start_date=start_date,
            end_date=end_date).total

    def _get_consumed_units(self, metered_feature, proration_percent,
                            start_date, end_date):
        included_units = (proration_percent * metered_feature.included_units)

        qs = self.mf_log_entries.filter(metered_feature=metered_feature,
                                        start_date__gte=start_date,
                                        end_date__lte=end_date)
        log = [qs_item.consumed_units for qs_item in qs]
        total_consumed_units = reduce(lambda x, y: x + y, log, 0)

        if total_consumed_units > included_units:
            return total_consumed_units - included_units
        return 0

    def _add_mfs(self, start_date, end_date, invoice=None, proforma=None):
        prorated, percent = self._get_proration_status_and_percent(
            start_date, end_date)

        context = self._build_entry_context({
            'name': self.plan.name,
            'unit': self.plan.interval,
            'product_code': self.plan.product_code,
            'start_date': start_date,
            'end_date': end_date,
            'prorated': prorated,
            'proration_percentage': percent,
            'context': 'metered-feature'
        })

        mfs_total = Decimal('0.00')
        for metered_feature in self.plan.metered_features.all():
            consumed_units = self._get_consumed_units(metered_feature, percent,
                                                      start_date, end_date)

            context.update({
                'metered_feature': metered_feature,
                'unit': metered_feature.unit,
                'name': metered_feature.name,
                'product_code': metered_feature.product_code
            })

            description = self._entry_description(context)
            unit = self._entry_unit(context)

            mf = DocumentEntry.objects.create(
                invoice=invoice,
                proforma=proforma,
                description=description,
                unit=unit,
                quantity=consumed_units,
                prorated=prorated,
                unit_price=metered_feature.price_per_unit,
                product_code=metered_feature.product_code,
                start_date=start_date,
                end_date=end_date)

            mfs_total += mf.total

        return mfs_total

    def _get_proration_status_and_percent(self, start_date, end_date):
        """
        Returns the proration status (whether the interval is prorated) and
        the proration percent (how much of the interval will be billed).

        :returns: a tuple (status, Decimal(percent)) where status is either
            True or False and the Decimal value is in the interval
            [0.00, 1.00].
        :rtype: tuple
        """

        first_day_of_month = date(start_date.year, start_date.month, 1)
        last_day_index = calendar.monthrange(start_date.year,
                                             start_date.month)[1]
        last_day_of_month = date(start_date.year, start_date.month,
                                 last_day_index)

        if start_date == first_day_of_month and end_date == last_day_of_month:
            return False, Decimal('1.0000')
        else:
            days_in_full_interval = (last_day_of_month -
                                     first_day_of_month).days + 1
            days_in_interval = (end_date - start_date).days + 1
            percent = 1.0 * days_in_interval / days_in_full_interval
            percent = Decimal(percent).quantize(Decimal('0.0000'))

            return True, percent

    def _entry_unit(self, context):
        unit_template_path = field_template_path(
            field='entry_unit', provider=self.plan.provider.slug)
        return render_to_string(unit_template_path, context)

    def _entry_description(self, context):
        description_template_path = field_template_path(
            field='entry_description', provider=self.plan.provider.slug)
        return render_to_string(description_template_path, context)

    @property
    def _base_entry_context(self):
        return {
            'name': None,
            'unit': 1,
            'subscription': self,
            'plan': self.plan,
            'provider': self.plan.provider,
            'customer': self.customer,
            'product_code': None,
            'start_date': None,
            'end_date': None,
            'prorated': None,
            'proration_percentage': None,
            'metered_feature': None,
            'context': None
        }

    def _build_entry_context(self, context):
        base_context = self._base_entry_context
        base_context.update(context)
        return base_context

    def __str__(self):
        return u'%s (%s)' % (self.customer, self.plan.name)
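
The cycle and proration logic above is the most intricate part of this model, so here is a minimal standalone sketch (no Django required) of the two underlying rules: dateutil's rrule aligns a cycle start to the first boundary matching the by* rules, and the proration percent is the number of billed days divided by the number of days in the calendar month of start_date. The function name and sample dates below are illustrative, not part of the model.

import calendar
from datetime import date, datetime
from decimal import Decimal

from dateutil import rrule

# Cycle alignment: the first monthly boundary (1st of the month) on or
# after 15 June 2023, mirroring _get_aligned_start_date_after_date().
aligned = list(rrule.rrule(rrule.MONTHLY, count=1, bymonthday=1,
                           dtstart=datetime(2023, 6, 15)))[-1].date()
print(aligned)  # 2023-07-01

# Proration: billed days in the interval divided by the days in the
# calendar month of start_date, both ends inclusive, mirroring
# _get_proration_status_and_percent().
def proration_status_and_percent(start_date, end_date):
    first_day = date(start_date.year, start_date.month, 1)
    last_day = date(start_date.year, start_date.month,
                    calendar.monthrange(start_date.year, start_date.month)[1])
    if start_date == first_day and end_date == last_day:
        return False, Decimal('1.0000')
    days_in_full_interval = (last_day - first_day).days + 1
    days_in_interval = (end_date - start_date).days + 1
    percent = Decimal(
        1.0 * days_in_interval / days_in_full_interval).quantize(
            Decimal('0.0000'))
    return True, percent

# 10 billed days out of a 30-day June -> prorated at one third.
print(proration_status_and_percent(date(2023, 6, 1), date(2023, 6, 10)))
# (True, Decimal('0.3333'))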
Example 2
class Statuses(models.Model):
    name = JSONField(blank=False, null=False, default=lang_dict_field)
    alias = models.CharField(max_length=32, unique=True)

    def __str__(self):
        return self.alias
Example 3
class StatementLine(models.Model):
    """Records an single imported bank statement line

    A StatementLine is purely a utility to aid in the creation of transactions
    (in the process known as reconciliation). StatementLines have no impact on
    account balances.

    However, the :meth:`StatementLine.create_transaction()` method can be used to create
    a transaction based on the information in the StatementLine.

    Attributes:

        uuid (SmallUUID): UUID for statement line. Use to prevent leaking of IDs (if desired).
        timestamp (datetime): The datetime when the object was created.
        date (date): The date given by the statement line
        statement_import (StatementImport): The import to which the line belongs
        amount (Decimal): The amount for the statement line, positive or negative.
        description (str): Any description/memo information provided
        transaction (Transaction): Optionally, the transaction created for this statement line. This normally
            occurs during reconciliation. See also :meth:`StatementLine.create_transaction()`.
    """

    uuid = SmallUUIDField(default=uuid_default(),
                          editable=False,
                          verbose_name=_("uuid"))
    timestamp = models.DateTimeField(default=timezone.now,
                                     verbose_name=_("timestamp"))
    date = models.DateField(verbose_name=_("date"))
    statement_import = models.ForeignKey(
        StatementImport,
        related_name="lines",
        on_delete=models.CASCADE,
        verbose_name=_("statement import"),
    )
    amount = models.DecimalField(max_digits=MAX_DIGITS,
                                 decimal_places=DECIMAL_PLACES,
                                 verbose_name=_("amount"))
    description = models.TextField(default="",
                                   blank=True,
                                   verbose_name=_("description"))
    type = models.CharField(max_length=50, default="", verbose_name=_("type"))
    # TODO: Add constraint to ensure transaction amount = statement line amount
    # TODO: Add constraint to ensure one statement line per transaction
    transaction = models.ForeignKey(
        Transaction,
        default=None,
        blank=True,
        null=True,
        help_text="Reconcile this statement line to this transaction",
        on_delete=models.SET_NULL,
        verbose_name=_("transaction"),
    )
    source_data = JSONField(
        default=json_default,
        help_text="Original data received from the data source.",
        verbose_name=_("source data"),
    )

    objects = StatementLineManager()

    def natural_key(self):
        return (self.uuid, )

    @property
    def is_reconciled(self):
        """Has this statement line been reconciled?

        Determined as ``True`` if :attr:`transaction` has been set.

        Returns:
            bool: ``True`` if reconciled, ``False`` if not.
        """
        return bool(self.transaction)

    @db_transaction.atomic()
    def create_transaction(self, to_account):
        """Create a transaction for this statement amount and account, into to_account

        This will also set this StatementLine's ``transaction`` attribute to the newly
        created transaction.

        Args:
            to_account (Account): The account the transaction is into / out of.

        Returns:
            Transaction: The newly created (and committed) transaction.

        """
        from_account = self.statement_import.bank_account

        transaction = Transaction.objects.create()
        Leg.objects.create(transaction=transaction,
                           account=from_account,
                           amount=+(self.amount * -1))
        Leg.objects.create(transaction=transaction,
                           account=to_account,
                           amount=-(self.amount * -1))

        transaction.date = self.date
        transaction.save()

        self.transaction = transaction
        self.save()
        return transaction

    class Meta:
        verbose_name = _("statementLine")
Example 4
class Food(models.Model):
    foodType = models.CharField(max_length=100)
    cacheDate = models.DateTimeField('Date cached')
    data = JSONField()
Example 5
class CostSummary(models.Model):
    """A summary table of OCP costs."""
    class Meta:
        """Meta for CostSummary."""

        db_table = "reporting_ocpcosts_summary"
        indexes = [
            models.Index(fields=["usage_start"],
                         name="ocpcostsum_usage_start_idx"),
            models.Index(fields=["namespace"],
                         name="ocpcostsum_namespace_idx",
                         opclasses=["varchar_pattern_ops"]),
            models.Index(fields=["node"],
                         name="ocpcostsum_node_idx",
                         opclasses=["varchar_pattern_ops"]),
            GinIndex(fields=["pod_labels"], name="ocpcostsum_pod_labels_idx"),
        ]

    report_period = models.ForeignKey("OCPUsageReportPeriod",
                                      on_delete=models.CASCADE,
                                      null=True)

    cluster_id = models.CharField(max_length=50, null=True)

    cluster_alias = models.CharField(max_length=256, null=True)

    # Kubernetes objects by convention have a max name length of 253 chars
    namespace = models.CharField(max_length=253, null=True)

    pod = models.CharField(max_length=253, null=True)

    node = models.CharField(max_length=253, null=True)

    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)

    pod_charge_cpu_core_hours = models.DecimalField(max_digits=27,
                                                    decimal_places=9,
                                                    null=True)

    pod_charge_memory_gigabyte_hours = models.DecimalField(max_digits=27,
                                                           decimal_places=9,
                                                           null=True)

    persistentvolumeclaim_charge_gb_month = models.DecimalField(
        max_digits=27, decimal_places=9, null=True)

    # Need more precision on calculated fields, otherwise there will be
    # rounding errors
    infra_cost = models.DecimalField(max_digits=33,
                                     decimal_places=15,
                                     null=True)

    # This field is used in place of infrastructure_cost when
    # grouping by project
    project_infra_cost = models.DecimalField(max_digits=33,
                                             decimal_places=15,
                                             null=True)

    markup_cost = models.DecimalField(max_digits=27,
                                      decimal_places=9,
                                      null=True)

    project_markup_cost = models.DecimalField(max_digits=27,
                                              decimal_places=9,
                                              null=True)

    pod_labels = JSONField(null=True)

    monthly_cost = models.DecimalField(max_digits=33,
                                       decimal_places=15,
                                       null=True)
Example 6
class PieceOfWork(models.Model):
    """
    Description des oeuvres

    """
    id = models.AutoField(primary_key=True)

    dtLastSearch = models.DateTimeField(
        null=False,
        auto_now_add=True,
        help_text="Date de la derniere recherche automatique sur l'oeuvre")
    visual = models.TextField(blank=True, help_text="Visuel de l'oeuvre")
    dtStart = models.DateField(
        auto_now=True,
        null=False,
        help_text="Date de début de la réalisation de l'oeuvre")
    dtEnd = models.DateField(
        auto_now=True,
        null=False,
        help_text="Date de fin de la réalisation de l'oeuvre")
    title = models.CharField(null=False,
                             max_length=300,
                             default="sans titre",
                             help_text="Titre de l'oeuvre, même temporaire")
    title_index = models.CharField(
        null=False,
        max_length=300,
        default="",
        help_text="Titre de l'oeuvre simplifier pour gestion de la recherche")

    year = models.CharField(null=True,
                            max_length=4,
                            help_text="Année de sortie")
    nature = models.CharField(null=False,
                              default='MOVIE',
                              max_length=50,
                              help_text="Classification de l'oeuvre")
    dtCreate = models.DateField(
        auto_now_add=True,
        help_text="Date d'enregistrement de l'oeuvre dans DataCulture")

    reference = models.CharField(null=False,
                                 default="",
                                 blank=True,
                                 max_length=50,
                                 help_text="Reference d'oeuvre")
    budget = models.IntegerField(
        default=0, help_text="Coût total de réalisation de l'oeuvre")
    production = models.CharField(null=False,
                                  default="",
                                  blank=True,
                                  max_length=100,
                                  help_text="Production de l'oeuvre")

    # Structure: "url" of the document, "text" of the link
    links = JSONField(
        null=True, help_text="Liens vers des références externes à l'oeuvre")

    owner = models.CharField(max_length=150,
                             default="public",
                             help_text="Auteur de l'oeuvre")
    description = models.TextField(
        null=False,
        default="",
        max_length=3000,
        help_text="Synopsis/Description/Résumé de l'oeuvre")
    # Structure: "url" of the document, "type" of document (str), "title" of the document
    files = JSONField(null=True, help_text="Liens vers des documents attaché")
    category = models.TextField(
        null=True,
        max_length=50,
        help_text="Liste des categories auxquelles appartient le film")
    lang = models.CharField(max_length=50,
                            null=True,
                            help_text="Langue originale de l'oeuvre")

    apiVideoId = models.CharField(max_length=20,
                                  default="",
                                  null=False,
                                  blank=True,
                                  help_text="Version stocké sur api.video")
    distributer = models.CharField(max_length=150,
                                   default="",
                                   blank=True,
                                   null=True,
                                   help_text="Distribution de l'oeuvre")
    minutes = models.IntegerField(default=None,
                                  null=True,
                                  help_text="Durée de l'oeuvre en minutes")
    copies = models.IntegerField(default=None,
                                 null=True,
                                 help_text="Nombre de copies distribuée")
    visa = models.CharField(max_length=10,
                            null=True,
                            help_text="Visa d'exploitation")
    financal_partner = JSONField(null=True,
                                 help_text="Liste des partenaires financiers")
    first_week_entrances = models.IntegerField(
        null=True, help_text="Nombre d'entrée la première semaine")
    prizes = JSONField(null=True, help_text="Liste des prix reçus")

    def __str__(self):
        rc = self.title
        if self.id is not None:
            rc = str(self.id) + " : " + rc
        if self.year is not None:
            rc = rc + " (" + self.year + ")"
        if self.category is not None:
            rc = rc + " - " + self.category
        return rc

    def delay_lastsearch(self):
        if self.dtLastSearch is None:
            return 1e12
        return (datetime.datetime.now().timestamp() -
                self.dtLastSearch.timestamp()) / 3600

    def add_link(self, url, title, description=""):
        if self.links is None:
            self.links = []
        if url is None:
            return self.links

        obj = {"url": url, "text": title, "update": now(), "desc": description}
        for link in self.links:
            if link["url"] == url:
                self.links.remove(link)
                break

        self.links.append(obj)
        return self.links

    def quality_score(self):
        """
        Défini un score de qualité de la donnée. Ce score est notamment utilisé pour les fusions
        :return: le score
        """
        score=eval_field(self.title,5)\
              +eval_field(self.budget,2)\
              +eval_field(self.owner,3)\
              +2*len(self.links)\
              +eval_field(self.visual,2)\
              +eval_field(self.year,3)

        return score
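
add_link() above de-duplicates entries by URL before appending the new one. Here is a minimal sketch of that behaviour on plain dicts (no model instance needed); the function name and URL are illustrative.

def add_link(links, url, title):
    # Mirrors PieceOfWork.add_link(): drop any existing entry with the same
    # "url", then append the new one, so each URL appears at most once.
    if links is None:
        links = []
    if url is None:
        return links
    links = [link for link in links if link["url"] != url]
    links.append({"url": url, "text": title})
    return links

links = add_link(None, "https://example.org/film", "first title")
links = add_link(links, "https://example.org/film", "updated title")
print(links)  # [{'url': 'https://example.org/film', 'text': 'updated title'}]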
Example 7
class listItem(models.Model):
    content = models.TextField()
    product_id = models.TextField()
    img_url = models.TextField()
    data = JSONField()
Example 8
class Provider(models.Model):
    """A Koku Provider.

    Used for modeling cost providers like AWS Accounts.
    """
    class Meta:
        """Meta for Provider."""

        ordering = ["name"]
        unique_together = ("authentication", "billing_source", "customer")

    PROVIDER_AWS = "AWS"
    PROVIDER_OCP = "OCP"
    PROVIDER_AZURE = "Azure"
    PROVIDER_GCP = "GCP"
    PROVIDER_IBM = "IBM"
    PROVIDER_OCI = "OCI"
    # Local Providers are for local development and testing
    PROVIDER_AWS_LOCAL = "AWS-local"
    PROVIDER_AZURE_LOCAL = "Azure-local"
    PROVIDER_GCP_LOCAL = "GCP-local"
    PROVIDER_IBM_LOCAL = "IBM-local"
    PROVIDER_OCI_LOCAL = "OCI-local"
    # The following constants are not provider types
    OCP_ALL = "OCP_All"
    OCP_AWS = "OCP_AWS"
    OCP_AZURE = "OCP_Azure"
    OCP_GCP = "OCP_GCP"

    PROVIDER_CASE_MAPPING = {
        "aws": PROVIDER_AWS,
        "ocp": PROVIDER_OCP,
        "azure": PROVIDER_AZURE,
        "gcp": PROVIDER_GCP,
        "ibm": PROVIDER_IBM,
        "oci": PROVIDER_OCI,
        "aws-local": PROVIDER_AWS_LOCAL,
        "azure-local": PROVIDER_AZURE_LOCAL,
        "gcp-local": PROVIDER_GCP_LOCAL,
        "ibm-local": PROVIDER_IBM_LOCAL,
        "oci-local": PROVIDER_OCI_LOCAL,
        "ocp-aws": OCP_AWS,
        "ocp-azure": OCP_AZURE,
    }

    PROVIDER_CHOICES = (
        (PROVIDER_AWS, PROVIDER_AWS),
        (PROVIDER_OCP, PROVIDER_OCP),
        (PROVIDER_AZURE, PROVIDER_AZURE),
        (PROVIDER_GCP, PROVIDER_GCP),
        (PROVIDER_IBM, PROVIDER_IBM),
        (PROVIDER_OCI, PROVIDER_OCI),
        (PROVIDER_AWS_LOCAL, PROVIDER_AWS_LOCAL),
        (PROVIDER_AZURE_LOCAL, PROVIDER_AZURE_LOCAL),
        (PROVIDER_GCP_LOCAL, PROVIDER_GCP_LOCAL),
        (PROVIDER_IBM_LOCAL, PROVIDER_IBM_LOCAL),
        (PROVIDER_OCI_LOCAL, PROVIDER_OCI_LOCAL),
    )
    CLOUD_PROVIDER_CHOICES = (
        (PROVIDER_AWS, PROVIDER_AWS),
        (PROVIDER_AZURE, PROVIDER_AZURE),
        (PROVIDER_GCP, PROVIDER_GCP),
        (PROVIDER_IBM, PROVIDER_IBM),
        (PROVIDER_OCI, PROVIDER_OCI),
        (PROVIDER_AWS_LOCAL, PROVIDER_AWS_LOCAL),
        (PROVIDER_AZURE_LOCAL, PROVIDER_AZURE_LOCAL),
        (PROVIDER_GCP_LOCAL, PROVIDER_GCP_LOCAL),
        (PROVIDER_IBM_LOCAL, PROVIDER_IBM_LOCAL),
        (PROVIDER_OCI_LOCAL, PROVIDER_OCI_LOCAL),
    )

    # These lists are intended for use for provider type checking
    # throughout the codebase
    PROVIDER_LIST = [choice[0] for choice in PROVIDER_CHOICES]
    CLOUD_PROVIDER_LIST = [choice[0] for choice in CLOUD_PROVIDER_CHOICES]
    OPENSHIFT_ON_CLOUD_PROVIDER_LIST = [
        PROVIDER_AWS,
        PROVIDER_AWS_LOCAL,
        PROVIDER_AZURE,
        PROVIDER_AZURE_LOCAL,
        PROVIDER_GCP,
        PROVIDER_GCP_LOCAL,
    ]

    uuid = models.UUIDField(default=uuid4, primary_key=True)
    name = models.CharField(max_length=256, null=False)
    type = models.CharField(max_length=50,
                            null=False,
                            choices=PROVIDER_CHOICES,
                            default=PROVIDER_AWS)
    authentication = models.ForeignKey("ProviderAuthentication",
                                       null=True,
                                       on_delete=models.DO_NOTHING)
    billing_source = models.ForeignKey("ProviderBillingSource",
                                       null=True,
                                       on_delete=models.DO_NOTHING,
                                       blank=True)
    customer = models.ForeignKey("Customer",
                                 null=True,
                                 on_delete=models.PROTECT)
    created_by = models.ForeignKey("User",
                                   null=True,
                                   on_delete=models.SET_NULL)
    setup_complete = models.BooleanField(default=False)

    created_timestamp = models.DateTimeField(auto_now_add=True,
                                             blank=True,
                                             null=True)

    # We update the record on the provider when we update data.
    # This helps capture events like the updates following a cost model
    # CRUD operation that triggers cost model cost summarization,
    # but not on a specific manifest, so no manifest timestamp is updated
    data_updated_timestamp = models.DateTimeField(null=True)

    active = models.BooleanField(default=True)
    paused = models.BooleanField(default=False)

    # This field applies to OpenShift providers and identifies
    # which (if any) cloud provider the cluster is on
    infrastructure = models.ForeignKey("ProviderInfrastructureMap",
                                       null=True,
                                       on_delete=models.SET_NULL)
    additional_context = JSONField(null=True, default=dict)

    def save(self, *args, **kwargs):
        """Save instance and start data ingest task for active Provider."""

        should_ingest = False
        # These values determine if a Provider is new
        if self.created_timestamp and not self.setup_complete:
            should_ingest = True

        try:
            provider = Provider.objects.get(uuid=self.uuid)
        except Provider.DoesNotExist:
            pass
        else:
            # These values determine if Provider credentials have been updated:
            if provider.authentication != self.authentication or provider.billing_source != self.billing_source:
                should_ingest = True
            else:
                should_ingest = False

        # Commit the new/updated Provider to the DB
        super().save(*args, **kwargs)

        if settings.AUTO_DATA_INGEST and should_ingest and self.active:
            # Local import of task function to avoid potential import cycle.
            from masu.celery.tasks import check_report_updates

            QUEUE = None
            if self.customer.schema_name == settings.QE_SCHEMA:
                QUEUE = "priority"
                LOG.info("Setting queue to priority for QE testing")

            LOG.info(f"Starting data ingest task for Provider {self.uuid}")
            # Start check_report_updates task after Provider has been committed.
            transaction.on_commit(
                lambda: check_report_updates.s(provider_uuid=self.uuid, queue_name=QUEUE)
                .set(queue="priority")
                .apply_async())

    def delete(self, *args, **kwargs):
        if self.customer:
            using = router.db_for_write(self.__class__, instance=self)
            with schema_context(self.customer.schema_name):
                LOG.info(
                    f"PROVIDER {self.name} ({self.pk}) CASCADE DELETE -- SCHEMA {self.customer.schema_name}"
                )
                cascade_delete(self.__class__,
                               self.__class__.objects.filter(pk=self.pk))
                post_delete.send(sender=self.__class__,
                                 instance=self,
                                 using=using)
        else:
            LOG.warning(
                "Customer link cannot be found! Using ORM delete!")
            super().delete()
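
A minimal usage sketch of the ingest trigger implemented in save() above, assuming the surrounding koku/masu project (Celery worker, settings.AUTO_DATA_INGEST, and an existing Provider row); the helper name below is purely illustrative:

from django.db import transaction

def rotate_provider_credentials(provider, new_authentication):
    """Illustrative only: swapping credentials makes save() queue a fresh ingest."""
    provider.authentication = new_authentication
    with transaction.atomic():
        provider.save()  # check_report_updates is queued via transaction.on_commit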
Esempio n. 9
0
class Person(models.Model):
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
    #  Jards Macalé is an amazing Brazilian musician! =]
    enjoy_jards_macale = models.BooleanField(default=True)
    like_metal_music = models.BooleanField(default=False)
    name = models.CharField(max_length=30)
    nickname = models.SlugField(max_length=36)
    age = models.IntegerField()
    bio = models.TextField()
    birthday = models.DateField()
    birth_time = models.TimeField()
    appointment = models.DateTimeField()
    blog = models.URLField()
    occupation = models.CharField(max_length=10, choices=OCCUPATION_CHOICES)
    uuid = models.UUIDField(primary_key=False)
    name_hash = models.BinaryField(max_length=16)
    days_since_last_login = models.BigIntegerField()
    duration_of_sleep = models.DurationField()
    email = models.EmailField()
    id_document = models.CharField(unique=True, max_length=10)

    try:
        from django.db.models import JSONField

        data = JSONField()
    except ImportError:
        # Skip JSONField-related fields
        pass

    try:
        from django.contrib.postgres.fields import ArrayField, HStoreField
        from django.contrib.postgres.fields import JSONField as PostgresJSONField
        from django.contrib.postgres.fields.citext import (
            CICharField,
            CIEmailField,
            CITextField,
        )
        from django.contrib.postgres.fields.ranges import (
            BigIntegerRangeField,
            DateRangeField,
            DateTimeRangeField,
            IntegerRangeField,
        )

        if settings.USING_POSTGRES:
            acquaintances = ArrayField(models.IntegerField())
            postgres_data = PostgresJSONField()
            hstore_data = HStoreField()
            ci_char = CICharField(max_length=30)
            ci_email = CIEmailField()
            ci_text = CITextField()
            int_range = IntegerRangeField()
            bigint_range = BigIntegerRangeField()
            date_range = DateRangeField()
            datetime_range = DateTimeRangeField()
    except ImportError:
        # Skip PostgreSQL-related fields
        pass

    try:
        from django.contrib.postgres.fields.ranges import FloatRangeField

        if settings.USING_POSTGRES:
            float_range = FloatRangeField()
    except ImportError:
        # Django version greater than or equal to 3.1
        pass

    try:
        from django.contrib.postgres.fields.ranges import DecimalRangeField

        if settings.USING_POSTGRES:
            decimal_range = DecimalRangeField()
    except ImportError:
        # Django version lower than 2.2
        pass

    if BAKER_GIS:
        geom = models.GeometryField()
        point = models.PointField()
        line_string = models.LineStringField()
        polygon = models.PolygonField()
        multi_point = models.MultiPointField()
        multi_line_string = models.MultiLineStringField()
        multi_polygon = models.MultiPolygonField()
        geom_collection = models.GeometryCollectionField()
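
This Person model looks like the catch-all model used to exercise model_bakery (note BAKER_GIS and the optional Postgres fields). A brief population sketch, assuming model_bakery is installed and the model lives in an installed app hypothetically named "generic":

from model_bakery import baker

# baker.make introspects each field type and fills it with a suitable value,
# including the optional Postgres/GIS fields when they are present.
person = baker.make("generic.Person", age=30)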
Esempio n. 10
0
class BasePackage(Content):
    """
    Abstract base class for package like content.
    """

    MULTIARCH_CHOICES = [
        ("no", "no"),
        ("same", "same"),
        ("foreign", "foreign"),
        ("allowed", "allowed"),
    ]

    package = models.TextField()  # package name
    source = models.TextField(null=True)  # source package name
    version = models.TextField()
    architecture = models.TextField()  # all, i386, ...
    section = models.TextField(null=True)  # admin, comm, database, ...
    priority = models.TextField(
        null=True)  # required, standard, optional, extra
    origin = models.TextField(null=True)
    tag = models.TextField(null=True)
    bugs = models.TextField(null=True)
    essential = models.BooleanField(null=True, choices=BOOL_CHOICES)
    build_essential = models.BooleanField(null=True, choices=BOOL_CHOICES)
    installed_size = models.IntegerField(null=True)
    maintainer = models.TextField()
    original_maintainer = models.TextField(null=True)
    description = models.TextField()
    description_md5 = models.TextField(null=True)
    homepage = models.TextField(null=True)
    built_using = models.TextField(null=True)
    auto_built_package = models.TextField(null=True)
    multi_arch = models.TextField(null=True, choices=MULTIARCH_CHOICES)

    # Depends et al
    breaks = models.TextField(null=True)
    conflicts = models.TextField(null=True)
    depends = models.TextField(null=True)
    recommends = models.TextField(null=True)
    suggests = models.TextField(null=True)
    enhances = models.TextField(null=True)
    pre_depends = models.TextField(null=True)
    provides = models.TextField(null=True)
    replaces = models.TextField(null=True)

    # relative path in the upstream repository
    relative_path = models.TextField(null=False)
    # this digest is transferred to the content as a natural_key
    sha256 = models.TextField(null=False)

    custom_fields = JSONField(null=True)

    @property
    def name(self):
        """Print a nice name for Packages."""
        return "{}_{}_{}".format(self.package, self.version, self.architecture)

    def filename(self, component=""):
        """Assemble filename in pool directory."""
        sourcename = self.source or self.package
        sourcename = sourcename.split("(", 1)[0].rstrip()
        if sourcename.startswith("lib"):
            prefix = sourcename[0:4]
        else:
            prefix = sourcename[0]
        return os.path.join(
            "pool",
            component,
            prefix,
            sourcename,
            "{}.{}".format(self.name, self.SUFFIX),
        )

    repo_key_fields = ("package", "version", "architecture")

    class Meta:
        default_related_name = "%(app_label)s_%(model_name)s"
        unique_together = (("relative_path", "sha256"), )
        abstract = True
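
The pool-layout rule in filename() above can be illustrated with a standalone helper (illustrative only, not part of the model; SUFFIX would come from a concrete subclass, assumed here to be "deb"):

import os

def pool_path(package, version, architecture, source=None, component="main", suffix="deb"):
    # Same prefix rule as BasePackage.filename(): "lib*" sources get a four
    # character prefix, everything else gets the first character.
    sourcename = (source or package).split("(", 1)[0].rstrip()
    prefix = sourcename[0:4] if sourcename.startswith("lib") else sourcename[0]
    name = "{}_{}_{}".format(package, version, architecture)
    return os.path.join("pool", component, prefix, sourcename, "{}.{}".format(name, suffix))

# pool_path("libselinux1", "3.1-3", "amd64", source="libselinux")
# -> 'pool/main/libs/libselinux/libselinux1_3.1-3_amd64.deb'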
Esempio n. 11
0
class Sources(RunTextFieldValidators, models.Model):
    """Platform-Sources table.

    Used for managing Platform-Sources.
    """
    class Meta:
        """Meta for Sources."""

        db_table = "api_sources"
        ordering = ["name"]

    # Backend Platform-Services data.
    # Source ID is unique identifier
    source_id = models.IntegerField(primary_key=True)

    # Source UID
    source_uuid = models.UUIDField(unique=True, null=True)

    # Source name.
    name = models.TextField(max_length=256,
                            null=True,
                            validators=[MaxLengthValidator(256)])

    # Red Hat identity header.  Passed along to Koku API for entitlement and rbac reasons.
    auth_header = models.TextField(null=True)

    # Kafka message offset for Platform-Sources kafka stream
    offset = models.IntegerField(null=False)

    # Koku Specific data.
    # Customer Account ID
    account_id = models.TextField(null=True)

    # Provider type (i.e. AWS, OCP, AZURE)
    source_type = models.TextField(null=False)

    # Provider authentication (AWS roleARN, OCP Sources UID, etc.)
    authentication = JSONField(null=False, default=dict)

    # Provider billing source (AWS S3 bucket)
    billing_source = JSONField(null=True, default=dict)

    # Unique identifier for koku Provider
    koku_uuid = models.TextField(null=True, unique=True)

    # This allows us to conveniently join source and provider tables with
    # the Django ORM without using a real database foreign key constraint
    provider = models.ForeignKey("Provider",
                                 null=True,
                                 on_delete=models.DO_NOTHING,
                                 db_constraint=False)

    # This field indicates if the source is paused.
    paused = models.BooleanField(default=False)

    # When the source has been deleted on Platform-Sources this is True, indicating it hasn't been
    # removed on the Koku side yet.  The entry is removed entirely once the Koku Provider has been
    # successfully removed.
    pending_delete = models.BooleanField(default=False)

    # When a source is being updated by either Platform-Sources or from API (auth, billing source)
    # this flag will indicate that the update needs to be picked up by the Koku-Provider synchronization
    # handler.
    pending_update = models.BooleanField(default=False)

    # Set when a source delete occurs before the corresponding source create.  Messages can arrive
    # out of order on different kafka partitions.
    out_of_order_delete = models.BooleanField(default=False)

    # Availability status
    status = JSONField(null=True, default=dict)
    additional_context = JSONField(null=True, default=dict)

    def __str__(self):
        """Get the string representation."""
        return (
            f"Source ID: {self.source_id}\nName: {self.name}\nSource UUID: {self.source_uuid}\n"
            f"Source Type: {self.source_type}\nAuthentication: {self.authentication}\n"
            f"Billing Source: {self.billing_source}\nKoku UUID: {self.koku_uuid}\n"
            f"Pending Delete: {self.pending_delete}\nPending Update: {self.pending_update}\n"
        )
Esempio n. 12
0
class Services(models.Model):
    name = models.CharField(max_length=100)
    price = models.PositiveIntegerField()
    details = JSONField(default=dict)
Esempio n. 13
0
class Unit(SoftDeleteModel):
    id = models.IntegerField(primary_key=True)

    public = models.BooleanField(null=False, default=True)

    location = models.PointField(null=True, srid=PROJECTION_SRID)  # lat, lng?
    geometry = models.GeometryField(srid=PROJECTION_SRID, null=True)
    department = models.ForeignKey(Department,
                                   null=True,
                                   on_delete=models.CASCADE)
    root_department = models.ForeignKey(Department,
                                        null=True,
                                        related_name="descendant_units",
                                        on_delete=models.CASCADE)

    organizer_type = models.PositiveSmallIntegerField(choices=ORGANIZER_TYPES,
                                                      null=True)
    organizer_name = models.CharField(max_length=150, null=True)
    organizer_business_id = models.CharField(max_length=10, null=True)

    provider_type = models.PositiveSmallIntegerField(choices=PROVIDER_TYPES,
                                                     null=True)
    contract_type = models.PositiveSmallIntegerField(choices=CONTRACT_TYPES,
                                                     null=True)

    picture_url = models.URLField(max_length=250, null=True)
    picture_entrance_url = models.URLField(max_length=500, null=True)
    streetview_entrance_url = models.URLField(max_length=500, null=True)

    description = models.TextField(null=True)
    short_description = models.TextField(null=True)
    name = models.CharField(max_length=200, db_index=True)
    street_address = models.CharField(max_length=100, null=True)

    www = models.URLField(max_length=400, null=True)
    address_postal_full = models.CharField(max_length=100, null=True)
    call_charge_info = models.CharField(max_length=500, null=True)

    picture_caption = models.TextField(null=True)

    phone = models.CharField(max_length=120, null=True)
    fax = models.CharField(max_length=50, null=True)
    email = models.EmailField(max_length=100, null=True)
    accessibility_phone = models.CharField(max_length=50, null=True)
    accessibility_email = models.EmailField(max_length=100, null=True)
    accessibility_www = models.URLField(max_length=400, null=True)

    created_time = models.DateTimeField(
        null=True)  # ASK API: are these UTC? no Z in output

    municipality = models.ForeignKey(Municipality,
                                     null=True,
                                     db_index=True,
                                     on_delete=models.CASCADE)
    address_zip = models.CharField(max_length=10, null=True)

    data_source = models.CharField(max_length=50, null=True)
    extensions = HStoreField(null=True)

    last_modified_time = models.DateTimeField(
        db_index=True, help_text="Time of last modification")

    service_nodes = models.ManyToManyField("ServiceNode", related_name="units")
    services = models.ManyToManyField("Service",
                                      related_name="units",
                                      through="UnitServiceDetails")
    keywords = models.ManyToManyField(Keyword)

    connection_hash = models.CharField(
        max_length=40,
        null=True,
        help_text="Automatically generated hash of connection info",
    )
    accessibility_property_hash = models.CharField(
        max_length=40,
        null=True,
        help_text="Automatically generated hash of accessibility property info",
    )
    identifier_hash = models.CharField(
        max_length=40,
        null=True,
        help_text="Automatically generated hash of other identifiers",
    )
    service_details_hash = models.CharField(max_length=40, null=True)

    accessibility_viewpoints = JSONField(default=dict, null=True)

    # Cached fields for better performance
    root_service_nodes = models.CharField(max_length=50, null=True)

    objects = Manager()
    search_objects = UnitSearchManager()

    class Meta:
        ordering = ["-pk"]

    def __str__(self):
        return "%s (%s)" % (get_translated(self, "name"), self.id)

    def get_root_service_nodes(self):
        from .service_node import ServiceNode

        tree_ids = self.service_nodes.all().values_list("tree_id",
                                                        flat=True).distinct()
        qs = ServiceNode.objects.filter(level=0).filter(
            tree_id__in=list(tree_ids))
        service_node_list = qs.values_list("id", flat=True).distinct()
        return sorted(service_node_list)

    def service_names(self):
        return "\n".join((service.name for service in self.services.all()))

    def service_keywords(self):
        return "\n".join(("\n".join((keyword.name
                                     for keyword in service.keywords.all()))
                          for service in self.services.all()))

    def highlight_names(self):
        unit_connection_model = apps.get_model(app_label="services",
                                               model_name="UnitConnection")
        return "\n".join(
            (connection.name for connection in self.connections.filter(
                section_type=unit_connection_model.HIGHLIGHT_TYPE)))
Esempio n. 14
0
class Invitation(Model):
    """
    The invitation class
    """
    email = EmailField(unique=True,
                       help_text="Email to send invite to")

    # Added when user completes registration
    user = OneToOneField(User,
                         related_name="invitation",
                         null=True,
                         blank=True,
                         on_delete=CASCADE)

    mailed = BooleanField(default=False,
                          editable=False,
                          help_text="Indicates whether or not the invitation has been dispatched, "
                                    "that is, if an email has been sent to the target recipient")

    fulfilled = BooleanField(default=False,
                             editable=False,
                             help_text="Indicates whether or not the invitation has been fulfilled, "
                                       "meaning a user has been registered for it.")

    metadata = JSONField(default=dict,
                         blank=True,
                         editable=False,
                         help_text="Metadata that will be added to the user profile "
                                   "once they register. (occupation/sex/age/...) "
                                   "<a href='https://en.wikipedia.org/wiki/json' target='_blank'>"
                                   "JSON Formatted."
                                   "</a>")

    customemail = TextField(verbose_name="Custom invitation email",
                            default="",
                            blank=True,
                            help_text="A custom email text to send to this particular invitee. "
                                      "The text will be pasted into the standard email template, "
                                      "which includes the invitation link. If this field is left "
                                      "blank, the active email template will be used instead.")

    customsig = TextField(verbose_name="Custom signature",
                          default="The Conflict Cartographer Team",
                          blank=False,
                          null=False,
                          help_text="A custom signature which will be displayed at the bottom"
                                    " of the email. The default is \"The Conflict Cartographer Team\".")

    invitedBy = EmailField(verbose_name="Invitation was sent by this user",
                           default="",
                           blank=True,
                           null=True)

    countries = ManyToManyField(Country,
                                related_name="invited_assignees",
                                blank=True,
                                help_text="Countries that will be assigned to the user "
                                          "once they complete registration")

    refkey = CharField(max_length=32, null=True, editable=False)

    def save(self, *args, **kwargs):
        if self.refkey is None:
            self.refkey = referralKeygen(self.email)

        qs = User.objects.filter(email=self.email)
        if len(qs) > 0:
            self.fulfilled = True

        super().save(*args, **kwargs)

    def profile(self, user):
        profile = Profile(user=user)
        profile.save()
        profile.countries.set(self.countries.all())
        return profile

    def invitationLink(self):
        return os.path.join(settings.INVITATION_LINK_BASE, self.refkey)

    def unsubLink(self):
        return os.path.join(settings.UNSUB_LINK_BASE, self.refkey)

    def __str__(self):
        return f"Invitation for {self.email}"
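
A brief usage sketch of the referral-key behaviour in save() above (assumes the surrounding project, i.e. referralKeygen and the INVITATION_LINK_BASE setting; values are hypothetical):

inv = Invitation(email="guest@example.org")
inv.save()                    # refkey is generated on first save; fulfilled flips
                              # to True if a User with that email already exists
print(inv.invitationLink())   # <settings.INVITATION_LINK_BASE>/<refkey>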
Esempio n. 15
0
class Cache(models.Model):
    query = models.CharField(max_length=1000)
    data = JSONField(null=True)

    def __str__(self):
        return self.query
Esempio n. 16
0
class Device(models.Model):
    userCD = models.ForeignKey(User, on_delete=models.CASCADE)
    device = JSONField()

    def __str__(self):
        return str(self.userCD)
Esempio n. 17
0
class Profil(models.Model):
    """
    The profile stores all the information about former students, from either the standard or the professional curriculum.
    """
    id = models.AutoField(primary_key=True)
    #company=models.ForeignKey("Company",on_delete=models.DO_NOTHING,null=True)

    gender = models.CharField(max_length=1,
                              blank=True,
                              default="M",
                              choices=(('M', 'Male'), ('F', 'Female'),
                                       ('A', 'Autre'), ('', 'NSP')),
                              help_text="Genre du profil")
    firstname = models.CharField(max_length=40,
                                 null=False,
                                 default='',
                                 help_text="Prénom du profil")
    lastname = models.CharField(max_length=70,
                                null=False,
                                default='',
                                help_text="Nom du profil")
    name_index = models.CharField(max_length=70,
                                  null=False,
                                  default='',
                                  help_text="Index du nomprenom du profil")

    public_photo = models.BooleanField(
        default=False,
        null=False,
        help_text=
        "Indique si la photo peut être ou pas présentée sur la page publique")
    birthdate = models.DateField(null=True,
                                 help_text="Date de naissance du profil")
    mobile = models.CharField(blank=True,
                              max_length=20,
                              null=True,
                              default="06",
                              help_text="@Numéro de mobile")
    nationality = models.CharField(blank=True,
                                   max_length=30,
                                   null=False,
                                   default="Française",
                                   help_text="Nationnalité du profil")

    department = models.CharField(
        blank=True,
        max_length=60,
        null=True,
        default="",
        help_text="Cursus (pro ou standard) suivi pendant les études")
    department_category = models.CharField(
        blank=True,
        max_length=30,
        null=True,
        default="",
        help_text="Categorie / code de regroupement de la formation")

    job = models.CharField(max_length=60,
                           null=True,
                           default="",
                           blank=True,
                           help_text="Profession actuelle")
    degree_year = models.IntegerField(
        null=True, help_text="Année de sortie de l'école (promotion)")

    linkedin = models.URLField(
        blank=True,
        null=True,
        help_text="Adresse de la page public linkedin du profil")
    email = models.EmailField(null=True,
                              unique=False,
                              help_text="@email du profil")
    instagram = models.URLField(blank=True,
                                null=True,
                                help_text="Adresse du compte instagram")
    telegram = models.URLField(blank=True,
                               null=True,
                               help_text="Adresse public du compte telegram")
    facebook = models.URLField(
        blank=True,
        null=True,
        help_text="Adresse de la page facebook du profil")
    twitter = models.URLField(blank=True,
                              null=True,
                              help_text="Adresse de la page twitter du profil")
    tiktok = models.URLField(blank=True,
                             null=True,
                             help_text="Adresse de la page tiktok du profil")
    youtube = models.URLField(blank=True,
                              null=True,
                              help_text="Adresse de la page youtube du profil")
    vimeo = models.URLField(blank=True,
                            null=True,
                            help_text="Adresse de la page vimeo du profil")
    school = models.CharField(blank=True,
                              max_length=30,
                              null=True,
                              default="FEMIS",
                              help_text="Ecole")

    acceptSponsor = models.BooleanField(
        null=False,
        default=False,
        help_text="Le profil accepte les demandes de mentorat")
    sponsorBy = models.ForeignKey('Profil',
                                  null=True,
                                  on_delete=CASCADE,
                                  help_text="Nom du mentor")

    photo = models.TextField(blank=True,
                             default="/assets/img/anonymous.png",
                             help_text="Photo du profil au format Base64")

    cursus = models.CharField(max_length=1,
                              blank=False,
                              default="S",
                              choices=(('S', 'Standard'), ('P',
                                                           'Professionnel')),
                              help_text="Type de formation")
    address = models.CharField(
        null=True,
        blank=True,
        max_length=200,
        help_text="Adresse postale au format numéro / rue / batiment")
    town = models.CharField(
        null=True,
        blank=True,
        max_length=50,
        help_text="Ville de l'adresse postale de résidence")
    cp = models.CharField(null=True,
                          blank=True,
                          max_length=5,
                          help_text="code postal de résidence")
    country = models.CharField(null=True,
                               default="France",
                               blank=True,
                               max_length=50,
                               help_text="Pays de naissance")

    website = models.URLField(null=True,
                              blank=True,
                              default="",
                              help_text="Site web du profil")
    dtLastUpdate = models.DateTimeField(
        null=False,
        auto_now=True,
        help_text="Date de la dernière modification du profil")
    dtLastSearch = models.DateTimeField(
        null=False,
        default=datetime.datetime(2021, 1, 1, 0, 0, 0, 0),
        help_text="Date de la dernière recherche d'expérience pour le profil")
    dtLastNotif = models.DateTimeField(
        null=False,
        default=datetime.datetime(2021, 1, 1, 0, 0, 0, 0),
        help_text="Date de la dernière notification envoyée")
    obsolescenceScore = models.IntegerField(
        default=0,
        help_text=
        "Indique le degré d'obsolescence probable (utilisé pour les relances)")
    biography = models.TextField(null=True,
                                 default="",
                                 max_length=2000,
                                 help_text="Biographie du profil")
    links = JSONField(null=True,
                      help_text="Liens vers des références externes au profil")
    auto_updates = models.CharField(max_length=300,
                                    null=False,
                                    default="0,0,0,0,0,0",
                                    help_text="Date de mise a jour")
    advices = JSONField(
        null=True,
        default=None,
        help_text="Conseils pour augmenter la visibilité du profil")
    source = models.CharField(null=True,
                              blank=True,
                              max_length=50,
                              help_text="Source de la fiche")

    blockchain = models.CharField(null=False,
                                  blank=True,
                                  default="",
                                  max_length=50,
                                  help_text="Adresse elrond du profil")

    class Meta(object):
        ordering = ["lastname"]

    def delay_update(self, _type, update=False):
        """
        :return: delay de mise a jour en heure
        """
        lastUpdates = self.auto_updates.replace("[", "").replace("]",
                                                                 "").split(",")
        rc = (datetime.datetime.now().timestamp() -
              float(lastUpdates[_type])) / 3600
        if update:
            lastUpdates[_type] = str(datetime.datetime.now().timestamp())
            self.auto_updates = ",".join(lastUpdates)

        return rc

    def delay_lastsearch(self):
        """
        :return: delay de mise a jour en heure
        """
        rc = (datetime.datetime.now().timestamp() -
              self.dtLastSearch.timestamp()) / 3600
        return rc

    def add_link(self, url, title, description=""):
        if self.links is None: self.links = []
        obj = {"url": url, "text": title, "update": now(), "desc": description}
        for l in self.links:
            if l["url"] == url:
                self.links.remove(l)
                break

        self.links.append(obj)
        return self.links

    @property
    def public_url(self):
        return "./public/?id=" + str(
            self.id
        ) + "&name=" + self.firstname + " " + self.lastname + "&toolbar=false"

    @property
    def promo(self):
        return str(self.degree_year)

    @property
    def fullname(self):
        return '%s %s' % (self.firstname, self.lastname.upper())

    @property
    def str_links(self):
        if self.links is None: return ""
        s = ""
        for l in self.links:
            # links entries are dicts (see add_link above)
            s = s + l["url"] + ";"
        return s

    def __str__(self):
        return "{'id':" + str(
            self.id
        ) + ",'email':'" + self.email + "','fullname':'" + self.fullname + "','address':'" + self.address + " " + self.cp + " " + self.town + "'}"

    @property
    def name_field_indexing(self):
        return {"name": self.lastname.upper() + " " + self.firstname}
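
The bookkeeping in delay_update() and delay_lastsearch() is plain epoch arithmetic; a worked sketch with hypothetical values:

import datetime

# auto_updates stores one epoch timestamp per update type, comma separated.
auto_updates = "1609459200.0,0,0,0,0,0"  # type 0 last updated 2021-01-01 00:00:00 UTC
last = float(auto_updates.split(",")[0])
hours_since_type0 = (datetime.datetime.now().timestamp() - last) / 3600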
Esempio n. 18
0
class Individual(ModelWithGUID):
    SEX_MALE = 'M'
    SEX_FEMALE = 'F'
    SEX_UNKNOWN = 'U'
    SEX_CHOICES = (
        (SEX_MALE, 'Male'),
        (SEX_FEMALE, 'Female'),
        (SEX_UNKNOWN, 'Unknown'),
    )

    AFFECTED_STATUS_AFFECTED = 'A'
    AFFECTED_STATUS_UNAFFECTED = 'N'
    AFFECTED_STATUS_UNKNOWN = 'U'
    AFFECTED_STATUS_CHOICES = (
        (AFFECTED_STATUS_AFFECTED, 'Affected'),
        (AFFECTED_STATUS_UNAFFECTED, 'Unaffected'),
        (AFFECTED_STATUS_UNKNOWN, 'Unknown'),
    )

    CASE_REVIEW_STATUS_IN_REVIEW = "I"
    CASE_REVIEW_STATUS_CHOICES = (
        ('I', 'In Review'),
        ('U', 'Uncertain'),
        ('A', 'Accepted'),
        ('R', 'Not Accepted'),
        ('Q', 'More Info Needed'),
        ('P', 'Pending Results and Records'),
        ('N', 'NMI Review'),
        ('W', 'Waitlist'),
        ('L', 'Lost To Follow-Up'),
        ('V', 'Inactive'),
    )

    ONSET_AGE_CHOICES = [
        ('G', 'Congenital onset'),
        ('E', 'Embryonal onset'),
        ('F', 'Fetal onset'),
        ('N', 'Neonatal onset'),
        ('I', 'Infantile onset'),
        ('C', 'Childhood onset'),
        ('J', 'Juvenile onset'),
        ('A', 'Adult onset'),
        ('Y', 'Young adult onset'),
        ('M', 'Middle age onset'),
        ('L', 'Late onset'),
    ]

    INHERITANCE_CHOICES = [
        ('S', 'Sporadic'),
        ('D', 'Autosomal dominant inheritance'),
        ('L', 'Sex-limited autosomal dominant'),
        ('A', 'Male-limited autosomal dominant'),
        ('C', 'Autosomal dominant contiguous gene syndrome'),
        ('R', 'Autosomal recessive inheritance'),
        ('G', 'Gonosomal inheritance'),
        ('X', 'X-linked inheritance'),
        ('Z', 'X-linked recessive inheritance'),
        ('Y', 'Y-linked inheritance'),
        ('W', 'X-linked dominant inheritance'),
        ('F', 'Multifactorial inheritance'),
        ('M', 'Mitochondrial inheritance'),
    ]

    FEMALE_RELATIONSHIP_CHOICES = {
        'M': 'Mother',
        'G': 'Maternal Grandmother',
        'X': 'Paternal Grandmother',
        'A': 'Maternal Aunt',
        'E': 'Paternal Aunt',
        'N': 'Niece',
    }

    MALE_RELATIONSHIP_CHOICES = {
        'F': 'Father',
        'W': 'Maternal Grandfather',
        'Y': 'Paternal Grandfather',
        'L': 'Maternal Uncle',
        'D': 'Paternal Uncle',
        'P': 'Nephew',
    }

    RELATIONSHIP_CHOICES = list(FEMALE_RELATIONSHIP_CHOICES.items()) + list(
        MALE_RELATIONSHIP_CHOICES.items()) + [
            ('S', 'Self'),
            ('B', 'Sibling'),
            ('C', 'Child'),
            ('H', 'Maternal Half Sibling'),
            ('J', 'Paternal Half Sibling'),
            ('Z', 'Maternal 1st Cousin'),
            ('K', 'Paternal 1st Cousin'),
            ('O', 'Other'),
            ('U', 'Unknown'),
        ]

    SEX_LOOKUP = dict(SEX_CHOICES)
    AFFECTED_STATUS_LOOKUP = dict(AFFECTED_STATUS_CHOICES)
    CASE_REVIEW_STATUS_LOOKUP = dict(CASE_REVIEW_STATUS_CHOICES)
    CASE_REVIEW_STATUS_REVERSE_LOOKUP = {
        name.lower(): key
        for key, name in CASE_REVIEW_STATUS_CHOICES
    }
    ONSET_AGE_LOOKUP = dict(ONSET_AGE_CHOICES)
    ONSET_AGE_REVERSE_LOOKUP = {name: key for key, name in ONSET_AGE_CHOICES}
    INHERITANCE_LOOKUP = dict(INHERITANCE_CHOICES)
    INHERITANCE_REVERSE_LOOKUP = {
        name: key
        for key, name in INHERITANCE_CHOICES
    }
    RELATIONSHIP_LOOKUP = dict(RELATIONSHIP_CHOICES)

    family = models.ForeignKey(Family, on_delete=models.PROTECT)

    # WARNING: individual_id is unique within a family, but not necessarily unique globally
    individual_id = models.TextField(db_index=True)

    mother = models.ForeignKey('seqr.Individual',
                               null=True,
                               blank=True,
                               on_delete=models.SET_NULL,
                               related_name='maternal_children')
    father = models.ForeignKey('seqr.Individual',
                               null=True,
                               blank=True,
                               on_delete=models.SET_NULL,
                               related_name='paternal_children')

    sex = models.CharField(max_length=1, choices=SEX_CHOICES, default='U')
    affected = models.CharField(max_length=1,
                                choices=AFFECTED_STATUS_CHOICES,
                                default=AFFECTED_STATUS_UNKNOWN)

    # TODO once sample and individual ids are fully decoupled no reason to maintain this field
    display_name = models.TextField(default="", blank=True)

    notes = models.TextField(blank=True, null=True)

    case_review_status = models.CharField(max_length=2,
                                          choices=CASE_REVIEW_STATUS_CHOICES,
                                          default=CASE_REVIEW_STATUS_IN_REVIEW)
    case_review_status_last_modified_date = models.DateTimeField(null=True,
                                                                 blank=True,
                                                                 db_index=True)
    case_review_status_last_modified_by = models.ForeignKey(
        User,
        null=True,
        blank=True,
        related_name='+',
        on_delete=models.SET_NULL)
    case_review_discussion = models.TextField(null=True, blank=True)

    proband_relationship = models.CharField(max_length=1,
                                            choices=RELATIONSHIP_CHOICES,
                                            null=True)

    birth_year = YearField()
    death_year = YearField()
    onset_age = models.CharField(max_length=1,
                                 choices=ONSET_AGE_CHOICES,
                                 null=True)

    maternal_ethnicity = ArrayField(models.CharField(max_length=40), null=True)
    paternal_ethnicity = ArrayField(models.CharField(max_length=40), null=True)
    consanguinity = models.BooleanField(null=True)
    affected_relatives = models.BooleanField(null=True)
    expected_inheritance = ArrayField(models.CharField(
        max_length=1, choices=INHERITANCE_CHOICES),
                                      null=True)

    # features are objects with an id field for HPO id and optional notes and qualifiers fields
    features = JSONField(null=True)
    absent_features = JSONField(null=True)
    # nonstandard_features are objects with an id field for a free text label and optional
    # notes, qualifiers, and categories fields
    nonstandard_features = JSONField(null=True)
    absent_nonstandard_features = JSONField(null=True)

    # Disorders are a list of MIM IDs
    disorders = ArrayField(models.CharField(max_length=10), null=True)

    # genes are objects with required key gene (may be blank) and optional key comments
    candidate_genes = JSONField(null=True)
    rejected_genes = JSONField(null=True)

    ar_fertility_meds = models.BooleanField(null=True)
    ar_iui = models.BooleanField(null=True)
    ar_ivf = models.BooleanField(null=True)
    ar_icsi = models.BooleanField(null=True)
    ar_surrogacy = models.BooleanField(null=True)
    ar_donoregg = models.BooleanField(null=True)
    ar_donorsperm = models.BooleanField(null=True)

    filter_flags = JSONField(null=True)
    pop_platform_filters = JSONField(null=True)
    population = models.CharField(max_length=5, null=True)
    sv_flags = JSONField(null=True)

    def __unicode__(self):
        return self.individual_id.strip()

    def _compute_guid(self):
        return 'I%07d_%s' % (self.id, _slugify(str(self)))

    class Meta:
        unique_together = ('family', 'individual_id')

        json_fields = [
            'guid',
            'individual_id',
            'father',
            'mother',
            'sex',
            'affected',
            'display_name',
            'notes',
            'created_date',
            'last_modified_date',
            'filter_flags',
            'pop_platform_filters',
            'population',
            'sv_flags',
            'birth_year',
            'death_year',
            'onset_age',
            'maternal_ethnicity',
            'paternal_ethnicity',
            'consanguinity',
            'affected_relatives',
            'expected_inheritance',
            'disorders',
            'candidate_genes',
            'rejected_genes',
            'ar_iui',
            'ar_ivf',
            'ar_icsi',
            'ar_surrogacy',
            'ar_donoregg',
            'ar_donorsperm',
            'ar_fertility_meds',
        ]
        internal_json_fields = ['proband_relationship']
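
The *_LOOKUP / *_REVERSE_LOOKUP dictionaries above are simple inversions of the choice tuples, e.g.:

assert Individual.SEX_LOOKUP['M'] == 'Male'
assert Individual.CASE_REVIEW_STATUS_REVERSE_LOOKUP['accepted'] == 'A'
assert Individual.ONSET_AGE_REVERSE_LOOKUP['Adult onset'] == 'A'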
Esempio n. 19
0
class GCPCostEntryLineItemDailySummary(models.Model):
    """A daily aggregation of line items.

    This table is aggregated by service, and does not
    have a breakdown by resource or tags. The contents of this table
    should be considered ephemeral. It will be regularly deleted from
    and repopulated.

    """
    class Meta:
        """Meta for GCPCostEntryLineItemDailySummary."""

        managed = False  # for partitioning

        db_table = "reporting_gcpcostentrylineitem_daily_summary"
        indexes = [
            models.Index(fields=["usage_start"],
                         name="gcp_summary_usage_start_idx"),
            models.Index(fields=["instance_type"],
                         name="gcp_summary_instance_type_idx"),
            GinIndex(fields=["tags"], name="gcp_tags_idx"),
            models.Index(fields=["project_id"],
                         name="gcp_summary_project_id_idx"),
            models.Index(fields=["project_name"],
                         name="gcp_summary_project_name_idx"),
            models.Index(fields=["service_id"],
                         name="gcp_summary_service_id_idx"),
            models.Index(fields=["service_alias"],
                         name="gcp_summary_service_alias_idx"),
        ]

    uuid = models.UUIDField(primary_key=True)

    cost_entry_bill = models.ForeignKey(GCPCostEntryBill,
                                        on_delete=models.CASCADE)

    # The following fields are used for grouping
    account_id = models.CharField(max_length=20)
    project_id = models.CharField(max_length=256)
    project_name = models.CharField(max_length=256)
    service_id = models.CharField(max_length=256, null=True)
    service_alias = models.CharField(max_length=256, null=True, blank=True)
    sku_id = models.CharField(max_length=256, null=True)
    sku_alias = models.CharField(max_length=256, null=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=True)
    region = models.CharField(max_length=50, null=True)
    instance_type = models.CharField(max_length=50, null=True)
    unit = models.CharField(max_length=63, null=True)
    line_item_type = models.CharField(max_length=256, null=True)
    usage_amount = models.DecimalField(max_digits=24,
                                       decimal_places=9,
                                       null=True)
    currency = models.CharField(max_length=10)

    # The following fields are aggregates
    unblended_cost = models.DecimalField(max_digits=24,
                                         decimal_places=9,
                                         null=True)
    markup_cost = models.DecimalField(max_digits=24,
                                      decimal_places=9,
                                      null=True)
    tags = JSONField(null=True)
    source_uuid = models.UUIDField(unique=False, null=True)
Esempio n. 20
0
class Project(ProjectMixin, models.Model):
    """
    """
    objects = ProjectManager()
    __original_label_config = None

    title = models.CharField(
        _('title'),
        null=True,
        blank=True,
        default='',
        max_length=settings.PROJECT_TITLE_MAX_LEN,
        help_text=
        f'Project name. Must be between {settings.PROJECT_TITLE_MIN_LEN} and {settings.PROJECT_TITLE_MAX_LEN} characters long.',
        validators=[
            MinLengthValidator(settings.PROJECT_TITLE_MIN_LEN),
            MaxLengthValidator(settings.PROJECT_TITLE_MAX_LEN)
        ])
    description = models.TextField(_('description'),
                                   blank=True,
                                   null=True,
                                   default='',
                                   help_text='Project description')

    organization = models.ForeignKey('organizations.Organization',
                                     on_delete=models.CASCADE,
                                     related_name='projects',
                                     null=True)
    label_config = models.TextField(
        _('label config'),
        blank=True,
        null=True,
        default='<View></View>',
        help_text=
        'Label config in XML format. See more about it in documentation')
    expert_instruction = models.TextField(
        _('expert instruction'),
        blank=True,
        null=True,
        default='',
        help_text='Labeling instructions in HTML format')
    show_instruction = models.BooleanField(
        _('show instruction'),
        default=False,
        help_text='Show instructions to the annotator before they start')

    show_skip_button = models.BooleanField(
        _('show skip button'),
        default=True,
        help_text=
        'Show a skip button in interface and allow annotators to skip the task'
    )
    enable_empty_annotation = models.BooleanField(
        _('enable empty annotation'),
        default=True,
        help_text='Allow annotators to submit empty annotations')

    show_annotation_history = models.BooleanField(
        _('show annotation history'),
        default=False,
        help_text='Show annotation history to annotator')
    show_collab_predictions = models.BooleanField(
        _('show predictions to annotator'),
        default=True,
        help_text='If set, the annotator can view model predictions')
    evaluate_predictions_automatically = models.BooleanField(
        _('evaluate predictions automatically'),
        default=False,
        help_text='Retrieve and display predictions when loading a task')
    token = models.CharField(_('token'),
                             max_length=256,
                             default=create_hash,
                             null=True,
                             blank=True)
    result_count = models.IntegerField(
        _('result count'),
        default=0,
        help_text='Total results inside of annotations counter')
    color = models.CharField(_('color'),
                             max_length=16,
                             default='#FFFFFF',
                             null=True,
                             blank=True)

    created_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                   related_name='created_projects',
                                   on_delete=models.SET_NULL,
                                   null=True,
                                   verbose_name=_('created by'))
    maximum_annotations = models.IntegerField(
        _('maximum annotation number'),
        default=1,
        help_text='Maximum number of annotations for one task. '
        'If the number of annotations per task is equal or greater '
        'to this value, the task is completed (is_labeled=True)')
    min_annotations_to_start_training = models.IntegerField(
        _('min_annotations_to_start_training'),
        default=10,
        help_text=
        'Minimum number of completed tasks after which model training is started'
    )

    control_weights = JSONField(_('control weights'),
                                null=True,
                                default=dict,
                                help_text='Weights for control tags')
    model_version = models.TextField(
        _('model version'),
        blank=True,
        null=True,
        default='',
        help_text='Machine learning model version')
    data_types = JSONField(_('data_types'), default=dict, null=True)

    is_draft = models.BooleanField(
        _('is draft'),
        default=False,
        help_text='Whether or not the project is in the middle of being created'
    )
    is_published = models.BooleanField(
        _('published'),
        default=False,
        help_text='Whether or not the project is published to annotators')
    created_at = models.DateTimeField(_('created at'), auto_now_add=True)
    updated_at = models.DateTimeField(_('updated at'), auto_now=True)

    SEQUENCE = 'Sequential sampling'
    UNIFORM = 'Uniform sampling'
    UNCERTAINTY = 'Uncertainty sampling'

    SAMPLING_CHOICES = ((
        SEQUENCE, 'Tasks are ordered by Data manager ordering'
    ), (UNIFORM, 'Tasks are chosen randomly'), (
        UNCERTAINTY,
        'Tasks are chosen according to model uncertainty scores (active learning mode)'
    ))

    sampling = models.CharField(max_length=100,
                                choices=SAMPLING_CHOICES,
                                null=True,
                                default=SEQUENCE)
    show_ground_truth_first = models.BooleanField(_('show ground truth first'),
                                                  default=True)
    show_overlap_first = models.BooleanField(_('show overlap first'),
                                             default=True)
    overlap_cohort_percentage = models.IntegerField(
        _('overlap_cohort_percentage'), default=100)

    task_data_login = models.CharField(
        _('task_data_login'),
        max_length=256,
        blank=True,
        null=True,
        help_text='Task data credentials: login')
    task_data_password = models.CharField(
        _('task_data_password'),
        max_length=256,
        blank=True,
        null=True,
        help_text='Task data credentials: password')

    def __init__(self, *args, **kwargs):
        super(Project, self).__init__(*args, **kwargs)
        self.__original_label_config = self.label_config
        self.__maximum_annotations = self.maximum_annotations
        self.__overlap_cohort_percentage = self.overlap_cohort_percentage

        # TODO: once bugfix with incorrect data types in List
        # logging.warning('! Please, remove code below after patching of all projects (extract_data_types)')
        if self.label_config is not None:
            if self.data_types != extract_data_types(self.label_config):
                self.data_types = extract_data_types(self.label_config)

    @property
    def num_tasks(self):
        return self.tasks.count()

    def get_current_predictions(self):
        return Prediction.objects.filter(
            Q(task__project=self.id) & Q(model_version=self.model_version))

    @property
    def num_predictions(self):
        return self.get_current_predictions().count()

    @property
    def num_annotations(self):
        return Annotation.objects.filter(
            Q(task__project=self) & Q_finished_annotations
            & Q(ground_truth=False)).count()

    @property
    def has_predictions(self):
        return self.get_current_predictions().exists()

    @property
    def has_any_predictions(self):
        return Prediction.objects.filter(Q(task__project=self.id)).exists()

    @property
    def business(self):
        return self.created_by.business

    @property
    def is_private(self):
        return None

    @property
    def has_storages(self):
        return hasattr(
            self, 'storages'
        ) and self.storages is not None and self.storages.count() > 0

    @property
    def secure_mode(self):
        return False

    @property
    def one_object_in_label_config(self):
        return len(self.data_types) <= 1

    @property
    def only_undefined_field(self):
        return self.one_object_in_label_config and self.summary.common_data_columns and self.summary.common_data_columns[
            0] == settings.DATA_UNDEFINED_NAME

    @property
    def get_labeled_count(self):
        return self.tasks.filter(is_labeled=True).count()

    @property
    def get_collected_count(self):
        return self.tasks.count()

    @property
    def get_total_possible_count(self):
        """
            Tasks have overlap - i.e. how many annotations per task should be accepted;
            possible count = sum([t.overlap for t in tasks])

        :return: N int total amount of Annotations that should be submitted
        """
        if self.tasks.count() == 0:
            return 0
        return self.tasks.aggregate(Sum('overlap'))['overlap__sum']

    @property
    def get_available_for_labeling(self):
        return self.get_collected_count - self.get_labeled_count

    @property
    def need_annotators(self):
        return self.maximum_annotations - self.num_annotators

    @classmethod
    def find_by_invite_url(cls, url):
        token = url.strip('/').split('/')[-1]
        if len(token):
            return Project.objects.get(token=token)
        else:
            raise KeyError(f'Can\'t find Project by invite URL: {url}')
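
    # Illustrative only: for url='https://host/projects/invite/abc123/',
    # the token parsed above is 'abc123' and the matching Project is returned.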

    def reset_token(self):
        self.token = create_hash()
        self.save()

    def add_collaborator(self, user):
        created = False
        with transaction.atomic():
            try:
                ProjectMember.objects.get(user=user, project=self)
            except ProjectMember.DoesNotExist:
                ProjectMember.objects.create(user=user, project=self)
                created = True
            else:
                logger.debug(
                    f'Project membership {self} for user {user} already exists'
                )
        return created

    def has_collaborator(self, user):
        return ProjectMember.objects.filter(user=user, project=self).exists()

    def has_collaborator_enabled(self, user):
        membership = ProjectMember.objects.filter(user=user, project=self)
        return membership.exists() and membership.first().enabled

    def update_tasks_states(self, maximum_annotations_changed,
                            overlap_cohort_percentage_changed,
                            tasks_number_changed):

        # if only maximum annotations parameter is tweaked
        if maximum_annotations_changed and not overlap_cohort_percentage_changed:
            tasks_with_overlap = self.tasks.filter(overlap__gt=1)
            if tasks_with_overlap.exists():
                # if there is a part with overlaped tasks, affect only them
                tasks_with_overlap.update(overlap=self.maximum_annotations)
            else:
                # otherwise affect all tasks
                self.tasks.update(overlap=self.maximum_annotations)

        # if cohort slider is tweaked
        elif overlap_cohort_percentage_changed and self.maximum_annotations > 1:
            self._rearrange_overlap_cohort()

        # if adding/deleting tasks and cohort settings are applied
        elif tasks_number_changed and self.overlap_cohort_percentage < 100 and self.maximum_annotations > 1:
            self._rearrange_overlap_cohort()

        if maximum_annotations_changed or overlap_cohort_percentage_changed:
            bulk_update_stats_project_tasks(
                self.tasks.filter(
                    Q(annotations__isnull=False)
                    & Q(annotations__ground_truth=False)))

    def _rearrange_overlap_cohort(self):
        tasks_with_overlap = self.tasks.filter(overlap__gt=1)
        tasks_with_overlap_count = tasks_with_overlap.count()
        total_tasks = self.tasks.count()

        new_tasks_with_overlap_count = int(self.overlap_cohort_percentage /
                                           100 * total_tasks + 0.5)
        if tasks_with_overlap_count > new_tasks_with_overlap_count:
            # TODO: warn if we try to reduce current cohort that is already labeled with overlap
            reduce_by = tasks_with_overlap_count - new_tasks_with_overlap_count
            reduce_tasks = sample_query(tasks_with_overlap, reduce_by)
            reduce_tasks.update(overlap=1)
            reduced_tasks_ids = reduce_tasks.values_list('id', flat=True)
            tasks_with_overlap.exclude(id__in=reduced_tasks_ids).update(
                overlap=self.maximum_annotations)

        elif tasks_with_overlap_count < new_tasks_with_overlap_count:
            increase_by = new_tasks_with_overlap_count - tasks_with_overlap_count
            tasks_without_overlap = self.tasks.filter(overlap=1)
            increase_tasks = sample_query(tasks_without_overlap, increase_by)
            increase_tasks.update(overlap=self.maximum_annotations)
            tasks_with_overlap.update(overlap=self.maximum_annotations)

    def remove_tasks_by_file_uploads(self, file_upload_ids):
        self.tasks.filter(file_upload_id__in=file_upload_ids).delete()

    def advance_onboarding(self):
        """ Move project to next onboarding step
        """
        po_qs = self.steps_left.order_by('step__order')
        count = po_qs.count()

        if count:
            po = po_qs.first()
            po.finished = True
            po.save()

            return count != 1

    def created_at_prettify(self):
        return self.created_at.strftime("%d %b %Y %H:%M:%S")

    def onboarding_step_finished(self, step):
        """ Mark specific step as finished
        """
        pos = ProjectOnboardingSteps.objects.get(code=step)
        po = ProjectOnboarding.objects.get(project=self, step=pos)
        po.finished = True
        po.save()

        return po

    def data_types_json(self):
        return json.dumps(self.data_types)

    def available_data_keys(self):
        return sorted(list(self.data_types.keys()))

    @classmethod
    def validate_label_config(cls, config_string):
        validate_label_config(config_string)

    def validate_config(self, config_string):
        self.validate_label_config(config_string)
        if not hasattr(self, 'summary'):
            return

        # validate data columns consistency
        fields_from_config = get_all_object_tag_names(config_string)
        if not fields_from_config:
            logger.debug(f'Data fields not found in labeling config')
            return
        fields_from_data = set(self.summary.common_data_columns)
        fields_from_data.discard(settings.DATA_UNDEFINED_NAME)
        if fields_from_data and not fields_from_config.issubset(
                fields_from_data):
            different_fields = list(
                fields_from_config.difference(fields_from_data))
            raise ValidationError(
                f'These fields are not present in the data: {",".join(different_fields)}'
            )

        # validate annotations consistency
        annotations_from_config = set(
            get_all_control_tag_tuples(config_string))
        if not annotations_from_config:
            logger.debug(f'Annotation schema is not found in config')
            return
        annotations_from_data = set(self.summary.created_annotations)
        if annotations_from_data and not annotations_from_data.issubset(
                annotations_from_config):
            different_annotations = list(
                annotations_from_data.difference(annotations_from_config))
            diff_str = []
            for ann_tuple in different_annotations:
                from_name, to_name, t = ann_tuple.split('|')
                diff_str.append(
                    f'{self.summary.created_annotations[ann_tuple]} '
                    f'with from_name={from_name}, to_name={to_name}, type={t}')
            diff_str = '\n'.join(diff_str)
            raise ValidationError(
                f'Created annotations are incompatible with provided labeling schema, '
                f'we found:\n{diff_str}')

        # validate labels consistency
        labels_from_config = get_all_labels(config_string)
        created_labels = self.summary.created_labels
        for control_tag_from_data, labels_from_data in created_labels.items():
            # Check if labels created in annotations, and their control tag has been removed
            if labels_from_data and control_tag_from_data not in labels_from_config:
                raise ValidationError(
                    f'There are {sum(labels_from_data.values(), 0)} annotation(s) created with tag '
                    f'"{control_tag_from_data}", you can\'t remove it')
            labels_from_config_by_tag = set(
                labels_from_config[control_tag_from_data])
            if not set(labels_from_data).issubset(
                    set(labels_from_config_by_tag)):
                different_labels = list(
                    set(labels_from_data).difference(
                        labels_from_config_by_tag))
                diff_str = '\n'.join(f'{l} ({labels_from_data[l]} annotations)'
                                     for l in different_labels)
                raise ValidationError(
                    f'These labels still exist in annotations:\n{diff_str}')

    def _label_config_has_changed(self):
        return self.label_config != self.__original_label_config

    def delete_predictions(self):
        predictions = Prediction.objects.filter(task__project=self)
        count = predictions.count()
        predictions.delete()
        return {'deleted_predictions': count}

    def get_updated_weights(self):
        outputs = parse_config(self.label_config)
        control_weights = {}
        exclude_control_types = ('Filter', )
        for control_name in outputs:
            control_type = outputs[control_name]['type']
            if control_type in exclude_control_types:
                continue
            control_weights[control_name] = {
                'overall': 1.0,
                'type': control_type,
                'labels': {
                    label: 1.0
                    for label in outputs[control_name].get('labels', [])
                }
            }
        return control_weights

    def save(self, *args, recalc=True, **kwargs):
        exists = True if self.pk else False

        if self.label_config and (self._label_config_has_changed()
                                  or not exists or not self.control_weights):
            self.control_weights = self.get_updated_weights()
        super(Project, self).save(*args, **kwargs)
        project_with_config_just_created = not exists and self.pk and self.label_config
        if self._label_config_has_changed(
        ) or project_with_config_just_created:
            self.data_types = extract_data_types(self.label_config)

        if self._label_config_has_changed():
            self.__original_label_config = self.label_config

        if not exists:
            steps = ProjectOnboardingSteps.objects.all()
            objs = [
                ProjectOnboarding(project=self, step=step) for step in steps
            ]
            ProjectOnboarding.objects.bulk_create(objs)

        # argument for recalculate project task stats
        if recalc:
            self.update_tasks_states(
                maximum_annotations_changed=self.__maximum_annotations !=
                self.maximum_annotations,
                overlap_cohort_percentage_changed=self.
                __overlap_cohort_percentage != self.overlap_cohort_percentage,
                tasks_number_changed=False)
            self.__maximum_annotations = self.maximum_annotations
            self.__overlap_cohort_percentage = self.overlap_cohort_percentage

    def get_member_ids(self):
        if hasattr(self, 'team_link'):
            # project has defined team scope
            # TODO: avoid checking team but rather add all project members when creating a project
            return self.team_link.team.members.values_list('user', flat=True)
        else:
            from users.models import User
            # TODO: may want to return all users from organization
            return User.objects.none()

    def has_team_user(self, user):
        return hasattr(self,
                       'team_link') and self.team_link.team.has_user(user)

    def annotators(self):
        """ Annotators connected to this project including team members
        """
        from users.models import User
        member_ids = self.get_member_ids()
        team_members = User.objects.filter(id__in=member_ids).order_by('email')

        # add members from invited projects
        project_member_ids = self.members.values_list('user__id', flat=True)
        project_members = User.objects.filter(id__in=project_member_ids)

        annotators = team_members | project_members

        # set annotator.team_member=True if annotator is not an invited user
        annotators = annotators.annotate(team_member=Case(
            When(id__in=project_member_ids, then=Value(False)),
            default=Value(True),
            output_field=BooleanField(),
        ))
        return annotators

    def annotators_with_annotations(self, min_count=500):
        """ Annotators with annotation number > min_number

        :param min_count: minimal annotation number to leave an annotators
        :return: filtered annotators
        """
        annotators = self.annotators()
        q = Q(
            annotations__task__project=self) & Q_task_finished_annotations & Q(
                annotations__ground_truth=False)
        annotators = annotators.annotate(
            annotation_count=Count('annotations', filter=q, distinct=True))
        return annotators.filter(annotation_count__gte=min_count)

    def labeled_tasks(self):
        return self.tasks.filter(is_labeled=True)

    def has_annotations(self):
        from tasks.models import Annotation  # prevent cycling imports
        return Annotation.objects.filter(
            Q(task__project=self) & Q(ground_truth=False)).count() > 0

    # [TODO] this should be a template tag or something like this
    @property
    def label_config_line(self):
        c = self.label_config
        return config_line_stipped(c)

    def get_sample_task(self, label_config=None):
        config = label_config or self.label_config
        task, _, _ = get_sample_task(config)
        return task

    def eta(self):
        """
            Estimated time for the project to be finished:
            eta = average annotation lead time * remaining annotations

            task overlap = number of annotations needed for a task to count as finished (is_labeled)
            remaining annotations = sum over unfinished tasks of the annotations still needed to reach their overlap

        :return: time in seconds
        """
        # finished tasks * overlap
        finished_tasks = Task.objects.filter(project=self.id, is_labeled=True)
        # annotators may have made more annotations than the overlap requires
        min_n_finished_annotations = sum([ft.overlap for ft in finished_tasks])

        annotations_unfinished_tasks = Annotation.objects.filter(
            task__project=self.id,
            task__is_labeled=False,
            ground_truth=False,
            result__isnull=False).count()

        # get minimum remaining annotations
        total_annotations_needed = self.get_total_possible_count
        annotations_remain = total_annotations_needed - min_n_finished_annotations - annotations_unfinished_tasks

        # get average lead time of all finished annotations
        finished_annotations = Annotation.objects.filter(
            Q(task__project=self.id) & Q(ground_truth=False),
            result__isnull=False).values('lead_time')
        avg_lead_time = finished_annotations.aggregate(
            avg_lead_time=Avg('lead_time'))['avg_lead_time']

        if avg_lead_time is None:
            return None
        return avg_lead_time * annotations_remain

    def finished(self):
        return not self.tasks.filter(is_labeled=False).exists()

    def annotations_lead_time(self):
        annotations = Annotation.objects.filter(
            Q(task__project=self.id) & Q(ground_truth=False))
        return annotations.aggregate(
            avg_lead_time=Avg('lead_time'))['avg_lead_time']

    @staticmethod
    def django_settings():
        return settings

    @staticmethod
    def max_tasks_file_size():
        return settings.TASKS_MAX_FILE_SIZE

    def get_control_tags_from_config(self):
        return parse_config(self.label_config)

    def get_parsed_config(self):
        return parse_config(self.label_config)

    def __str__(self):
        if self.title:
            return f'{self.title} (id={self.id})'
        return _("Business number %d") % self.pk

    class Meta:
        db_table = 'project'
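A minimal sketch of the arithmetic behind eta() above, on plain numbers; the helper and figures here are illustrative only, not part of the model:

def estimate_eta(avg_lead_time, total_needed, finished, in_progress):
    # ETA in seconds = average annotation lead time * annotations still missing
    if avg_lead_time is None:
        return None
    remaining = total_needed - finished - in_progress
    return avg_lead_time * remaining

# e.g. 120 s per annotation, 300 annotations needed in total,
# 180 already counted as finished, 20 submitted on unfinished tasks
print(estimate_eta(120, 300, 180, 20))  # 12000 seconds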
Example No. 21
class barcodeResult(models.Model):
    barcode = models.TextField()
    data = JSONField()
Example No. 22
class ProjectSummary(models.Model):

    project = AutoOneToOneField(Project,
                                primary_key=True,
                                on_delete=models.CASCADE,
                                related_name='summary')
    created_at = models.DateTimeField(_('created at'),
                                      auto_now_add=True,
                                      help_text='Creation time')

    # { col1: task_count_with_col1, col2: task_count_with_col2 }
    all_data_columns = JSONField(
        _('all data columns'),
        null=True,
        default=dict,
        help_text='All data columns found in imported tasks')
    # [col1, col2]
    common_data_columns = JSONField(
        _('common data columns'),
        null=True,
        default=list,
        help_text='Common data columns found across imported tasks')
    # { (from_name, to_name, type): annotation_count }
    created_annotations = JSONField(
        _('created annotations'),
        null=True,
        default=dict,
        help_text=
        'Unique annotation types identified by tuple (from_name, to_name, type)'
    )  # noqa
    # { from_name: {label1: task_count_with_label1, label2: task_count_with_label2} }
    created_labels = JSONField(_('created labels'),
                               null=True,
                               default=dict,
                               help_text='Unique labels')

    def has_permission(self, user):
        return self.project.has_permission(user)

    def reset(self):
        self.all_data_columns = {}
        self.common_data_columns = []
        self.created_annotations = {}
        self.created_labels = {}
        self.save()

    def update_data_columns(self, tasks):
        common_data_columns = set()
        all_data_columns = dict(self.all_data_columns)
        for task in tasks:
            try:
                task_data = get_attr_or_item(task, 'data')
            except KeyError:
                task_data = task
            task_data_keys = task_data.keys()
            for column in task_data_keys:
                all_data_columns[column] = all_data_columns.get(column, 0) + 1
            if not common_data_columns:
                common_data_columns = set(task_data_keys)
            else:
                common_data_columns &= set(task_data_keys)

        self.all_data_columns = all_data_columns
        if not self.common_data_columns:
            self.common_data_columns = list(sorted(common_data_columns))
        else:
            self.common_data_columns = list(
                sorted(set(self.common_data_columns) & common_data_columns))
        logger.debug(f'summary.all_data_columns = {self.all_data_columns}')
        logger.debug(
            f'summary.common_data_columns = {self.common_data_columns}')
        self.save()

    def remove_data_columns(self, tasks):
        all_data_columns = dict(self.all_data_columns)
        keys_to_remove = []

        for task in tasks:
            task_data = get_attr_or_item(task, 'data')
            for key in task_data.keys():
                if key in all_data_columns:
                    all_data_columns[key] -= 1
                    if all_data_columns[key] == 0:
                        keys_to_remove.append(key)
                        all_data_columns.pop(key)
        self.all_data_columns = all_data_columns

        if keys_to_remove:
            common_data_columns = list(self.common_data_columns)
            for key in keys_to_remove:
                if key in common_data_columns:
                    common_data_columns.remove(key)
            self.common_data_columns = common_data_columns
        logger.debug(f'summary.all_data_columns = {self.all_data_columns}')
        logger.debug(
            f'summary.common_data_columns = {self.common_data_columns}')
        self.save()

    def _get_annotation_key(self, result):
        result_type = result.get('type')
        if result_type in ('relation', 'rating', 'pairwise'):
            return None
        if 'from_name' not in result or 'to_name' not in result:
            logger.error(
                'Unexpected annotation.result format: "from_name" or "to_name" not found in %r'
                % result)
            return None
        result_from_name = result['from_name']
        key = get_annotation_tuple(result_from_name, result['to_name'],
                                   result_type or '')
        return key

    def _get_labels(self, result):
        result_type = result.get('type')
        labels = []
        for label in result['value'].get(result_type, []):
            if isinstance(label, list):
                labels.extend(label)
            else:
                labels.append(label)
        return [str(l) for l in labels]

    def update_created_annotations_and_labels(self, annotations):
        created_annotations = dict(self.created_annotations)
        labels = dict(self.created_labels)
        for annotation in annotations:
            results = get_attr_or_item(annotation, 'result') or []
            for result in results:

                # aggregate annotation types
                key = self._get_annotation_key(result)
                if not key:
                    continue
                created_annotations[key] = created_annotations.get(key, 0) + 1
                from_name = result['from_name']

                # aggregate labels
                if from_name not in self.created_labels:
                    labels[from_name] = dict()

                for label in self._get_labels(result):
                    labels[from_name][label] = labels[from_name].get(label,
                                                                     0) + 1

        logger.debug(f'summary.created_annotations = {created_annotations}')
        logger.debug(f'summary.created_labels = {labels}')
        self.created_annotations = created_annotations
        self.created_labels = labels
        self.save()

    def remove_created_annotations_and_labels(self, annotations):
        created_annotations = dict(self.created_annotations)
        labels = dict(self.created_labels)
        for annotation in annotations:
            results = get_attr_or_item(annotation, 'result') or []
            for result in results:

                # reduce annotation counters
                key = self._get_annotation_key(result)
                if key in created_annotations:
                    created_annotations[key] -= 1
                    if created_annotations[key] == 0:
                        created_annotations.pop(key)

                # reduce labels counters
                from_name = result.get('from_name')
                if from_name not in labels:
                    continue
                for label in self._get_labels(result):
                    label = str(label)
                    if label in labels[from_name]:
                        labels[from_name][label] -= 1
                        if labels[from_name][label] == 0:
                            labels[from_name].pop(label)
                if not labels[from_name]:
                    labels.pop(from_name)
        logger.debug(f'summary.created_annotations = {created_annotations}')
        logger.debug(f'summary.created_labels = {labels}')
        self.created_annotations = created_annotations
        self.created_labels = labels
        self.save()
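The column bookkeeping in update_data_columns() above is plain counting plus set intersection; a standalone sketch of that logic without the model or database (names are illustrative):

def summarize_columns(tasks, all_columns=None, common_columns=None):
    # Count how many tasks contain each data key and keep only keys shared by all tasks
    all_columns = dict(all_columns or {})
    common_columns = set(common_columns) if common_columns else None
    for task in tasks:
        keys = list(task['data'].keys())
        for column in keys:
            all_columns[column] = all_columns.get(column, 0) + 1
        key_set = set(keys)
        common_columns = key_set if common_columns is None else common_columns & key_set
    return all_columns, sorted(common_columns or [])

tasks = [{'data': {'text': 'a', 'meta': 1}}, {'data': {'text': 'b'}}]
print(summarize_columns(tasks))  # ({'text': 2, 'meta': 1}, ['text'])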
Example No. 23
class Model(models.Model):
    json = JSONField()
    hstore = HStoreField()

    class Meta:
        app_label = "django_tables2_test"
Example No. 24
class SamplingEventDevice(base.IrekuaModelBaseUser):
    sampling_event = models.ForeignKey(
        'SamplingEvent',
        on_delete=models.PROTECT,
        db_column='sampling_event_id',
        verbose_name=_('sampling event'),
        help_text=_('Sampling event in which this device was deployed'),
        blank=False,
        null=False)

    deployed_on = models.DateTimeField(
        db_column='deployed_on',
        verbose_name=_('deployed on'),
        help_text=_('Date at which the device started capturing information.'),
        blank=True,
        null=True)
    recovered_on = models.DateTimeField(
        db_column='recovered_on',
        verbose_name=_('recovered on'),
        help_text=_('Date at which the device stopped capturing information.'),
        blank=True,
        null=True)

    geo_ref = PointField(
        blank=True,
        null=True,
        db_column='geo_ref',
        verbose_name=_('geo ref'),
        help_text=_('Georeference of deployed device as Geometry'),
        spatial_index=True)
    latitude = models.FloatField(
        db_column='latitude',
        verbose_name=_('latitude'),
        help_text=_('Latitude of deployed device (in decimal degrees)'),
        validators=[MinValueValidator(-90),
                    MaxValueValidator(90)],
        null=True,
        blank=True)
    longitude = models.FloatField(
        db_column='longitude',
        verbose_name=_('longitude'),
        help_text=_('Longitude of deployed device (in decimal degrees)'),
        validators=[MinValueValidator(-180),
                    MaxValueValidator(180)],
        null=True,
        blank=True)
    altitude = models.FloatField(
        blank=True,
        db_column='altitude',
        verbose_name=_('altitude'),
        help_text=_('Altitude of deployed device (in meters)'),
        null=True)

    collection_device = models.ForeignKey(
        'CollectionDevice',
        db_column='collection_device_id',
        verbose_name=_('collection device'),
        help_text=_('Reference to collection device used on sampling event'),
        on_delete=models.PROTECT,
        blank=True,
        null=True)
    commentaries = models.TextField(db_column='commentaries',
                                    verbose_name=_('commentaries'),
                                    help_text=_('Sampling event commentaries'),
                                    blank=True)
    metadata = JSONField(
        db_column='metadata',
        verbose_name=_('metadata'),
        help_text=_('Metadata associated to sampling event device'),
        default=empty_JSON,
        blank=True,
        null=True)
    configuration = JSONField(
        db_column='configuration',
        verbose_name=_('configuration'),
        default=empty_JSON,
        help_text=_('Configuration on device through the sampling event'),
        blank=True,
        null=True)
    licence = models.ForeignKey(
        'Licence',
        on_delete=models.PROTECT,
        db_column='licence_id',
        verbose_name=_('licence'),
        help_text=_('Licence for all items in sampling event'),
        blank=True,
        null=True)

    class Meta:
        verbose_name = _('Sampling Event Device')
        verbose_name_plural = _('Sampling Event Devices')

        unique_together = (('sampling_event', 'collection_device'), )

        ordering = ['-created_on']

    def __str__(self):
        msg = _('{} (deployed)')
        msg = msg.format(self.collection_device.internal_id)
        return msg

    def validate_licence(self):
        # inherit the sampling event licence when none was set explicitly
        if self.licence is None:
            self.licence = self.sampling_event.licence

        if self.licence is not None:
            collection = self.sampling_event.collection
            collection.validate_and_get_licence(self.licence)

    def validate_user(self):
        if self.created_by is None:
            self.created_by = self.sampling_event.created_by

        if self.created_by is None:
            return

    def validate_deployed_on(self):
        starting_date = self.sampling_event.started_on

        if not starting_date:
            return

        if not self.deployed_on:
            self.deployed_on = starting_date
            return

        if starting_date > self.deployed_on:
            message = _(
                "Deployment date cannot be earlier that sampling event starting date"
            )
            raise ValidationError(message)

    def validate_recovered_on(self):
        ending_date = self.sampling_event.ended_on

        if not ending_date:
            return

        if not self.recovered_on:
            self.recovered_on = ending_date
            return

        if ending_date < self.recovered_on:
            message = _(
                "Recovery date cannot be latter that sampling event ending date"
            )
            raise ValidationError(message)

    def validate_item_datetimes(self):
        start = self.deployed_on if self.deployed_on else None
        end = self.recovered_on if self.recovered_on else None

        if (start is None) and (end is None):
            return

        for item in self.item_set.all():
            if not item.captured_on:
                continue

            if start is not None:
                if item.captured_on < start:
                    message = _(
                        'There is an item registered in this deployment that '
                        'was captured earlier than the registered deployment '
                        'date.')
                    raise ValidationError(message)

            if end is not None:
                if item.captured_on > end:
                    message = _(
                        'There is an item registered in this deployment that '
                        'was captured later than the registered device '
                        'recovery date.')
                    raise ValidationError(message)

    def get_best_date_estimate(self, datetime_info, time_zone):
        year = datetime_info.get('year', None)
        month = datetime_info.get('month', None)
        day = datetime_info.get('day', None)
        hour = datetime_info.get('hour', None)
        minute = datetime_info.get('minute', None)
        second = datetime_info.get('second', None)

        if day is None:
            day = self.deployed_on.day

        if month is None:
            month = self.deployed_on.month
            day = 1

        if year is None:
            if self.deployed_on.year != self.recovered_on.year:
                message = _(
                    'No year was provided for date estimation and it couldn\'t'
                    ' be inferred from the deployment.')
                raise ValidationError(message)

            year = self.deployed_on.year

        if second is None:
            second = self.deployed_on.second

        if minute is None:
            minute = self.deployed_on.minute
            second = self.deployed_on.second

        if hour is None:
            hour = self.deployed_on.hour
            minute = self.deployed_on.minute
            second = self.deployed_on.second

        return datetime.datetime(year, month, day, hour, minute, second, 0,
                                 time_zone)

    def get_timezone(self, time_zone=None):
        if time_zone is None:
            time_zone = self.sampling_event.collection_site.site.timezone

        return pytz_timezone(time_zone)

    def validate_date(self, date_info):
        time_zone = self.get_timezone(
            time_zone=date_info.get('time_zone', None))
        hdate = self.get_best_date_estimate(date_info, time_zone)
        hdate_up = self.recovered_on.astimezone(time_zone)
        hdate_down = self.deployed_on.astimezone(time_zone)

        if hdate < hdate_down or hdate > hdate_up:
            mssg = _(
                'Date is not within the range in which the device was deployed: \n'
                'Deployment: {} \t Recovery: {} \t Date: {}').format(
                    hdate_down, hdate_up, hdate)
            raise ValidationError(mssg)

    def sync_coordinates_and_georef(self):
        if self.latitude is not None and self.longitude is not None:
            self.geo_ref = Point([self.longitude, self.latitude])
            return

        if self.geo_ref:
            self.latitude = self.geo_ref.y
            self.longitude = self.geo_ref.x
            return

        msg = _('Geo reference or longitude-latitude must be provided')
        raise ValidationError({'geo_ref': msg})

    def save(self, *args, **kwargs):
        if self.deployed_on is None:
            self.deployed_on = self.sampling_event.started_on

        if self.recovered_on is None:
            self.recovered_on = self.sampling_event.ended_on

        return super().save(*args, **kwargs)

    def clean(self):
        self.sync_coordinates_and_georef()

        try:
            self.validate_licence()
        except ValidationError as error:
            raise ValidationError({'licence': error})

        try:
            sampling_event_type = self.sampling_event.sampling_event_type
            device_type = self.collection_device.physical_device.device.device_type

            sampling_event_device_type = (
                sampling_event_type.validate_and_get_device_type(device_type))
        except ValidationError as error:
            raise ValidationError({'physical_device': error})

        if sampling_event_device_type is not None:
            try:
                sampling_event_device_type.validate_metadata(self.metadata)
            except ValidationError as error:
                raise ValidationError({'metadata': error})

        try:
            self.validate_deployed_on()
        except ValidationError as error:
            raise ValidationError({'deployed_on': error})

        try:
            self.validate_recovered_on()
        except ValidationError as error:
            raise ValidationError({'recovered_on': error})

        try:
            self.validate_item_datetimes()
        except ValidationError as error:
            raise ValidationError({'recovered_on': error})

        try:
            physical_device = self.collection_device.physical_device
            physical_device.validate_configuration(self.configuration)
        except ValidationError as error:
            raise ValidationError({'configuration': error})

        if self.licence is not None:
            collection = self.sampling_event.collection
            try:
                collection.validate_and_get_licence(self.licence)
            except ValidationError as error:
                raise ValidationError({'licence': error})

        super().clean()
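The fallback cascade in get_best_date_estimate() fills every missing date component from the deployment timestamp, with a missing coarse field also resetting the finer ones; a simplified, timezone-free sketch of the same idea (illustrative only, not the model's API):

import datetime

def best_date_estimate(info, deployed_on):
    # Fill missing components of a partial date from the deployment datetime
    parts = {k: info.get(k) for k in ('year', 'month', 'day', 'hour', 'minute', 'second')}
    if parts['day'] is None:
        parts['day'] = deployed_on.day
    if parts['month'] is None:
        parts['month'], parts['day'] = deployed_on.month, 1
    if parts['year'] is None:
        parts['year'] = deployed_on.year
    if parts['second'] is None:
        parts['second'] = deployed_on.second
    if parts['minute'] is None:
        parts['minute'], parts['second'] = deployed_on.minute, deployed_on.second
    if parts['hour'] is None:
        parts['hour'], parts['minute'], parts['second'] = (
            deployed_on.hour, deployed_on.minute, deployed_on.second)
    return datetime.datetime(**parts)

deployed = datetime.datetime(2021, 3, 15, 9, 30, 0)
print(best_date_estimate({'year': 2021, 'month': 4}, deployed))  # 2021-04-15 09:30:00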
Example No. 25
class SamplingEvent(base.IrekuaModelBaseUser):
    sampling_event_type = models.ForeignKey(
        'SamplingEventType',
        on_delete=models.PROTECT,
        db_column='sampling_event_type',
        verbose_name=_('sampling event type'),
        help_text=_('Type of sampling event'),
        blank=False,
        null=False)
    collection_site = models.ForeignKey(
        'CollectionSite',
        db_column='collection_site_id',
        verbose_name=_('collection site'),
        help_text=_('Reference to site at which sampling took place'),
        on_delete=models.PROTECT,
        blank=True,
        null=True)

    commentaries = models.TextField(
        db_column='commentaries',
        verbose_name=_('commentaries'),
        help_text=_('Sampling event commentaries'),
        blank=True)
    metadata = JSONField(
        db_column='metadata',
        verbose_name=_('metadata'),
        help_text=_('Metadata associated to sampling event'),
        default=empty_JSON,
        blank=True,
        null=True)
    started_on = models.DateTimeField(
        db_column='started_on',
        verbose_name=_('started on'),
        help_text=_('Date at which sampling begun'),
        blank=True,
        null=True)
    ended_on = models.DateTimeField(
        db_column='ended_on',
        verbose_name=_('ended on'),
        help_text=_('Date at which sampling stopped'),
        blank=True,
        null=True)
    collection = models.ForeignKey(
        'Collection',
        on_delete=models.PROTECT,
        db_column='collection_id',
        verbose_name=_('collection'),
        help_text=_('Collection to which sampling event belongs'),
        blank=False,
        null=False)
    licence = models.ForeignKey(
        'Licence',
        on_delete=models.PROTECT,
        db_column='licence_id',
        verbose_name=_('licence'),
        help_text=_('Licence for all items in sampling event'),
        blank=True,
        null=True)

    class Meta:
        verbose_name = _('Sampling Event')
        verbose_name_plural = _('Sampling Events')

        ordering = ['-created_on']

    def __str__(self):
        msg = _('%(site)s - %(date)s')
        params = dict(
            site=str(self.collection_site),
            date=self.started_on.strftime('%m/%Y'))
        return msg % params

    @property
    def items(self):
        queryset = Item.objects.filter(
            sampling_event_device__sampling_event=self)
        return queryset

    def validate_site(self):
        collection = self.collection
        site_collection = self.collection_site.collection

        if collection != site_collection:
            msg = _(
                'Site does not belong to the declared collection')
            raise ValidationError(msg)

    def validate_dates(self):
        if self.started_on > self.ended_on:
            msg = _(
                'Starting date cannot be greater than ending date')
            raise ValidationError(msg)

    def validate_date(self, date_info):
        pass

    def validate_device_deployment_dates(self):
        start = self.started_on if self.started_on else None

        if start is None:
            return

        for device in self.samplingeventdevice_set.all():
            deployed_on = device.deployed_on if device.deployed_on else start

            if start > deployed_on:
                message = _(
                    'A device was deployed in this sampling event before '
                    'the start of the sampling event.')
                raise ValidationError(message)

    def validate_device_recovery_dates(self):
        end = self.ended_on if self.ended_on else None

        if end is None:
            return

        for device in self.samplingeventdevice_set.all():
            recovered_on = device.recovered_on if device.recovered_on else end

            if end < recovered_on:
                message = _(
                    'A device was deployed in this sampling event after '
                    'the end of the sampling event.')
                raise ValidationError(message)

    def clean(self):
        collection = self.collection

        try:
            self.validate_site()
        except ValidationError as error:
            raise ValidationError({'collection_site': error})

        try:
            self.validate_dates()
        except ValidationError as error:
            raise ValidationError({'started_on': error})

        try:
            self.validate_device_deployment_dates()
        except ValidationError as error:
            raise ValidationError({'started_on': error})

        try:
            self.validate_device_recovery_dates()
        except ValidationError as error:
            raise ValidationError({'ended_on': error})

        try:
            self.sampling_event_type.validate_metadata(self.metadata)
        except ValidationError as error:
            raise ValidationError({'metadata': error})

        try:
            site_type = self.collection_site.site_type
            self.sampling_event_type.validate_site_type(site_type)
        except ValidationError as error:
            raise ValidationError({'collection_site': error})

        try:
            collection.validate_and_get_sampling_event_type(self.sampling_event_type)
        except ValidationError as error:
            raise ValidationError({'sampling_event_type': error})

        if self.licence:
            try:
                collection.validate_and_get_licence(self.licence)
            except ValidationError as error:
                raise ValidationError({'licence': error})

        super(SamplingEvent, self).clean()
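Taken together, the date checks in SamplingEvent and SamplingEventDevice enforce one ordering of timestamps: started_on <= deployed_on <= captured_on <= recovered_on <= ended_on. A compact way to state that invariant (an illustrative helper, not part of either model):

import datetime as dt

def deployment_window_is_valid(started_on, deployed_on, captured_on, recovered_on, ended_on):
    # Every known timestamp must respect: start <= deployed <= captured <= recovered <= end
    chain = [started_on, deployed_on, captured_on, recovered_on, ended_on]
    known = [d for d in chain if d is not None]
    return all(a <= b for a, b in zip(known, known[1:]))

print(deployment_window_is_valid(dt.date(2021, 1, 1), dt.date(2021, 1, 2),
                                 dt.date(2021, 1, 10), dt.date(2021, 2, 1),
                                 dt.date(2021, 2, 2)))  # True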
Example No. 26
class Advert(models.Model):
    """
    Advertisement
    """

    CHOICE_POSITION = (
        ('top', 'Top'),
        ('bottom', 'Bottom'),
        ('content', 'Content'),
        ('skyscraper', 'Skyscraper'),
    )
    CHOICE_COLOR = (
        ('primary', 'primary'),
        ('secondary', 'secondary'),
        ('success', 'success'),
        ('danger', 'danger'),
        ('warning', 'warning'),
        ('info', 'info'),
        ('light', 'light'),
        ('dark', 'dark'),
        ('white', 'white'),
        ('transparent', 'transparent'),
    )

    slug = UUIDField(verbose_name='Public code',
                     default=uuid.uuid4,
                     db_index=True,
                     unique=True,
                     editable=False)
    title = CharField(verbose_name='Title', max_length=255)
    company = ForeignKey(Company,
                         related_name='advert_company',
                         blank=True,
                         null=True,
                         verbose_name='Company',
                         on_delete=models.SET_NULL)
    image = ImageField(verbose_name='Image',
                       upload_to=latin_filename,
                       blank=True,
                       null=True)
    color = CharField(verbose_name='Fill color',
                      choices=CHOICE_COLOR,
                      max_length=255)
    url = CharField(verbose_name='Link',
                    max_length=255,
                    blank=True,
                    null=True)
    html = TextField(verbose_name='Code', blank=True, null=True)
    position = CharField(verbose_name='Position',
                         choices=CHOICE_POSITION,
                         max_length=255)
    counter_json = JSONField(verbose_name='Ad clicks', default=dict)
    date_start = DateTimeField(verbose_name='Start date')
    date_stop = DateTimeField(verbose_name='End date')
    created = DateTimeField(verbose_name='Created', auto_now_add=True)
    changed = DateTimeField(verbose_name='Changed', auto_now=True)
    deleted = DateTimeField(verbose_name='Deleted',
                            blank=True,
                            null=True,
                            help_text='Datetime at which the advert was deleted')

    objects = AdvertManager()

    @property
    def is_present(self):
        """
        Is the advert currently active?
        :return: True if now is between date_start and date_stop
        """
        today = datetime.now(timezone.utc)
        return self.date_start < today < self.date_stop

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = 'Advert'
        verbose_name_plural = 'Adverts'
        ordering = ['-date_start']
Example No. 27
class DataSignature(models.Model):
    name = models.CharField(max_length=512)
    description = models.TextField(
        blank=True
    )  #This seems useful to have... can store the interpretation for data with this signature
    value_type = models.ForeignKey(ContentType,
                                   on_delete=models.CASCADE,
                                   related_name="+")
    data_types = models.ManyToManyField(ContentType, related_name="+")
    values = models.ManyToManyField("Value", related_name="signature")

    #Flags for certain DataSignature types
    #Feature Annotation: This Value provides a human-readable label for a feature
    feature_annotation = models.BooleanField(default=False)

    # 0: None of these linked to these values
    # Positive integer: This many, exactly, are linked to these values
    # -1: Any number of these objects may be attached (including 0) NOTE: Currently unused case?
    # -2: Any positive number of these objects may be attached
    object_counts = JSONField()

    def __str__(self):
        link_str = ", ".join([
            "%d %s" % (self.object_counts[Obj.plural_name], Obj.plural_name)
            for Obj in Object.get_object_types()
        ])
        return "%s '%s' storing %s, linked to %s" % (str(
            self.value_type).capitalize(), self.name, ", ".join([
                x.model_class().__name__
                for x in self.data_types.all() if x.model_class()
            ]), link_str)

    @classmethod
    def create(cls, name, value_type, object_counts):
        for Obj in Object.get_object_types():
            if Obj.plural_name not in object_counts:
                object_counts[Obj.plural_name] = 0
        signature = DataSignature(
            name=name,
            value_type=ContentType.objects.get_for_model(value_type),
            object_counts=object_counts)
        signature.save()
        return signature

    @classmethod
    def get_or_create(cls, name, **kwargs):
        signatures, object_counts = cls.get(name, return_counts=True, **kwargs)
        if not signatures.exists():
            signature = cls.create(name, kwargs['value_type'], object_counts)
            signatures = DataSignature.objects.filter(pk=signature.pk)
        return signatures

    @classmethod
    def get(cls, name, value_type, return_counts=False, **kwargs):
        object_counts = {}
        object_querysets = {}
        for Obj in Object.get_object_types():
            if Obj.plural_name in kwargs:
                if type(kwargs[Obj.plural_name]) in [
                        models.query.QuerySet, DataFrameQuerySet
                ]:
                    object_counts[Obj.plural_name] = kwargs[
                        Obj.plural_name].count()
                    object_querysets[Obj.plural_name] = kwargs[Obj.plural_name]
                elif type(kwargs[Obj.plural_name]) == int:
                    object_counts[Obj.plural_name] = kwargs[Obj.plural_name]
                elif type(kwargs[Obj.plural_name]) == bool:
                    object_counts[Obj.plural_name] = int(
                        kwargs[Obj.plural_name])
            else:
                object_counts[Obj.plural_name] = 0
        if type(value_type) == ContentType:
            ctype = value_type
        elif type(value_type) == str:
            # This throws an error and can't be fixed here due to a circular import. TODO: resolve
            ctype = ContentType.objects.get_for_model(
                Value.get_value_types(type_name=value_type))
        else:
            ctype = ContentType.objects.get_for_model(value_type)
        signatures = DataSignature.objects.filter(name=name,
                                                  object_counts=object_counts,
                                                  value_type=ctype)
        if return_counts:
            return (signatures, object_counts)
        return signatures
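The object_counts encoding documented in DataSignature above (0 = none, a positive N = exactly N, -1 = any number, -2 = at least one) can be read with a small helper; this is only an illustration of the convention, not an API of the class:

def count_matches(expected, actual):
    # Compare an actual object count against the encoded expectation
    if expected == -1:   # any number of objects, including zero
        return True
    if expected == -2:   # any positive number of objects
        return actual > 0
    return actual == expected  # exactly this many (0 means none are allowed)

print(count_matches(-2, 3), count_matches(0, 1), count_matches(2, 2))  # True False True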
Example No. 28
class CustomUser(AbstractUser):
    # use a callable default so instances don't share one mutable dict
    notifications = JSONField(default=dict)
Example No. 29
class InputModel(models.Model):
    field = models.CharField(max_length=30)
    data = JSONField()

    def __str__(self):
        return f'{self.field}: {self.data}'
Example No. 30
class Task(models.Model):
    """ Business tasks from project
    """
    id = models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID', db_index=True)
    data = JSONField('data', null=False, help_text='User imported or uploaded data for a task. Data is formatted according to '
                                                   'the project label config. You can find examples of data for your project '
                                                   'on the Import page in the Label Studio Data Manager UI.')
    meta = JSONField('meta', null=True, default=dict,
                     help_text='Meta is user imported (uploaded) data and can be useful as input for an ML '
                               'Backend for embeddings, advanced vectors, and other info. It is passed to '
                               'ML during training/predicting steps.')
    project = models.ForeignKey('projects.Project', related_name='tasks', on_delete=models.CASCADE, null=True,
                                help_text='Project ID for this task')
    created_at = models.DateTimeField(_('created at'), auto_now_add=True, help_text='Time a task was created')
    updated_at = models.DateTimeField(_('updated at'), auto_now=True, help_text='Last time a task was updated')
    is_labeled = models.BooleanField(_('is_labeled'), default=False,
                                     help_text='True if the number of annotations for this task is greater than or equal '
                                               'to the number of maximum_completions for the project')
    overlap = models.IntegerField(_('overlap'), default=1, db_index=True,
                                  help_text='Number of distinct annotators that processed the current task')
    file_upload = models.ForeignKey(
        'data_import.FileUpload', on_delete=models.SET_NULL, null=True, blank=True, related_name='tasks',
        help_text='Uploaded file used as data source for this task'
    )
    updates = ['is_labeled']

    objects = models.Manager()  # task manager by default
    prepared = PreparedTaskManager()  # task manager with filters, ordering, etc for data_manager app

    @property
    def file_upload_name(self):
        return os.path.basename(self.file_upload.file.name)

    @classmethod
    def get_locked_by(cls, user, project):
        """Retrieve the task locked by specified user. Returns None if the specified user didn't lock anything."""
        lock = TaskLock.objects.filter(user=user, expire_at__gt=now(), task__project=project).first()
        if lock:
            return lock.task

    def has_lock(self):
        """Check whether current task has been locked by some user"""
        num_locks = self.num_locks
        num_annotations = self.annotations.filter(ground_truth=False).count()
        num = num_locks + num_annotations
        if num > self.overlap:
            logger.error(f"Num takes={num} > overlap={self.overlap} for task={self.id} - it's a bug")
        return num >= self.overlap

    @property
    def num_locks(self):
        return self.locks.filter(expire_at__gt=now()).count()

    def get_lock_ttl(self):
        if settings.TASK_LOCK_TTL is not None:
            return settings.TASK_LOCK_TTL
        avg_lead_time = self.project.annotations_lead_time()
        return 3 * int(avg_lead_time) if avg_lead_time is not None else settings.TASK_LOCK_DEFAULT_TTL

    def clear_expired_locks(self):
        self.locks.filter(expire_at__lt=now()).delete()

    def set_lock(self, user):
        """Lock current task by specified user. Lock lifetime is set by `expire_in_secs`"""
        num_locks = self.num_locks
        if num_locks < self.overlap:
            expire_at = now() + datetime.timedelta(seconds=self.get_lock_ttl())
            TaskLock.objects.create(task=self, user=user, expire_at=expire_at)
            logger.debug(f'User={user} acquires a lock for the task={self}')
        else:
            logger.error(
                f"Current number of locks for task {self.id} is {num_locks}, but overlap={self.overlap}: "
                f"that's a bug because this task should not be taken in a label stream (task should be locked)")
        self.clear_expired_locks()

    def release_lock(self, user=None):
        """Release lock for the task.
        If a user is specified, only the locks created by that user are released"""

        if user is not None:
            self.locks.filter(user=user).delete()
        else:
            self.locks.all().delete()
        self.clear_expired_locks()

    def get_storage_link(self):
        # TODO: how to get neatly any storage class here?
        return find_first_one_to_one_related_field_by_prefix(self, 'io_storages_')

    def resolve_uri(self, task_data, proxy=True):
        if proxy and self.project.task_data_login and self.project.task_data_password:
            protected_data = {}
            for key, value in task_data.items():
                if isinstance(value, str) and string_is_url(value):
                    path = reverse('projects-file-proxy', kwargs={'pk': self.project.pk}) + '?url=' + value
                    value = urljoin(settings.HOSTNAME, path)
                protected_data[key] = value
            return protected_data
        else:
            # Try resolve URLs via storage associated with that task
            storage = self._get_task_storage()
            if storage:
                return storage.resolve_task_data_uri(task_data)
            return task_data

    def _get_task_storage(self):
        # maybe task has storage link
        storage_link = self.get_storage_link()
        if storage_link:
            return storage_link.storage

        # or try global storage settings (only s3 for now)
        elif get_env('USE_DEFAULT_STORAGE', default=False, is_bool=True):
            # TODO: this is used to access global environment storage settings.
            # We may use more than one and non-default S3 storage (like GCS, Azure)
            from io_storages.s3.models import S3ImportStorage
            return S3ImportStorage()

    def update_is_labeled(self):
        """Set is_labeled field according to annotations*.count > overlap
        """
        n = self.annotations.filter(Q_finished_annotations & Q(ground_truth=False)).count()
        # self.is_labeled = n >= self.project.maximum_annotations
        self.is_labeled = n >= self.overlap

    def reset_updates(self):
        """ Reset updates to default from model for one task.
            Needed when duplicating a project or deleting all of its annotations
        """
        for field in Task._meta.fields:
            if field.name in Task.updates:
                setattr(self, field.name, field.default)

    @staticmethod
    def bulk_reset_updates(project):
        """ Bulk reset updates to default, it's a fast way to reset all tasks in project
        """
        for field in Task._meta.fields:
            if field.name in Task.updates:
                project.tasks.update(**{field.name: field.default})

    @staticmethod
    def bulk_update_is_labeled(project):
        """ Fast way to update only is_labeled.
            Prefer to use Django 2.2 bulk_update(), see bulk_update_field('is_labeled')

            Build a Subquery over all project.tasks:
                use Coalesce to get the first non-null value (count(annotations), or 0),
                build a Case/When condition from it,
                and annotate a temporary pre_is_labeled field with the condition's value.
            Then update all tasks with that Subquery.
        """
        tasks = project.tasks.filter(pk=OuterRef('pk'))
        count = Coalesce(Count(
            'annotations', filter=Q(annotations__was_cancelled=False) & Q(annotations__ground_truth=False)), Value(0))
        condition = Case(
            When(overlap__lte=count, then=Value(True)),
            default=Value(False),
            output_field=models.BooleanField(null=False)
        )
        results = tasks.annotate(pre_is_labeled=condition).values('pre_is_labeled')
        project.tasks.update(is_labeled=Subquery(results))

    def delete_url(self):
        return reverse('tasks:task-delete', kwargs={'pk': self.pk})

    def completion_for_ground_truth(self):
        """ 1 Get ground_truth completion if task has it, else
            2 Get first completion created by owner of project,
            3 Or the first of somebody if no owner's items.
            It's used for ground_truth selection right on data manager page
        """
        if not self.annotations.exists():
            return None

        # ground_truth already exist
        ground_truth_annotations = self.annotations.filter(ground_truth=True)
        if ground_truth_annotations.exists():
            return ground_truth_annotations.first()

        # owner annotation
        owner_annotations = self.annotations.filter(completed_by=self.project.created_by)
        if owner_annotations.count() > 0:
            return owner_annotations.first()

        # annotator annotation
        return self.annotations.first()

    class Meta:
        db_table = 'task'
        ordering = ['-updated_at']
        indexes = [
            models.Index(fields=['project', 'is_labeled']),
            models.Index(fields=['id', 'overlap'])
        ]