def _commit_holding_stocks(publish: str, sql):
    """Sync fund stock-holding data.

    Fund holdings come from two tables: one for top holdings, sourced from
    quarterly reports, and one for complete holdings, sourced from
    semi-annual or annual reports.
    """
    if models.FundHoldingStock.objects.exists():
        date = datetime.date.today()
        date = datetime.date(date.year - 1, 1, 1)
    else:
        date = datetime.date(1990, 1, 1)
    exist = models.FundHoldingStock.objects.filter(publish=publish).values('secucode').annotate(max_date=Max('date'))
    existed = {x['secucode']: x['max_date'] for x in exist}

    full = models.Funds.objects.all()
    instance = {x.secucode: x for x in full}
    sql = render(sql, '<date>', date.strftime('%Y-%m-%d'))
    data = read_oracle(sql)
    data = data[data.publish == publish]
    data = data[data.agg(lambda x: x.date.date() > existed.get(x.secucode, datetime.date(1990, 1, 1)), axis=1)]
    data.secucode = data.secucode.apply(lambda x: instance.get(x))
    data = data[data.secucode.notnull()]
    data.ratio = data.ratio.fillna(0)
    data = data.where(data.notnull(), None)
    commit_by_chunk(data, models.FundHoldingStock)
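
Neither `render` nor `read_oracle` is defined in this snippet. A minimal sketch of what they plausibly do, assuming `render` is literal placeholder substitution and `read_oracle` wraps a pandas query (`get_oracle_connection` is a hypothetical stand-in for the real connection setup):

import pandas as pd

def render(template: str, placeholder: str, value: str) -> str:
    # Substitute a literal placeholder such as '<date>' in the SQL template.
    return template.replace(placeholder, value)

def read_oracle(sql: str) -> pd.DataFrame:
    # Run the query against an Oracle connection and return a DataFrame.
    return pd.read_sql(sql, con=get_oracle_connection())  # connection helper assumed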
Example #2
    def handle(self, **options: Any) -> None:
        if options["percent_huddles"] + options["percent_personals"] > 100:
            self.stderr.write(
                "Error!  More than 100% of messages allocated.\n")
            return

        # Get consistent data for backend tests.
        if options["test_suite"]:
            random.seed(0)

        # If max_topics is not set, we set it proportional to the
        # number of messages.
        if options["max_topics"] is None:
            options["max_topics"] = 1 + options["num_messages"] // 100

        if options["delete"]:
            # Start by clearing all the data in our database
            clear_database()

            # Create our three default realms
            # Could in theory be done via zerver.lib.actions.do_create_realm, but
            # welcome-bot (needed for do_create_realm) hasn't been created yet
            create_internal_realm()
            zulip_realm = Realm.objects.create(
                string_id="zulip",
                name="Zulip Dev",
                emails_restricted_to_domains=False,
                email_address_visibility=Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
                description=
                "The Zulip development environment default organization."
                "  It's great for testing!",
                invite_required=False,
                org_type=Realm.CORPORATE)
            RealmDomain.objects.create(realm=zulip_realm, domain="zulip.com")
            if options["test_suite"]:
                mit_realm = Realm.objects.create(
                    string_id="zephyr",
                    name="MIT",
                    emails_restricted_to_domains=True,
                    invite_required=False,
                    org_type=Realm.CORPORATE)
                RealmDomain.objects.create(realm=mit_realm, domain="mit.edu")

                lear_realm = Realm.objects.create(
                    string_id="lear",
                    name="Lear & Co.",
                    emails_restricted_to_domains=False,
                    invite_required=False,
                    org_type=Realm.CORPORATE)

            # Create test Users (UserProfiles are automatically created,
            # as are subscriptions to the ability to receive personals).
            names = [
                ("Zoe", "*****@*****.**"),
                ("Othello, the Moor of Venice", "*****@*****.**"),
                ("Iago", "*****@*****.**"),
                ("Prospero from The Tempest", "*****@*****.**"),
                ("Cordelia Lear", "*****@*****.**"),
                ("King Hamlet", "*****@*****.**"),
                ("aaron", "*****@*****.**"),
                ("Polonius", "*****@*****.**"),
                ("Desdemona", "*****@*****.**"),
            ]

            # For testing really large batches:
            # Create extra users with semi-realistic names to make search
            # functions somewhat realistic.  We'll still create 300 users
            # like Extra222 User for some predictability.
            num_names = options['extra_users']
            num_boring_names = 300

            for i in range(min(num_names, num_boring_names)):
                full_name = f'Extra{i:03} User'
                names.append((full_name, f'extrauser{i}@zulip.com'))

            if num_names > num_boring_names:
                fnames = [
                    'Amber', 'Arpita', 'Bob', 'Cindy', 'Daniela', 'Dan',
                    'Dinesh', 'Faye', 'François', 'George', 'Hank', 'Irene',
                    'James', 'Janice', 'Jenny', 'Jill', 'John', 'Kate',
                    'Katelyn', 'Kobe', 'Lexi', 'Manish', 'Mark', 'Matt',
                    'Mayna', 'Michael', 'Pete', 'Peter', 'Phil', 'Phillipa',
                    'Preston', 'Sally', 'Scott', 'Sandra', 'Steve',
                    'Stephanie', 'Vera'
                ]
                mnames = ['de', 'van', 'von', 'Shaw', 'T.']
                lnames = [
                    'Adams', 'Agarwal', 'Beal', 'Benson', 'Bonita', 'Davis',
                    'George', 'Harden', 'James', 'Jones', 'Johnson', 'Jordan',
                    'Lee', 'Leonard', 'Singh', 'Smith', 'Patel', 'Towns',
                    'Wall'
                ]

            for i in range(num_boring_names, num_names):
                fname = random.choice(fnames) + str(i)
                full_name = fname
                if random.random() < 0.7:
                    if random.random() < 0.5:
                        full_name += ' ' + random.choice(mnames)
                    full_name += ' ' + random.choice(lnames)
                email = fname.lower() + '@zulip.com'
                names.append((full_name, email))

            create_users(zulip_realm, names, tos_version=settings.TOS_VERSION)

            iago = get_user_by_delivery_email("*****@*****.**", zulip_realm)
            do_change_user_role(iago,
                                UserProfile.ROLE_REALM_ADMINISTRATOR,
                                acting_user=None)
            iago.is_staff = True
            iago.save(update_fields=['is_staff'])

            desdemona = get_user_by_delivery_email("*****@*****.**",
                                                   zulip_realm)
            do_change_user_role(desdemona,
                                UserProfile.ROLE_REALM_OWNER,
                                acting_user=None)

            guest_user = get_user_by_delivery_email("*****@*****.**",
                                                    zulip_realm)
            guest_user.role = UserProfile.ROLE_GUEST
            guest_user.save(update_fields=['role'])

            # These bots are directly referenced from code and thus
            # are needed for the test suite.
            zulip_realm_bots = [
                ("Zulip Error Bot", "*****@*****.**"),
                ("Zulip Default Bot", "*****@*****.**"),
            ]
            for i in range(options["extra_bots"]):
                zulip_realm_bots.append(
                    (f'Extra Bot {i}', f'extrabot{i}@zulip.com'))

            create_users(zulip_realm,
                         zulip_realm_bots,
                         bot_type=UserProfile.DEFAULT_BOT)

            zoe = get_user_by_delivery_email("*****@*****.**", zulip_realm)
            zulip_webhook_bots = [
                ("Zulip Webhook Bot", "*****@*****.**"),
            ]
            # If a stream is not supplied in the webhook URL, the webhook
            # will (in some cases) send the notification as a PM to the
            # owner of the webhook bot, so bot_owner can't be None
            create_users(zulip_realm,
                         zulip_webhook_bots,
                         bot_type=UserProfile.INCOMING_WEBHOOK_BOT,
                         bot_owner=zoe)
            aaron = get_user_by_delivery_email("*****@*****.**", zulip_realm)

            zulip_outgoing_bots = [
                ("Outgoing Webhook", "*****@*****.**"),
            ]
            create_users(zulip_realm,
                         zulip_outgoing_bots,
                         bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
                         bot_owner=aaron)
            outgoing_webhook = get_user("*****@*****.**",
                                        zulip_realm)
            add_service("outgoing-webhook",
                        user_profile=outgoing_webhook,
                        interface=Service.GENERIC,
                        base_url="http://127.0.0.1:5002",
                        token=generate_api_key())

            # Add the realm internal bots to each realm.
            create_if_missing_realm_internal_bots()

            # Create public streams.
            stream_list = ["Verona", "Denmark", "Scotland", "Venice", "Rome"]
            stream_dict: Dict[str, Dict[str, Any]] = {
                "Verona": {
                    "description": "A city in Italy"
                },
                "Denmark": {
                    "description": "A Scandinavian country"
                },
                "Scotland": {
                    "description": "Located in the United Kingdom"
                },
                "Venice": {
                    "description": "A northeastern Italian city"
                },
                "Rome": {
                    "description": "Yet another Italian city",
                    "is_web_public": True
                },
            }

            bulk_create_streams(zulip_realm, stream_dict)
            recipient_streams: List[int] = [
                Stream.objects.get(name=name, realm=zulip_realm).id
                for name in stream_list
            ]

            # Create subscriptions to streams.  The following
            # algorithm will give each of the users a different but
            # deterministic subset of the streams (given a fixed list
            # of users). For the test suite, we have a fixed list of
            # subscriptions to make sure test data is consistent
            # across platforms.

            subscriptions_list: List[Tuple[UserProfile, Recipient]] = []
            profiles: Sequence[
                UserProfile] = UserProfile.objects.select_related().filter(
                    is_bot=False).order_by("email")

            if options["test_suite"]:
                subscriptions_map = {
                    '*****@*****.**': ['Verona'],
                    '*****@*****.**': ['Verona'],
                    '*****@*****.**': ['Verona', 'Denmark'],
                    '*****@*****.**': ['Verona', 'Denmark', 'Scotland'],
                    '*****@*****.**': ['Verona', 'Denmark', 'Scotland'],
                    '*****@*****.**':
                    ['Verona', 'Denmark', 'Scotland', 'Venice'],
                    '*****@*****.**':
                    ['Verona', 'Denmark', 'Scotland', 'Venice', 'Rome'],
                    '*****@*****.**': ['Verona'],
                    '*****@*****.**': ['Verona', 'Denmark', 'Venice'],
                }

                for profile in profiles:
                    email = profile.delivery_email
                    if email not in subscriptions_map:
                        raise Exception(
                            f'Subscriptions not listed for user {email}')

                    for stream_name in subscriptions_map[email]:
                        stream = Stream.objects.get(name=stream_name)
                        r = Recipient.objects.get(type=Recipient.STREAM,
                                                  type_id=stream.id)
                        subscriptions_list.append((profile, r))
            else:
                num_streams = len(recipient_streams)
                num_users = len(profiles)
                for i, profile in enumerate(profiles):
                    # Subscribe to some streams.
                    fraction = float(i) / num_users
                    num_recips = int(num_streams * fraction) + 1

                    for type_id in recipient_streams[:num_recips]:
                        r = Recipient.objects.get(type=Recipient.STREAM,
                                                  type_id=type_id)
                        subscriptions_list.append((profile, r))

            subscriptions_to_add: List[Subscription] = []
            event_time = timezone_now()
            all_subscription_logs: List[RealmAuditLog] = []

            i = 0
            for profile, recipient in subscriptions_list:
                i += 1
                color = STREAM_ASSIGNMENT_COLORS[i %
                                                 len(STREAM_ASSIGNMENT_COLORS)]
                s = Subscription(recipient=recipient,
                                 user_profile=profile,
                                 color=color)

                subscriptions_to_add.append(s)

                log = RealmAuditLog(
                    realm=profile.realm,
                    modified_user=profile,
                    modified_stream_id=recipient.type_id,
                    event_last_message_id=0,
                    event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
                    event_time=event_time)
                all_subscription_logs.append(log)

            Subscription.objects.bulk_create(subscriptions_to_add)
            RealmAuditLog.objects.bulk_create(all_subscription_logs)

            # Create custom profile field data
            phone_number = try_add_realm_custom_profile_field(
                zulip_realm,
                "Phone number",
                CustomProfileField.SHORT_TEXT,
                hint='')
            biography = try_add_realm_custom_profile_field(
                zulip_realm,
                "Biography",
                CustomProfileField.LONG_TEXT,
                hint='What are you known for?')
            favorite_food = try_add_realm_custom_profile_field(
                zulip_realm,
                "Favorite food",
                CustomProfileField.SHORT_TEXT,
                hint="Or drink, if you'd prefer")
            field_data: ProfileFieldData = {
                'vim': {
                    'text': 'Vim',
                    'order': '1'
                },
                'emacs': {
                    'text': 'Emacs',
                    'order': '2'
                },
            }
            favorite_editor = try_add_realm_custom_profile_field(
                zulip_realm,
                "Favorite editor",
                CustomProfileField.CHOICE,
                field_data=field_data)
            birthday = try_add_realm_custom_profile_field(
                zulip_realm, "Birthday", CustomProfileField.DATE)
            favorite_website = try_add_realm_custom_profile_field(
                zulip_realm,
                "Favorite website",
                CustomProfileField.URL,
                hint="Or your personal blog's URL")
            mentor = try_add_realm_custom_profile_field(
                zulip_realm, "Mentor", CustomProfileField.USER)
            github_profile = try_add_realm_default_custom_profile_field(
                zulip_realm, "github")

            # Fill in values for Iago and Hamlet
            hamlet = get_user_by_delivery_email("*****@*****.**",
                                                zulip_realm)
            do_update_user_custom_profile_data_if_changed(
                iago, [
                    {
                        "id": phone_number.id,
                        "value": "+1-234-567-8901"
                    },
                    {
                        "id": biography.id,
                        "value": "Betrayer of Othello."
                    },
                    {
                        "id": favorite_food.id,
                        "value": "Apples"
                    },
                    {
                        "id": favorite_editor.id,
                        "value": "emacs"
                    },
                    {
                        "id": birthday.id,
                        "value": "2000-1-1"
                    },
                    {
                        "id": favorite_website.id,
                        "value": "https://zulip.readthedocs.io/en/latest/"
                    },
                    {
                        "id": mentor.id,
                        "value": [hamlet.id]
                    },
                    {
                        "id": github_profile.id,
                        "value": 'zulip'
                    },
                ])
            do_update_user_custom_profile_data_if_changed(
                hamlet, [
                    {
                        "id": phone_number.id,
                        "value": "+0-11-23-456-7890"
                    },
                    {
                        "id":
                        biography.id,
                        "value":
                        "I am:\n* The prince of Denmark\n* Nephew to the usurping Claudius",
                    },
                    {
                        "id": favorite_food.id,
                        "value": "Dark chocolate"
                    },
                    {
                        "id": favorite_editor.id,
                        "value": "vim"
                    },
                    {
                        "id": birthday.id,
                        "value": "1900-1-1"
                    },
                    {
                        "id": favorite_website.id,
                        "value": "https://blog.zulig.org"
                    },
                    {
                        "id": mentor.id,
                        "value": [iago.id]
                    },
                    {
                        "id": github_profile.id,
                        "value": 'zulipbot'
                    },
                ])
        else:
            zulip_realm = get_realm("zulip")
            recipient_streams = [
                klass.type_id
                for klass in Recipient.objects.filter(type=Recipient.STREAM)
            ]

        # Extract a list of all users
        user_profiles: List[UserProfile] = list(
            UserProfile.objects.filter(is_bot=False))

        # Create a test realm emoji.
        IMAGE_FILE_PATH = static_path('images/test-images/checkbox.png')
        with open(IMAGE_FILE_PATH, 'rb') as fp:
            check_add_realm_emoji(zulip_realm, 'green_tick', iago, fp)

        if not options["test_suite"]:
            # Populate users with some presence data
            for user in user_profiles:
                status: int = UserPresence.ACTIVE
                date = timezone_now()
                client = get_client("website")
                if user.full_name[0] <= 'H':
                    client = get_client("ZulipAndroid")
                UserPresence.objects.get_or_create(user_profile=user,
                                                   realm_id=user.realm_id,
                                                   client=client,
                                                   timestamp=date,
                                                   status=status)

        user_profiles_ids = [user_profile.id for user_profile in user_profiles]

        # Create several initial huddles
        for i in range(options["num_huddles"]):
            get_huddle(random.sample(user_profiles_ids, random.randint(3, 4)))

        # Create several initial pairs for personals
        personals_pairs = [
            random.sample(user_profiles_ids, 2)
            for i in range(options["num_personals"])
        ]

        create_alert_words(zulip_realm.id)

        # Generate a new set of test data.
        create_test_data()

        # prepopulate the URL preview/embed data for the links present
        # in the config.generate_data.json data set.  This makes it
        # possible for populate_db to run happily without Internet
        # access.
        with open("zerver/tests/fixtures/docs_url_preview_data.json",
                  "rb") as f:
            urls_with_preview_data = orjson.loads(f.read())
            for url in urls_with_preview_data:
                cache_set(url, urls_with_preview_data[url], PREVIEW_CACHE_NAME)

        if options["delete"]:
            if options["test_suite"]:
                # Create test users; the MIT ones are needed to test
                # the Zephyr mirroring codepaths.
                testsuite_mit_users = [
                    ("Fred Sipb (MIT)", "*****@*****.**"),
                    ("Athena Consulting Exchange User (MIT)",
                     "*****@*****.**"),
                    ("Esp Classroom (MIT)", "*****@*****.**"),
                ]
                create_users(mit_realm,
                             testsuite_mit_users,
                             tos_version=settings.TOS_VERSION)

                testsuite_lear_users = [
                    ("King Lear", "*****@*****.**"),
                    ("Cordelia Lear", "*****@*****.**"),
                ]
                create_users(lear_realm,
                             testsuite_lear_users,
                             tos_version=settings.TOS_VERSION)

            if not options["test_suite"]:
                # To keep the messages.json fixtures file for the test
                # suite fast, don't add these users and subscriptions
                # when running populate_db for the test suite

                zulip_stream_dict: Dict[str, Dict[str, Any]] = {
                    "devel": {
                        "description": "For developing"
                    },
                    "all": {
                        "description": "For **everything**"
                    },
                    "announce": {
                        "description": "For announcements",
                        'stream_post_policy': Stream.STREAM_POST_POLICY_ADMINS
                    },
                    "design": {
                        "description": "For design"
                    },
                    "support": {
                        "description": "For support"
                    },
                    "social": {
                        "description": "For socializing"
                    },
                    "test": {
                        "description": "For testing `code`"
                    },
                    "errors": {
                        "description": "For errors"
                    },
                    "sales": {
                        "description": "For sales discussion"
                    },
                }

                # Calculate the maximum number of digits in any extra stream's
                # number, since a stream with name "Extra Stream 3" could show
                # up after "Extra Stream 29". (Used later to pad numbers with
                # 0s).
                maximum_digits = len(str(options['extra_streams'] - 1))

                for i in range(options['extra_streams']):
                    # Pad the number with 0s based on `maximum_digits`.
                    number_str = str(i).zfill(maximum_digits)

                    extra_stream_name = 'Extra Stream ' + number_str

                    zulip_stream_dict[extra_stream_name] = {
                        "description": "Auto-generated extra stream.",
                    }

                bulk_create_streams(zulip_realm, zulip_stream_dict)
                # Now that we've created the notifications stream, configure it properly.
                zulip_realm.notifications_stream = get_stream(
                    "announce", zulip_realm)
                zulip_realm.save(update_fields=['notifications_stream'])

                # Add a few default streams
                for default_stream_name in [
                        "design", "devel", "social", "support"
                ]:
                    DefaultStream.objects.create(realm=zulip_realm,
                                                 stream=get_stream(
                                                     default_stream_name,
                                                     zulip_realm))

                # Now subscribe everyone to these streams
                subscribe_users_to_streams(zulip_realm, zulip_stream_dict)

            if not options["test_suite"]:
                # Update pointer of each user to point to the last message in their
                # UserMessage rows with sender_id=user_profile_id.
                users = list(
                    UserMessage.objects.filter(message__sender_id=F(
                        'user_profile_id')).values('user_profile_id').annotate(
                            pointer=Max('message_id')))
                for user in users:
                    UserProfile.objects.filter(
                        id=user['user_profile_id']).update(
                            pointer=user['pointer'])

            create_user_groups()

            if not options["test_suite"]:
                # We populate the analytics database here for
                # development purpose only
                call_command('populate_analytics_db')

        threads = options["threads"]
        jobs: List[Tuple[int, List[List[int]], Dict[str, Any],
                         Callable[[str], int], int]] = []
        for i in range(threads):
            count = options["num_messages"] // threads
            if i < options["num_messages"] % threads:
                count += 1
            jobs.append((count, personals_pairs, options, self.stdout.write,
                         random.randint(0, 10**10)))

        for job in jobs:
            generate_and_send_messages(job)

        if options["delete"]:
            if not options['test_suite']:
                # These bots are not needed by the test suite.
                # Also, we don't want them interacting with each
                # other in the dev setup.
                internal_zulip_users_nosubs = [
                    ("Zulip Commit Bot", "*****@*****.**"),
                    ("Zulip Trac Bot", "*****@*****.**"),
                    ("Zulip Nagios Bot", "*****@*****.**"),
                ]
                create_users(zulip_realm,
                             internal_zulip_users_nosubs,
                             bot_type=UserProfile.DEFAULT_BOT)

            mark_all_messages_as_read()
            self.stdout.write("Successfully populated test database.\n")
Example #3
 def save_model(self, request, obj, form, change):
     if not change:
         # since SortableAdminMixin is missing on this class, ordering has to be computed by hand
         max_order = self.base_model.objects.aggregate(max_order=Max('order'))['max_order']
         obj.order = max_order + 1 if max_order else 1
     super(SmartPhoneAdmin, self).save_model(request, obj, form, change)
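
The null-handling here can also be pushed into the database with `Coalesce`; a sketch under the same model assumptions:

from django.db.models import Max
from django.db.models.functions import Coalesce

def next_order(model):
    # max(order) + 1, with an empty table treated as max(order) == 0.
    current = model.objects.aggregate(m=Coalesce(Max('order'), 0))['m']
    return current + 1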
Example #4
def statistic(type, field1, data_list, model=None, form=None, step=None):
    # Here field1 is the salary form
    if type == 'salary':
        model_used = model.filter(salary_type=form['salary_period'])
    else:
        model_used = model.filter(salary_type='月薪')  # '月薪' = monthly salary
    # Here field1 is either money or week_total_hour
    # avg = model_used.aggregate(average=Avg(field1))
    max_list = model_used.aggregate(maximum=Max(field1))
    mini_list = model_used.aggregate(minimum=Min(field1))
    print('max_list:{}, mini_list:{}'.format(max_list, mini_list))
    # Retrieve the instance value for this case, e.g. a cleaner's working
    # hours and wage
    instance_data = float(form[type])

    # These two compute the minimum and maximum of the graph's x-axis
    try:
        range_min = int(math.floor(float(mini_list['minimum'])))
    except Exception as e:
        print(e)
        range_min = 0
    try:
        range_max = int(math.ceil(float(max_list['maximum'])))
    except Exception as e:
        print(e)
        range_max = 50
    global combine_length
    global combine_number
    combine_length = 0
    combine_number = set()
    max_bar = 13
    # the following is for data generation
    print('step', 'x', 'x+step', 'instance_data', 'color')
    # step=10
    for counter, x in enumerate(range(0, range_max, step)):
        if instance_data >= float(x) and instance_data < float(x + step):
            color = 'RGB(247,147,30)'
        else:
            color = 'RGB(252, 238, 33)'
        print(step, float(x), float(x + step), instance_data, color)
        if type == 'salary':
            if counter > max_bar:
                combine_number.add(math.floor(x / 1000))
                combine_number.add(math.floor((x + step) / 1000))
                combine_length += len(
                    model.filter(money__gte=float(x)).filter(
                        money__lt=float(x + step)))
            else:
                length = len(
                    model.filter(money__gte=float(x)).filter(
                        money__lt=float(x + step)))
                data_list.append({
                    'range':
                    '{}-{}'.format(math.floor(x / 1000),
                                   math.floor((x + step) / 1000)),
                    'number':
                    length,
                    'color':
                    color
                })
        else:
            length = len(
                model.filter(week_total_hour__gte=float(x - step)).filter(
                    week_total_hour__lt=float(x + step)))
            data_list.append({
                'range': '{}-{}'.format(x, x + step),
                'number': length,
                'color': color
            })
    print(type, data_list, max_bar, combine_number)

    if type == 'salary' and len(data_list) > max_bar:
        last_item = '{}以上'.format(min(combine_number))  # '以上' = "and above"
        print(last_item, combine_length)
        data_list.append({
            'range': last_item,
            'number': combine_length,
            'color': color
        })
        # print({'range':'{}-{}'.format(x, x+step), 'number':length, 'color':color})
        # salary_classification.append({'range':'{}-{}'.format(x, x+5000), 'number':length, 'color':color})
        # print({'range':'{}-{}'.format(x, x+step), 'number':length, 'color':color})

    return data_list
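
Stripped of the Django queries, the heart of this function is a histogram-bucketing loop. A pure-Python sketch of that idea (names are illustrative, not from the original):

def bucket_counts(values, instance_value, range_max, step):
    buckets = []
    for x in range(0, range_max, step):
        # Highlight the bucket that contains the caller's own value.
        highlight = x <= instance_value < x + step
        count = sum(1 for v in values if x <= v < x + step)
        buckets.append({'range': f'{x}-{x + step}',
                        'number': count,
                        'highlight': highlight})
    return buckets

print(bucket_counts([3, 7, 12, 18, 25], 12, 30, 10))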
Example #5
def leaderboard(request, key=''):
    """Display the leaderboard for top earning or paying profiles.

    Args:
        key (str): The leaderboard display type. Defaults to: quarterly_earners.

    Returns:
        TemplateResponse: The leaderboard template response.

    """
    if not key:
        key = 'quarterly_earners'

    keyword_search = request.GET.get('keyword')
    limit = int(request.GET.get('limit', 25))

    titles = {
        'quarterly_payers': _('Top Payers'),
        'quarterly_earners': _('Top Earners'),
        'quarterly_orgs': _('Top Orgs'),
        'quarterly_tokens': _('Top Tokens'),
        'quarterly_keywords': _('Top Keywords'),
        'quarterly_kudos': _('Top Kudos'),
        'quarterly_cities': _('Top Cities'),
        'quarterly_countries': _('Top Countries'),
        'quarterly_continents': _('Top Continents'),
        #        'weekly_fulfilled': 'Weekly Leaderboard: Fulfilled Funded Issues',
        #        'weekly_all': 'Weekly Leaderboard: All Funded Issues',
        #        'monthly_fulfilled': 'Monthly Leaderboard',
        #        'monthly_all': 'Monthly Leaderboard: All Funded Issues',
        #        'yearly_fulfilled': 'Yearly Leaderboard: Fulfilled Funded Issues',
        #        'yearly_all': 'Yearly Leaderboard: All Funded Issues',
        #        'all_fulfilled': 'All-Time Leaderboard: Fulfilled Funded Issues',
        #        'all_all': 'All-Time Leaderboard: All Funded Issues',
        # TODO - also include options for weekly, yearly, and all cadences of earning
    }

    if settings.ENV != 'prod':
        # TODO (mbeacom): Re-enable this on live following a fix for leaderboards by location.
        titles['quarterly_cities'] = _('Top Cities')
        titles['quarterly_countries'] = _('Top Countries')
        titles['quarterly_continents'] = _('Top Continents')

    if key not in titles:
        raise Http404

    title = titles[key]
    if keyword_search:
        ranks = LeaderboardRank.objects.filter(
            active=True,
            leaderboard=key,
            tech_keywords__icontains=keyword_search)
    else:
        ranks = LeaderboardRank.objects.filter(active=True, leaderboard=key)

    amount = ranks.values_list('amount').annotate(
        Max('amount')).order_by('-amount')
    items = ranks.order_by('-amount')

    top_earners = ''
    technologies = set()
    for profile_keywords in ranks.values_list('tech_keywords'):
        for techs in profile_keywords:
            for tech in techs:
                technologies.add(tech)

    if amount:
        amount_max = amount[0][0]
        top_earners = ranks.order_by('-amount')[0:3].values_list(
            'github_username', flat=True)
        top_earners = ['@' + username for username in top_earners]
        top_earners = f'The top earners of this period are {", ".join(top_earners)}'
    else:
        amount_max = 0

    profile_keys = [
        '_tokens', '_keywords', '_cities', '_countries', '_continents'
    ]
    is_linked_to_profile = any(sub in key for sub in profile_keys)

    context = {
        'items': items[0:limit],
        'titles': titles,
        'selected': title,
        'is_linked_to_profile': is_linked_to_profile,
        'title': f'Leaderboard: {title}',
        'card_title': f'Leaderboard: {title}',
        'card_desc':
        f'See the most valued members in the Gitcoin community recently. {top_earners}',
        'action_past_tense':
        'Transacted' if 'submitted' in key else 'bountied',
        'amount_max': amount_max,
        'podium_items': items[:3] if items else [],
        'technologies': technologies
    }

    return TemplateResponse(request, 'leaderboard.html', context)
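
The `values_list('amount').annotate(Max('amount'))` construction above is a roundabout way to find the largest amount; assuming the same `ranks` queryset, a single aggregate is simpler:

from django.db.models import Max

amount_max = ranks.aggregate(max_amount=Max('amount'))['max_amount'] or 0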
Example #6
def get_progress_and_last_active(target_nodes, **kwargs):
    # Prepare dictionaries to output the progress and last active, keyed by content_id
    output_progress_dict = {}
    output_last_active_dict = {}
    # Get a list of all the users that we are querying
    users = list(
        get_members_or_user(kwargs['collection_kind'],
                            kwargs['collection_id']))

    # Get a list of all content ids for all target nodes and their descendants
    content_ids = target_nodes.get_descendants(
        include_self=True).order_by().values_list("content_id", flat=True)
    # get all summary logs for the current user that correspond to the content nodes and descendant content nodes
    # Filter by users and the content ids
    progress_query = ContentSummaryLog.objects\
        .filter(user__in=users, content_id__in=content_ids)
    # Conditionally filter by last active time
    if kwargs.get('last_active_time'):
        progress_query = progress_query.filter(
            end_timestamp__gte=parse(kwargs.get('last_active_time')))
    # Get an annotated list of dicts of type:
    # {
    #   'content_id': <content_id>,
    #   'kind': <kind>,
    #   'total_progress': <sum of all progress for this content>,
    #   'log_count_total': <number of summary logs for this content>,
    #   'log_count_complete': <number of complete summary logs for this content>,
    #   'last_active': <most recent end_timestamp for this content>,
    # }
    progress_list = progress_query.values('content_id', 'kind').annotate(
        total_progress=Sum('progress'),
        log_count_total=Count('pk'),
        log_count_complete=Sum(
            Case(When(progress=1, then=1),
                 default=0,
                 output_field=IntegerField())),
        last_active=Max('end_timestamp'))
    # Evaluate query and make a loop dict of all progress
    progress_dict = {item.get('content_id'): item for item in progress_list}
    if isinstance(target_nodes, ContentNode):
        # Have been passed an individual model
        target_nodes = [target_nodes]
    # Loop through each node to add progress and last active information to the output dicts
    for target_node in target_nodes:
        # In the case of a topic, we need to look at the progress and last active from each of its descendants
        if target_node.kind == content_kinds.TOPIC:
            # Get all the content_ids and kinds of each leaf node as a tuple
            # (about half the size of the dict from 'values' method)
            # Remove topics in generator comprehension, rather than using .exclude as kind is not indexed
            # Use set to remove repeated content
            leaf_nodes = set(node for node in target_node.get_descendants(
                include_self=False).order_by().values_list(
                    'content_id', 'kind') if node[1] != content_kinds.TOPIC)
            # Get a unique set of all non-topic content kinds
            leaf_kinds = sorted(set(leaf_node[1] for leaf_node in leaf_nodes))
            # Create a list of progress summary dicts for each content kind
            progress = [{
                # For total progress sum across all the progress dicts for the descendant content leaf nodes
                'total_progress': reduce(
                    # Reduce with a function that just adds the total_progress of the passed in dict to the accumulator
                    sum_progress_dicts,
                    # Get all dicts of progress for every leaf_id that has some progress recorded
                    # and matches the kind we are aggregating over
                    (progress_dict.get(leaf_node[0]) for leaf_node in leaf_nodes\
                        if leaf_node[0] in progress_dict and leaf_node[1] == kind),
                    # Pass in an initial value of total_progress as zero to initialize the reduce
                    0.0,
                ),
                'kind': kind,
                # Count the number of leaf nodes of this particular kind
                'node_count': reduce(lambda x, y: x + int(y[1] == kind), leaf_nodes, 0)
            } for kind in leaf_kinds]
            # Set the output progress for this topic to this list of progress dicts
            output_progress_dict[target_node.content_id] = progress
            # Create a generator of last active times for the leaf_ids
            last_active_times = map(
                # Return the last active time for this leaf_node
                lambda leaf_node: progress_dict[leaf_node[0]]['last_active'],
                filter(
                    # Filter leaf_nodes to those that are in the progress_dict
                    lambda leaf_node: leaf_node[0] in progress_dict,
                    leaf_nodes))
            # Max does not handle empty iterables, so try this
            try:
                # If it is not empty, great!
                output_last_active_dict[target_node.content_id] = max(
                    last_active_times)
            except (ValueError, TypeError):
                # If it is empty, catch the value error and set the last active time to None
                # If they are all none, catch the TypeError and also set to None
                output_last_active_dict[target_node.content_id] = None
        else:
            if target_node.content_id in progress_dict:
                progress = progress_dict.pop(target_node.content_id)
                output_last_active_dict[target_node.content_id] = progress.pop(
                    'last_active')
                # return as array for consistency in api
                output_progress_dict[target_node.content_id] = [{
                    'total_progress':
                    progress['total_progress'],
                    'log_count_total':
                    progress['log_count_total'],
                    'log_count_complete':
                    progress['log_count_complete'],
                }]
            elif target_node.content_id not in output_progress_dict:
                # Not in the progress dict, but also not in our output, so supply default values
                output_last_active_dict[target_node.content_id] = None
                output_progress_dict[target_node.content_id] = [{
                    'total_progress':
                    0.0,
                    'log_count_total':
                    0,
                    'log_count_complete':
                    0,
                }]
    return output_progress_dict, output_last_active_dict
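
`sum_progress_dicts` is referenced but not defined in this snippet. Given how the `reduce` above uses it (folding `total_progress` values into a float accumulator), it is presumably something like:

def sum_progress_dicts(total, progress):
    # reduce accumulator: add this dict's total_progress to the running total.
    return total + progress.get('total_progress', 0.0)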
Example #7
    def handle(self, *args, **options):
        #        User.objects.all().delete()
        #        Tag.objects.all().delete()
        #        Profile.objects.all().delete()
        #        Question.objects.all().delete()
        #        Answer.objects.all().delete()
        #        Like.objects.all().delete()

        try:
            u = User.objects.get(username='******')
        except User.DoesNotExist:
            u = User.objects.create(username='******',
                                    first_name='test',
                                    email='*****@*****.**')
            u.set_password('test')
            u.save()
            p = Profile.objects.create(user_id=u.id, rating=20)

        item_list = []

        for i in range(0, int(options['users'])):
            u = User(username=randomword(9) + str(i),
                     first_name=randomword(3) + str(i),
                     email=randomword(10) + str(i) + '@aithelle.com')
            item_list.append(u)
            if i % 10000 == 0:
                User.objects.bulk_create(item_list)
                item_list = []

        User.objects.bulk_create(item_list)
        um = User.objects.aggregate(Min('id'), Max('id'))

        item_list = []

        for i in range(0, int(options['users'])):
            p = Profile(user_id=um['id__max'] - i,
                        rating=random.randint(0, 20))
            item_list.append(p)
            if i % 10000 == 0:
                Profile.objects.bulk_create(item_list)
                item_list = []
        Profile.objects.bulk_create(item_list)
        print('Users created\n')

        item_list = []
        for i in range(0, int(options['tags'])):
            t = Tag(text=randomword(5))
            item_list.append(t)
            if i % 10000 == 0:
                Tag.objects.bulk_create(item_list)
                item_list = []
        Tag.objects.bulk_create(item_list)

        tm = Tag.objects.aggregate(Min('id'), Max('id'))

        print('Tags created\n')

        for i in range(0, int(options['questions'])):
            q = Question(author_id=random.randint(um['id__min'],
                                                  um['id__max']),
                         title=randomword(20),
                         text=randomword(10) + ' ' + randomword(20),
                         rating=random.randint(-100, 100))
            q.save()
            q.tags.add(random.randint(tm['id__min'], tm['id__max']), \
                random.randint(tm['id__min'], tm['id__max']), \
                random.randint(tm['id__min'], tm['id__max']))

        qm = Question.objects.aggregate(Min('id'), Max('id'))

        print('Questions created\n')

        item_list = []
        for i in range(0, int(options['answers'])):
            a = Answer(author_id=random.randint(um['id__min'], um['id__max']),
                       question_id=random.randint(qm['id__min'],
                                                  qm['id__max']),
                       is_right=random.randint(0, 1),
                       text=randomword(10) + ' ' + randomword(10),
                       rating=random.randint(-100, 100))
            item_list.append(a)
            if i % 10000 == 0:
                Answer.objects.bulk_create(item_list)
                item_list = []

        Answer.objects.bulk_create(item_list)

        am = Answer.objects.aggregate(Min('id'), Max('id'))

        print('Answers created\n')

        item_list = []
        for i in range(0, int(options['likes'])):
            item_type = random.choice(['question', 'answer'])
            if item_type == 'question':
                item = random.randint(qm['id__min'], qm['id__max'])
            else:
                item = random.randint(am['id__min'], am['id__max'])
            l = Like(author_id=random.randint(um['id__min'], um['id__max']),
                     item_type=item_type,
                     item=item,
                     is_like=random.randint(0, 1))
            item_list.append(l)
            if i % 20000 == 0:
                Like.objects.bulk_create(item_list)
                item_list = []
        Like.objects.bulk_create(item_list)

        print('Likes created\n')
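
The `if i % 10000 == 0` flush above also fires on the very first iteration (i == 0), committing a single-item batch, and the remainder relies on a trailing `bulk_create` after each loop. A generic sketch without that quirk (note Django's `bulk_create` also accepts a `batch_size` argument that does this in one call):

def bulk_in_batches(model, items, batch_size=10000):
    # Flush whenever the buffer fills, then once more for the remainder.
    buffer = []
    for item in items:
        buffer.append(item)
        if len(buffer) >= batch_size:
            model.objects.bulk_create(buffer)
            buffer = []
    if buffer:
        model.objects.bulk_create(buffer)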
Example #8
def signoff_details(request, locale_code, app_code):
    """Details pane loaded on sign-off on a particular revision.

    Requires 'rev' in the query string, supports explicitly passing a 'run'.
    """
    try:
        # The rev query arg is required. It's not a url param, for caching
        # reasons and because it's dynamic in the js code, so the {% url %}
        # tag prefers it this way.
        rev = request.GET['rev']
    except KeyError:
        raise Http404
    try:
        # there might be a specified run parameter
        runid = int(request.GET['run'])
    except (KeyError, ValueError):
        runid = None
    appver = get_object_or_404(AppVersion, code=app_code)
    lang = get_object_or_404(Locale, code=locale_code)
    forest = appver.tree.l10n
    repo = get_object_or_404(Repository, locale=lang, forest=forest)

    run = lastrun = None
    good = False
    try:
        cs = repo.changesets.get(revision__startswith=rev)
    except Changeset.DoesNotExist:
        cs = None
    if cs is not None:
        runs = Run.objects.order_by('-pk').filter(tree=appver.tree_id,
                                                  locale=lang,
                                                  revisions=cs)
        if runid is not None:
            try:
                run = runs.get(id=runid)
            except Run.DoesNotExist:
                pass
        try:
            lastrun = runs[0]
        except IndexError:
            pass
        good = lastrun and (lastrun.errors == 0) and (lastrun.allmissing == 0)

        # check if we have a newer signoff.
        push = cs.pushes.get(repository=repo)
        sos = appver.signoffs.filter(locale=lang, push__gte=push)
        sos = list(sos.annotate(la=Max('action')))
        doubled = None
        newer = []
        if len(sos):
            s2a = dict((so.id, so.la) for so in sos)
            actions = Action.objects.filter(id__in=s2a.values())
            actions = dict(
                (a.signoff_id, a.get_flag_display()) for a in actions)
            for so in sos:
                if so.push_id == push.id:
                    doubled = True
                    good = False
                else:
                    flag = actions[so.id]
                    if flag not in newer:
                        newer.append(flag)
                        good = False
            newer = sorted(newer)

    return render_to_response('shipping/signoff-details.html', {
        'run': run,
        'good': good,
        'doubled': doubled,
        'newer': newer,
    })
Example #9
def global_var(request):
    user = request.user

    # ---------------- number items in wishlist
    if user.is_authenticated:
        number_items_wish_list = WishList.objects.filter(
            user__user=user).count()
    else:
        number_items_wish_list = 0

    # ---------------- is supplier
    supplier = False
    store = None
    if user.is_authenticated:
        profil = user.profil
        if profil.is_professional and profil.is_supplier:
            if Shop.objects.filter(owner=profil).exists():
                store = get_object_or_404(Shop, owner=profil).id
                supplier = True

    # ---------------- is seller
    seller = False
    if user.is_authenticated:
        profil = user.profil
        if profil.is_seller:
            seller = True

    # ---------------- number items in compare
    if user.is_authenticated:
        number_items_compare = Compare.objects.filter(user__user=user).count()
    else:
        number_items_compare = 0

    # --------------- All Categories ---------------
    categories = CommerceCategory.objects.all()

    # --------------- Hot Categories ---------------
    hot_categories = list()
    featured_sale = Sale.objects.filter(date_end__gte=datetime.date.today()).values(
        'product__cat__category_two__category_one__category') \
                        .annotate(Max('percentage')).order_by('-percentage__max')[:6]
    for el in featured_sale:
        cat = CommerceCategory.objects.get(
            pk=el["product__cat__category_two__category_one__category"])
        hot_categories.append(cat)
        el["product__cat__category_two__category_one__category"] = cat
    # Pad the list out to 4 categories if needed
    count = len(hot_categories)
    if count < 4:
        for i in range(0, CommerceCategory.objects.count()):
            category_to_add = CommerceCategory.objects.all()[i]
            if category_to_add not in hot_categories:
                hot_categories.append(category_to_add)
            if len(hot_categories) == 4:
                break

    # --------------- All Tags ---------------
    tags = Tag.objects.all()

    # --------------- Cart ---------------
    my_cart_result = my_cart(user)
    number_products_in_cart = my_cart_result['number_products_in_cart']
    total_price_in_cart = my_cart_result['total_price_in_cart']
    cart_result = my_cart_result['cart']

    context = {
        # for base.html
        'categories': categories,
        'number_items_wish_list': number_items_wish_list,
        'number_items_compare': number_items_compare,
        'hot_categories': hot_categories[:4],
        'featured_sale': featured_sale,
        'tags': tags,
        'cart': cart_result,
        'total_price_in_cart': total_price_in_cart,
        'number_products_in_cart': number_products_in_cart,
        'is_supplier': supplier,
        'is_seller': seller,
        'store_id': store
    }

    return context
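
`global_var` is shaped like a Django context processor; to run on every request it would be registered in the template settings, roughly as below (the module path is hypothetical):

# settings.py
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'myapp.context_processors.global_var',  # hypothetical path
        ],
    },
}]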
Example #10
    def test_populate_line_item_daily_summary_table(self, mock_vacuum):
        """Test that the line item daily summary table populates."""
        self.tearDown()
        report_table_name = OCP_REPORT_TABLE_MAP['report']
        summary_table_name = OCP_REPORT_TABLE_MAP['line_item_daily_summary']

        report_table = getattr(self.accessor.report_schema, report_table_name)
        summary_table = getattr(self.accessor.report_schema,
                                summary_table_name)

        for _ in range(25):
            self.creator.create_ocp_usage_line_item(self.reporting_period,
                                                    self.report)

        report_entry = report_table.objects.all().aggregate(
            Min('interval_start'), Max('interval_start'))
        start_date = report_entry['interval_start__min']
        end_date = report_entry['interval_start__max']

        start_date = start_date.replace(hour=0,
                                        minute=0,
                                        second=0,
                                        microsecond=0)
        end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)

        query = self.accessor._get_db_obj_query(summary_table_name)
        initial_count = query.count()

        self.accessor.populate_line_item_daily_table(start_date, end_date,
                                                     self.cluster_id)
        self.accessor.populate_line_item_daily_summary_table(
            start_date, end_date, self.cluster_id)

        self.assertNotEqual(query.count(), initial_count)

        summary_entry = summary_table.objects.all().aggregate(
            Min('usage_start'), Max('usage_start'))
        result_start_date = summary_entry['usage_start__min']
        result_end_date = summary_entry['usage_start__max']

        self.assertEqual(result_start_date, start_date)
        self.assertEqual(result_end_date, end_date)

        entry = query.first()

        summary_columns = [
            'cluster_id',
            'namespace',
            'node',
            'node_capacity_cpu_core_hours',
            'node_capacity_cpu_cores',
            'node_capacity_memory_gigabyte_hours',
            'node_capacity_memory_gigabytes',
            'pod',
            'pod_labels',
            'pod_limit_cpu_core_hours',
            'pod_limit_memory_gigabyte_hours',
            'pod_request_cpu_core_hours',
            'pod_request_memory_gigabyte_hours',
            'pod_usage_cpu_core_hours',
            'pod_usage_memory_gigabyte_hours',
            'usage_end',
            'usage_start',
        ]

        for column in summary_columns:
            self.assertIsNotNone(getattr(entry, column))
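
The extra `mock_vacuum` parameter implies the method is wrapped in a `unittest.mock.patch` decorator that this snippet trimmed off; schematically (the patch target is a hypothetical example, not taken from the source):

from unittest.mock import patch

@patch('masu.database.report_db_accessor.ReportDBAccessor.vacuum_table')
def test_populate_line_item_daily_summary_table(self, mock_vacuum):
    ...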
Example #11
 def max_bid(self):
     return self.bid_set.all().aggregate(max_bid=Max('value'), buyer=Max('placer'))
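
`Max('placer')` here returns the largest placer id, which in general is not the placer of the highest bid. If the goal is the winning bid itself, ordering keeps value and placer on the same row; a sketch assuming the same `bid_set` relation:

def max_bid(self):
    # Highest bid as an object, so value and placer come from one row.
    return self.bid_set.order_by('-value').first()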
Example #12
 def get_week_top10_users(self):
     users = self.session_week.values("user") \
                  .annotate(total=Count("user")) \
                  .annotate(last=Max("date_start")).order_by("-total")[:10]
     return users
Example #13
 def get_week_top10_assets(self):
     assets = self.session_week.values("asset")\
         .annotate(total=Count("asset"))\
         .annotate(last=Max("date_start")).order_by("-total")[:10]
     return assets
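
Examples #12 and #13 differ only in the grouping field, so they could share one parameterized helper; a sketch:

from django.db.models import Count, Max

def get_week_top10(self, field):
    # Group the week's sessions by `field`, count them, keep the 10 busiest.
    return (self.session_week.values(field)
            .annotate(total=Count(field))
            .annotate(last=Max("date_start"))
            .order_by("-total")[:10])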
Example #14
def calculate_search_results(kwargs, user):
    pootle_path = kwargs["pootle_path"]
    category = kwargs.get("category")
    checks = kwargs.get("checks")
    offset = kwargs.get("offset", 0)
    limit = kwargs.get("count", 9)
    modified_since = kwargs.get("modified-since")
    month = kwargs.get("month")
    search = kwargs.get("search")
    sfields = kwargs.get("sfields")
    soptions = kwargs.get("soptions", [])
    sort = kwargs.get("sort", None)
    vfolder = kwargs.get("vfolder", None)
    language_code, project_code, dir_path_, filename = (
        split_pootle_path(kwargs["pootle_path"]))
    uids = [
        int(x)
        for x
        in kwargs.get("uids", "").split(",")
        if x]
    unit_filter = kwargs.get("filter")

    if modified_since:
        modified_since = parse_datetime(modified_since)
    if month:
        month = get_date_interval(month)

    path_kwargs = {
        k: v
        for k, v
        in resolve(pootle_path).kwargs.items()
        if k in [
            "language_code",
            "project_code",
            "dir_path",
            "filename"]}
    qs = (
        Unit.objects.get_translatable(user=user, **path_kwargs)
                    .order_by("store", "index"))
    if vfolder is not None:
        qs = qs.filter(store__vfolders=vfolder)
    # if "filter" is present in request vars...
    if unit_filter:
        # filter the results accordingly
        qs = UnitSearchFilter().filter(
            qs,
            unit_filter,
            user=user,
            checks=checks,
            category=get_category_id(category))
        # filter by modified
        if modified_since:
            qs = qs.filter(submitted_on__gt=modified_since).distinct()
        if month is not None:
            qs = qs.filter(
                submitted_on__gte=month[0],
                submitted_on__lte=month[1]).distinct()
        # sort results
        if unit_filter in ["my-suggestions", "user-suggestions"]:
            sort_on = "suggestions"
        elif unit_filter in ["my-submissions", "user-submissions"]:
            sort_on = "submissions"
        else:
            sort_on = "units"
        sort_by = ALLOWED_SORTS[sort_on].get(sort, None)
        if sort_by is not None:
            # filtered sort
            if sort_on in SIMPLY_SORTED:
                qs = qs.order_by(sort_by, "store__pootle_path", "index")
            else:
                max_field, sort_order = get_max_and_order_fields(sort_by)
                qs = (
                    qs.annotate(sort_by_field=Max(max_field))
                      .order_by(sort_order, "store__pootle_path", "index"))
    # text search
    if search and sfields:
        qs = UnitTextSearch(qs).search(
            search,
            [sfields],
            "exact" in soptions)

    find_unit = (
        not offset
        and language_code
        and project_code
        and filename
        and uids)
    start = offset
    total = qs.count()
    if find_unit:
        # find the uid in the Store
        uid_list = list(qs.values_list("pk", flat=True))
        unit_index = uid_list.index(uids[0])
        start = int(unit_index / (2 * limit)) * (2 * limit)
    end = min(start + (2 * limit), total)

    unit_groups = []
    units_by_path = groupby(
        qs.values(*GroupedResults.select_fields)[start:end],
        lambda x: x["store__pootle_path"])
    for pootle_path, units in units_by_path:
        unit_groups.append(
            {pootle_path: StoreResults(units).data})

    total = qs.count()
    return total, start, min(end, total), unit_groups
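
The `find_unit` branch snaps the requested unit into a window of size 2 * limit. A worked illustration with made-up numbers:

limit = 9                                  # default 'count' from kwargs
unit_index = 47                            # position of the requested uid
window = 2 * limit                         # 18
start = (unit_index // window) * window    # 47 // 18 == 2, so start == 36
end = min(start + window, 100)             # with total == 100, end == 54
print(start, end)                          # 36 54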
Beispiel #15
def get_async_indicator_time():
    return AsyncIndicator.objects.exclude(date_queued__isnull=True)\
        .aggregate(Max('date_created'))['date_created__max'] or datetime.now()
Beispiel #16
def exam_records_ordered(exam):
    """Custom template tag: takes an exam object and returns its records ordered by student degree."""
    records = ExamRecord.objects.filter(exam=exam).annotate(
        top_stud=Max('student_degree')).order_by('-top_stud')
    return records
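# Registration sketch: the docstring calls this a custom template tag, so it
# presumably lives in a templatetags module and is registered along these
# lines; the `register` object and the template usage are assumptions.
from django import template

register = template.Library()
register.simple_tag(exam_records_ordered)
# In a template (assumed usage):
#   {% exam_records_ordered exam as records %}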
Beispiel #17
def score_range():
    return '{} - {}'.format(
        models.tRNA.objects.aggregate(Min('score'))['score__min'],
        models.tRNA.objects.aggregate(Max('score'))['score__max'])
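# Design note: the two aggregate() calls above issue two separate queries;
# both bounds can be fetched in one query by passing both expressions to a
# single aggregate() call. A minimal equivalent sketch:
def score_range_single_query():
    bounds = models.tRNA.objects.aggregate(Min('score'), Max('score'))
    return '{} - {}'.format(bounds['score__min'], bounds['score__max'])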
Beispiel #18
def query_pvi_info_h5(pvi_name, pvi_info=PVIQueryInfo.Energy_Today):
    '''
    All PV inverter types should implement this function for every PVIQueryInfo value.
    '''
    logger.debug('query_pvi_info({pvi_name},{pvi_info})'.format(
        pvi_name=pvi_name, pvi_info=pvi_info))
    time_since = (datetime.now() + timedelta(minutes=-30)).time()
    #time_since = datetime.combine(datetime.now().date(),time.min)
    time_until = datetime.now().time()
    logger.debug('query time range %s and %s' %
                 (str(time_since), str(time_until)))

    if pvi_info == PVIQueryInfo.Energy_Today:
        queryset = RegData.objects.filter(
            address=h5.INPUT_REGISTER['Today Wh'][
                h5.REGISTER_ADDRESS_COL]).filter(
                    pvi_name=pvi_name).values('prob_date').annotate(
                        Max('value')).order_by('prob_date')
        total = len(queryset)
        if total > 0:
            t_date = queryset[total - 1]['prob_date']
            if t_date == datetime.now().date():
                value = queryset[total - 1].get('value__max')
                logger.debug('return %d' % (value * 10))
                return (value * 10)
            # falls through (implicitly returns None) when the latest
            # sample is not from today
        else:
            logger.error('empty query result returned')
            return 0
    elif pvi_info == PVIQueryInfo.Energy_This_Month:
        last_month_end_date = date(datetime.now().year,
                                   datetime.now().month,
                                   1) + timedelta(days=-1)
        queryset = RegData.objects.filter(
            address=h5.INPUT_REGISTER['Today Wh'][h5.REGISTER_ADDRESS_COL]
        ).filter(pvi_name=pvi_name).filter(
            prob_date__gt=last_month_end_date).values('prob_date').annotate(
                Max('value')).order_by('prob_date')
        value = 0
        if len(queryset) > 0:
            for entry in queryset:
                value += entry.get('value__max')
            logger.debug('return %d' % (value * 10))
            return (value * 10)
        else:
            logger.error('empty query result returned')
            return 0
    elif pvi_info == PVIQueryInfo.Energy_Until_Now:
        queryset = RegData.objects.filter(
            address=h5.INPUT_REGISTER['DC Life Wh'][h5.REGISTER_ADDRESS_COL]
        ).filter(pvi_name=pvi_name).order_by('-date')
        if len(queryset) > 0:
            value = queryset[0].value * 10
            logger.debug('return %d' % (value))
            return (value)
        else:
            logger.error('empty query result returned')
            return 0
    elif pvi_info == PVIQueryInfo.Energy_Hourly_List:
        return pvi_query_info_energy_hourly_list()

    elif pvi_info == PVIQueryInfo.Energy_Daily_List:
        return pvi_query_info_energy_daily_list()

    elif pvi_info == PVIQueryInfo.AC_Output_Voltage:
        queryset = RegData.objects.filter(
            address=h5.INPUT_REGISTER['Voltage'][h5.REGISTER_ADDRESS_COL]
        ).filter(pvi_name=pvi_name).filter(
            prob_date__exact=datetime.now().date()).filter(
                prob_time__range=[time_since, time_until]).order_by('-date')
        if len(queryset) > 0:
            value = round(queryset[0].value * 0.1, 1)
            logger.debug('return %.1f' % (value))
            return (value)
        else:
            logger.error('empty query result returned')
    elif pvi_info == PVIQueryInfo.AC_Output_Current:
        queryset = RegData.objects.filter(
            address=h5.INPUT_REGISTER['Current'][h5.REGISTER_ADDRESS_COL]
        ).filter(pvi_name=pvi_name).filter(
            prob_date__exact=datetime.now().date()).filter(
                prob_time__range=[time_since, time_until]).order_by('-date')
        if len(queryset) > 0:
            value = round(queryset[0].value * 0.01, 2)
            logger.debug('return %.2f' % (value))
            return (value)
        else:
            logger.error('empty query result returned')
    elif pvi_info == PVIQueryInfo.AC_Output_Wattage:
        queryset = RegData.objects.filter(
            address=h5.INPUT_REGISTER['Wattage'][h5.REGISTER_ADDRESS_COL]
        ).filter(pvi_name=pvi_name).filter(
            prob_date__exact=datetime.now().date()).filter(
                prob_time__range=[time_since, time_until]).order_by('-date')
        if len(queryset) > 0:
            value = queryset[0].value
            logger.debug('return %d' % (value))
            return (value)
        else:
            logger.error('empty query result returned')
    else:
        logger.error('unknown query pvi_info %s' % pvi_info)
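# Usage sketch; 'pvi1' is an assumed inverter name, not taken from the
# original code.
today_wh = query_pvi_info_h5('pvi1', PVIQueryInfo.Energy_Today)
latest_voltage = query_pvi_info_h5('pvi1', PVIQueryInfo.AC_Output_Voltage)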
Beispiel #19
def item(request, e_id):
    try:
        product = Auction.objects.filter(state_id=1).get(id=e_id)
    except Auction.DoesNotExist:
        return HttpResponse(status=404)

    current_bid = ''
    message = ''
    if request.user.is_authenticated():

        #Ban an auction
        if request.user.is_superuser:
            if request.POST.get('ban'):
                product.state_id = 2
                product.save()
                bidders_email = Bidder.objects.filter(item=e_id)
                # a queryset is never None; check for emptiness instead
                if bidders_email:
                    for bidders in bidders_email:
                        email = bidders.bidder_name.email
                        send_mail('Banned', 'This auction has been banned!', request.user.email, [email], fail_silently=True)
                    message = 'Banned successfully'
                    return render_to_response("index.html", {'message': message}, context_instance=RequestContext(request))
        #End ban auction

        # Get the maximum bidding price if exists
        b_price = Bidder.objects.filter(item=e_id).aggregate(Max('price'))
        current_bid = b_price['price__max']
        #if current winning price does not exist compare with the starting price
        if b_price['price__max'] is None:
            if product.owner.id == request.user.id:
                message = 'You cannot bid on your own item!'
            else:
                if request.GET:
                    price_val = Decimal(request.GET['price'])
                    #Check first if the bid price is greater than initial value
                    if price_val > product.price_min:
                        bid = Bidder(price=price_val, bidder_name_id=request.user.id, item_id=e_id)
                        bid.save()
                        #send email to the auction owner
                        send_mail('Auction created!','You have auctioned this product!','*****@*****.**' , [request.user.email], fail_silently=True)
                        message = 'Maximum bid is ' + str(price_val)
                    else:
                        message = _('You should bid at least 0.01€ more than the initial value')
        else:
            current_bid = Bidder.objects.filter(item=e_id).get(price=Decimal(b_price['price__max']))

            if product.owner.id == request.user.id:
                message = 'You cannot bid on your own item'
            else:
                if request.GET:
                    price_val = Decimal(request.GET['price'])
                    if Decimal(b_price['price__max']) < price_val:
                        #Check if user is bidding while he is winning
                        if current_bid.bidder_name.id == request.user.id:
                            message = _('You already bid, please wait until others bid')
                        else:
                            bid = Bidder(price=price_val, bidder_name_id=request.user.id, item_id=e_id)
                            bid.save()
                            send_mail('Someone has bid on the item you auctioned!','Someone has raised the bid!','*****@*****.**' , [current_bid.bidder_name.email], fail_silently=True)
                            send_mail('Bidding has been made!','This bid has been created!','*****@*****.**' , [request.user.email], fail_silently=True)
                            message = 'Maximum bid is ' + str(price_val)
                        return render_to_response("item.html",
                                                  {'item': product, 'user': request.user,'current': current_bid, 'message': message},
                                                  context_instance=RequestContext(request))

    return render_to_response("item.html",
                              {'item': product, 'user': request.user, 'current': current_bid, 'message': message},
                              context_instance=RequestContext(request))
Beispiel #20
def pvi_query_info_energy_hourly_list():
    '''
    Helper for query_pvi_info handling PVIQueryInfo.Energy_Hourly_List.
    '''
    date_since = (
        datetime.now() +
        timedelta(hours=-pvi.MAX_QUERY_ENERGY_HOURLY_LIST_LEN)).date()
    queryset = RegData.objects.filter(
        address=h5.INPUT_REGISTER['Today Wh'][h5.REGISTER_ADDRESS_COL]).filter(
            prob_date__gt=date_since).values(
                'prob_date', 'prob_hour').annotate(Max('value')).order_by(
                    '-prob_date', '-prob_hour')
    logger.debug('sql cmd: %s' % str(queryset.query))
    info = []
    logger.debug('queryset count %d' % queryset.count())
    max_report_len = pvi.MAX_QUERY_ENERGY_HOURLY_LIST_LEN + 1  # last 48 hours
    if queryset.count() < max_report_len:
        max_report_len = queryset.count()
    for entry in queryset[:max_report_len]:
        #logger.debug(entry['prob_date'])
        #logger.debug(entry['prob_hour'])
        t_hour = entry['prob_hour']
        t_time = time(t_hour, 0, 0)
        #logger.debug(str(t_time))
        info.append([
            datetime.combine(entry['prob_date'], t_time), entry['value__max']
        ])
    logger.debug('query return:\n%s' % str(info))
    info.sort(key=lambda x: x[0])

    if len(info) > 0:
        info = [[entry[0], entry[1] * 10] for entry in info]
    else:
        logger.warning('no energy sample data in database')
        this_hour_time = datetime.combine(datetime.now().date(),
                                          time(datetime.now().hour, 0, 0))
        for i in range(pvi.MAX_QUERY_ENERGY_HOURLY_LIST_LEN):
            info.append([this_hour_time, 0])
            this_hour_time -= timedelta(hours=1)

    info.reverse()
    dataset = info
    # pairwise difference of consecutive cumulative readings within the same
    # day gives the energy produced in each hour
    info = [[dataset[i][0], dataset[i][1] - dataset[i + 1][1]]
            for i in range(len(dataset) - 1)
            if dataset[i][0].date() == dataset[i + 1][0].date()]
    info.reverse()

    # insert a zero energy value for each missing hour
    dataset = []
    if len(info) > 0:
        dataset.append(info[0])
        t_date = info[0][0]
        i = 1
        while i < len(info):
            t_date = t_date + timedelta(hours=+1)
            if t_date < info[i][0]:
                dataset.append([t_date, 0])
            else:
                dataset.append(info[i])
                i += 1
        dataset.sort(key=lambda x: x[0])

    return dataset
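# Worked example of the differencing step above, with hours abbreviated to
# ints: cumulative Wh readings become per-hour energy values.
readings = [(12, 400), (11, 250), (10, 100)]  # descending, like `dataset`
hourly = [[readings[i][0], readings[i][1] - readings[i + 1][1]]
          for i in range(len(readings) - 1)]
assert hourly == [[12, 150], [11, 150]]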
Beispiel #21
 def _longest_query_by_view(self, filters):
     values_list = (models.Request.objects.filter(*filters)
                    .values_list("view_name")
                    .annotate(max=Max('time_taken'))
                    .order_by('-max')[:5])
     requests = []
     for view_name, _ in values_list:
         request = models.Request.objects.filter(
             *filters, view_name=view_name).order_by('-time_taken')[0]
         requests.append(request)
     return requests
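# Alternative sketch using Subquery/OuterRef (Django >= 1.11) that selects
# each view's slowest request without the per-view loop; shown only as a
# design-choice illustration, with the *filters arguments omitted for brevity.
from django.db.models import OuterRef, Subquery

slowest_pk = (models.Request.objects
              .filter(view_name=OuterRef('view_name'))
              .order_by('-time_taken')
              .values('pk')[:1])
top_requests = (models.Request.objects
                .filter(pk=Subquery(slowest_pk))
                .order_by('-time_taken')[:5])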
Beispiel #22
# author_obj.book.add(11, 12)
# books = Book.objects.filter(id__gt=10)
# author_obj.book.add(*books)
# Give author_obj a new set of books (all books with id > 10); the existing
# relations are removed.
# books = Book.objects.filter(id__gt=10)
# author_obj.book.set(books)
# Remove a specific book from author_obj.
# book_del = Book.objects.get(id=11)
# author_obj.book.remove(book_del)
# Clear all books related to author_obj. Check whether the underlying table
# column in the database is allowed to be null.
# author_obj.book.clear()
# Aggregate functions.
from django.db.models import Avg, Sum, Max, Min, Count
price_avg = Book.objects.all().aggregate(Avg("price"))
price_sum = Book.objects.all().aggregate(Sum("price"))
price_max = Book.objects.all().aggregate(Max("price"))
# price_min= sets the key under which the result is returned.
price_min = Book.objects.all().aggregate(price_min=Min("price"))
price_all = Book.objects.all().aggregate(price_min=Min("price"),
                                         price_max=Max("price"),
                                         price_avg=Avg("price"))
# Grouping: count the authors related to each book; Count takes the name of
# the related table/field directly.
books_obj = Book.objects.all().annotate(author_num=Count("author"))
for item in books_obj:
    print(item.name, item.author_num)
# Books that have more than one author.
books_obj = Book.objects.all().annotate(author_num=Count("author")).filter(
    author_num__gt=1)
# F and Q queries.
from django.db.models import F, Q
# F query: fetch all rows whose modified time is later than their creation time.
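# The snippet breaks off here; a completion sketch of the queries the
# comments above describe, assuming the Book model has create_time and
# update_time datetime fields (both names are assumptions):
books_modified = Book.objects.filter(update_time__gt=F("create_time"))
# Q query: combine conditions with OR (|), AND (&) or NOT (~).
books_q = Book.objects.filter(Q(price__lt=10) | ~Q(name__startswith="A"))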
Beispiel #23
def home(request):
    engine = create_engine(
        "mssql+pyodbc://sa:[email protected]:1433/vpcanales?driver=SQL+Server+Native+Client+11.0"
    )
    today = date.today()

    # Activity summary
    resumenActividad = """select * from index_actividad"""

    ra = pd.read_sql_query(resumenActividad, engine)
    ra.altas = ra.altas.astype(int)
    ra.activaciones = ra.activaciones.astype(int)

    ultimate_date = Activacion.objects.values_list('fecha_actividad',flat=True)\
    .latest('fecha_actividad')

    act = Activacion.objects.filter(
        fecha_actividad__year=ultimate_date.year,
        fecha_actividad__month=ultimate_date.month,
    ).values('cantidad', 'fecha_actividad').aggregate(Sum('cantidad'),
                                                      Max('fecha_actividad'))

    # ca2=ca.get('fecha_actividad__max')

    alt = Alta.objects.filter(
        fecha_actividad__year=ultimate_date.year,
        fecha_actividad__month=ultimate_date.month,
    ).values('cantidad', 'fecha_actividad').aggregate(Sum('cantidad'),
                                                      Max('fecha_actividad'))

    conv = alt.get('cantidad__sum') / act.get('cantidad__sum') * 100


    ultimate_date_venta = Venta.objects.values('tiempo__fecha').exclude(monto__exact=0)\
                                       .order_by('-tiempo__fecha')[0]['tiempo__fecha']

    re=Venta.objects.filter(tiempo__fecha__year=ultimate_date_venta.year,
                            tiempo__fecha__month=ultimate_date_venta.month)\
                            .aggregate(Sum('monto'))

    # Number of activations in the previous month:
    resumenBackoffice = """select * from index_gestionr"""

    rb = pd.read_sql_query(resumenBackoffice, engine)
    rb.escaladas = rb.escaladas.astype(int)
    rb.resueltas = rb.resueltas.astype(int)

    es = rb.loc[rb.tail(1).index.item(), 'escaladas']

    aaasi = '''select * from index_arcgis'''
    puntos = pd.read_sql_query(aaasi, engine)

    # OPEN (current) MONTH
    resumenActividad_diaria = """
     SELECT ACTIVACIONES_ABIERTAS.fecha, ACTIVACIONES_ABIERTAS.activaciones , ALTAS_ABIERTAS.altas  FROM
         (SELECT sum(a.CANTIDAD) ACTIVACIONES , CONCAT(DATENAME(month, a.fecha_actividad),' ', DATENAME(YEAR, a.fecha_actividad)) Fecha
          FROM reporteria_activacion a
          WHERE MONTH(FECHA_ACTIVIDAD) IN (
             SELECT month(max(FECHA_ACTIVIDAD)) FROM reporteria_activacion)
          AND YEAR(FECHA_ACTIVIDAD) IN (
             SELECT year(max(FECHA_ACTIVIDAD)) FROM reporteria_activacion)
          GROUP BY CONCAT(DATENAME(month, a.fecha_actividad),' ', DATENAME(YEAR, a.fecha_actividad))
      ) ACTIVACIONES_ABIERTAS,

         (SELECT sum(a.CANTIDAD) ALTAS
          FROM reporteria_alta a
          WHERE MONTH(FECHA_ACTIVIDAD) IN (
             SELECT month(max(FECHA_ACTIVIDAD)) FROM reporteria_alta)
          AND YEAR(FECHA_ACTIVIDAD) IN (
             SELECT year(max(FECHA_ACTIVIDAD)) FROM reporteria_alta)
      ) ALTAS_ABIERTAS
    """

    ma = pd.read_sql_query(resumenActividad_diaria, engine)
    ma.activaciones = ma.activaciones.astype(int)
    ma.altas = ma.altas.astype(int)
    fecha = ma.tail(1).iloc[0, 0]
    ma_act = ma.tail(1).iloc[0, 1]
    ma_alt = ma.tail(1).iloc[0, 2]

    return render(
        request,
        "home.html",
        {
            'ra': ra,
            # 'ca2':ca2,
            'act': act.get('cantidad__sum'),
            'act_fecha': act.get('fecha_actividad__max'),
            'alt_fecha': alt.get('fecha_actividad__max'),
            'alt': alt.get('cantidad__sum'),
            'es': '{0:,d}'.format(es).replace(',', '.'),
            're': re.get('monto__sum'),
            're_fecha': ultimate_date_venta,
            'conv': conv,
            'rb': rb,
            'puntos': puntos,
            'ma': ma
        })
Beispiel #24
 def __init__(self, provider, report_type):
     """Constructor."""
     self._mapping = [
         {
             'provider':
             Provider.PROVIDER_AZURE,
             'alias':
             'subscription_guid',  # FIXME: probably wrong
             'annotations': {},
             'end_date':
             'costentrybill__billing_period_end',
             'filters': {
                 'subscription_guid': [
                     {
                         'field': 'subscription_guid',
                         'operation': 'icontains',
                         'composition_key': 'account_filter'
                     },
                 ],
                 'service_name': {
                     'field': 'service_name',
                     'operation': 'icontains'
                 },
                 'resource_location': {
                     'field': 'resource_location',
                     'operation': 'icontains'
                 },
                 'instance_type': {
                     'field': 'instance_type',
                     'operation': 'icontains'
                 }
             },
             'group_by_options': [
                 'service_name', 'subscription_guid', 'resource_location',
                 'instance_type'
             ],
             'tag_column':
             'tags',
             'report_type': {
                 'costs': {
                     'aggregates': {
                         'cost':
                         Sum(
                             Coalesce(F('pretax_cost'),
                                      Value(0,
                                            output_field=DecimalField())) +
                             Coalesce(F('markup_cost'),
                                      Value(0, output_field=DecimalField()))
                         ),
                         'infrastructure_cost':
                         Sum('pretax_cost'),
                         'derived_cost':
                         Sum(Value(0, output_field=DecimalField())),
                         'markup_cost':
                         Sum(
                             Coalesce(F('markup_cost'),
                                      Value(0,
                                            output_field=DecimalField()))),
                     },
                     'aggregate_key':
                     'pretax_cost',
                     'annotations': {
                         'cost':
                         Sum(
                             Coalesce(F('pretax_cost'),
                                      Value(0,
                                            output_field=DecimalField())) +
                             Coalesce(F('markup_cost'),
                                      Value(0, output_field=DecimalField()))
                         ),
                         'infrastructure_cost':
                         Sum('pretax_cost'),
                         'derived_cost':
                         Value(0, output_field=DecimalField()),
                         'markup_cost':
                         Sum(
                             Coalesce(F('markup_cost'),
                                      Value(0,
                                            output_field=DecimalField()))),
                         'cost_units':
                         Coalesce(Max('currency'), Value('USD'))
                     },
                     'delta_key': {
                         'cost':
                         Sum(
                             Coalesce(F('pretax_cost'),
                                      Value(0,
                                            output_field=DecimalField())) +
                             Coalesce(F('markup_cost'),
                                      Value(0, output_field=DecimalField()))
                         )
                     },
                     'filter': [{}],
                     'cost_units_key':
                     'currency',
                     'cost_units_fallback':
                     'USD',
                     'sum_columns': [
                         'cost', 'infrastructure_cost', 'derived_cost',
                         'markup_cost'
                     ],
                     'default_ordering': {
                         'cost': 'desc'
                     },
                 },
                 'instance_type': {
                     'aggregates': {
                         'cost':
                         Sum(
                             Coalesce(F('pretax_cost'),
                                      Value(0,
                                            output_field=DecimalField())) +
                             Coalesce(F('markup_cost'),
                                      Value(0, output_field=DecimalField()))
                         ),
                         'infrastructure_cost':
                         Sum('pretax_cost'),
                         'derived_cost':
                         Sum(Value(0, output_field=DecimalField())),
                         'markup_cost':
                         Sum(
                             Coalesce(F('markup_cost'),
                                      Value(0,
                                            output_field=DecimalField()))),
                         'count':
                         Sum(Value(0, output_field=DecimalField())),
                         'usage':
                         Sum('usage_quantity'),
                     },
                     'aggregate_key':
                     'usage_quantity',
                     'annotations': {
                         'cost':
                         Sum(
                             Coalesce(F('pretax_cost'),
                                      Value(0,
                                            output_field=DecimalField())) +
                             Coalesce(F('markup_cost'),
                                      Value(0, output_field=DecimalField()))
                         ),
                         'infrastructure_cost':
                         Sum('pretax_cost'),
                         'derived_cost':
                         Value(0, output_field=DecimalField()),
                         'markup_cost':
                         Sum(
                             Coalesce(F('markup_cost'),
                                      Value(0,
                                            output_field=DecimalField()))),
                         'cost_units':
                         Coalesce(Max('currency'), Value('USD')),
                         'count':
                         Max('instance_count'),
                         'count_units':
                         Value('instance_types', output_field=CharField()),
                         'usage':
                         Sum('usage_quantity'),
                         # FIXME: Waiting on MSFT for usage_units default
                         'usage_units':
                         Coalesce(Max('unit_of_measure'),
                                  Value('Instance Type Placeholder'))
                     },
                     'delta_key': {
                         'usage': Sum('usage_quantity')
                     },
                     'filter': [{
                         'field': 'instance_type',
                         'operation': 'isnull',
                         'parameter': False
                     }],
                     'group_by': ['instance_type'],
                     'cost_units_key':
                     'currency',
                     'cost_units_fallback':
                     'USD',
                     'usage_units_key':
                     'unit_of_measure',
                     'usage_units_fallback':
                     'Instance Type Placeholder',  # FIXME: Waiting on MSFT
                     'count_units_fallback':
                     'instances',
                     'sum_columns': [
                         'usage', 'cost', 'infrastructure_cost',
                         'derived_cost', 'markup_cost', 'count'
                     ],
                     'default_ordering': {
                         'usage': 'desc'
                     },
                 },
                 'storage': {
                     'aggregates': {
                         'cost':
                         Sum(
                             Coalesce(F('pretax_cost'),
                                      Value(0,
                                            output_field=DecimalField())) +
                             Coalesce(F('markup_cost'),
                                      Value(0, output_field=DecimalField()))
                         ),
                         'usage':
                         Sum('usage_quantity'),
                         'infrastructure_cost':
                         Sum('pretax_cost'),
                         'markup_cost':
                         Sum(
                             Coalesce(F('markup_cost'),
                                      Value(0,
                                            output_field=DecimalField()))),
                         'derived_cost':
                         Sum(Value(0, output_field=DecimalField())),
                     },
                     'aggregate_key':
                     'usage_quantity',
                     'annotations': {
                         'cost':
                         Sum(
                             Coalesce(F('pretax_cost'),
                                      Value(0,
                                            output_field=DecimalField())) +
                             Coalesce(F('markup_cost'),
                                      Value(0, output_field=DecimalField()))
                         ),
                         'infrastructure_cost':
                         Sum('pretax_cost'),
                         'derived_cost':
                         Value(0, output_field=DecimalField()),
                         'markup_cost':
                         Sum(
                             Coalesce(F('markup_cost'),
                                      Value(0,
                                            output_field=DecimalField()))),
                         'cost_units':
                         Coalesce(Max('currency'), Value('USD')),
                         'usage':
                         Sum('usage_quantity'),
                         # FIXME: Waiting on MSFT for usage_units default
                         'usage_units':
                         Coalesce(Max('unit_of_measure'),
                                  Value('Storage Type Placeholder'))
                     },
                     'delta_key': {
                         'usage': Sum('usage_quantity')
                     },
                     'filter': [{
                         'field': 'service_name',
                         'operation': 'contains',
                         'parameter': 'Storage'
                     }],
                     'cost_units_key':
                     'currency',
                     'cost_units_fallback':
                     'USD',
                     'usage_units_key':
                     'unit_of_measure',
                     'usage_units_fallback':
                     'Storage Type Placeholder',  # FIXME
                     'sum_columns': [
                         'usage', 'cost', 'infrastructure_cost',
                         'derived_cost', 'markup_cost'
                     ],
                     'default_ordering': {
                         'usage': 'desc'
                     },
                 },
                 'tags': {
                     'default_ordering': {
                         'cost': 'desc'
                     },
                 },
             },
             'start_date':
             'costentrybill__billing_period_start',
             'tables': {
                 'query': AzureCostEntryLineItemDailySummary,
             },
         },
     ]
     super().__init__(provider, report_type)
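# The recurring Coalesce(...) + Value(0) idiom above substitutes zero when a
# cost column is NULL, so the Sum never propagates NULLs. A standalone sketch
# of the same idiom (the LineItem model name is an assumption):
from django.db.models import DecimalField, F, Sum, Value
from django.db.models.functions import Coalesce

totals = LineItem.objects.aggregate(
    cost=Sum(
        Coalesce(F('pretax_cost'), Value(0, output_field=DecimalField())) +
        Coalesce(F('markup_cost'), Value(0, output_field=DecimalField()))))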
Beispiel #25
 def max_col(self):
     from django.db.models import Max
     return self.get_cells().aggregate(Max('col'))['col__max']
Beispiel #26
def this_year():
    return Project.objects.all().aggregate(
        Max('proj_years'))['proj_years__max']
Beispiel #27
 def test_invalid_flight_page(self):
     # `or 0` guards against an empty Flight table, where the aggregate
     # returns None and `max_id + 1` would raise a TypeError
     max_id = Flight.objects.all().aggregate(Max('id'))['id__max'] or 0

     c = Client()
     response = c.get(f'/{max_id + 1}')
     self.assertEqual(response.status_code, 404)
Beispiel #28
class EntityViewSet(CollectionViewSet):
    """API view for entities."""

    filter_class = EntityFilter
    serializer_class = EntitySerializer

    queryset = Entity.objects.prefetch_related(
        'data', 'descriptor_schema', 'contributor').annotate(
            latest_date=Max('data__modified')).order_by('-latest_date')

    def _check_collection_permissions(self, collection_id, user):
        """Check that collection exists and user has `add` permission."""
        collection_query = Collection.objects.filter(pk=collection_id)
        if not collection_query.exists():
            raise exceptions.ValidationError('Collection id does not exist')

        collection = collection_query.first()
        if not user.has_perm('add_collection', obj=collection):
            if user.is_authenticated():
                raise exceptions.PermissionDenied()
            else:
                raise exceptions.NotFound()

    @detail_route(methods=[u'post'])
    def add_to_collection(self, request, pk=None):
        """Add Entity to a collection."""
        entity = self.get_object()

        if 'ids' not in request.data:
            return Response({"error": "`ids` parameter is required"},
                            status=status.HTTP_400_BAD_REQUEST)

        for collection_id in request.data['ids']:
            self._check_collection_permissions(collection_id, request.user)

        for collection_id in request.data['ids']:
            entity.collections.add(collection_id)

            collection = Collection.objects.get(pk=collection_id)
            for data in entity.data.all():
                collection.data.add(data)

        return Response()

    @detail_route(methods=[u'post'])
    def remove_from_collection(self, request, pk=None):
        """Remove Entity from a collection."""
        entity = self.get_object()

        if 'ids' not in request.data:
            return Response({"error": "`ids` parameter is required"},
                            status=status.HTTP_400_BAD_REQUEST)

        for collection_id in request.data['ids']:
            self._check_collection_permissions(collection_id, request.user)

        for collection_id in request.data['ids']:
            entity.collections.remove(collection_id)

            collection = Collection.objects.get(pk=collection_id)
            for data in entity.data.all():
                collection.data.remove(data)

        return Response()

    @detail_route(methods=[u'post'])
    def add_data(self, request, pk=None):
        """Add data to Entity and it's collection."""
        # add data to entity
        resp = super(EntityViewSet, self).add_data(request, pk)

        # add data to collections in which entity is
        entity = self.get_object()
        for collection in entity.collections.all():
            collection.data.add(*request.data['ids'])

        return resp

    @detail_route(methods=[u'post'])
    def remove_data(self, request, pk=None):
        """Remove Data from Entity and delete it if it is empty."""
        resp = super(EntityViewSet, self).remove_data(request, pk)

        entity = self.get_object()
        if entity.data.count() == 0:
            entity.delete()

        return resp
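# Usage sketch for the custom endpoints above; the URL prefix depends on how
# the router registers EntityViewSet, so the path and the authenticated user
# below are assumptions.
from rest_framework.test import APIClient

client = APIClient()
client.force_authenticate(user=admin_user)  # admin_user: assumed fixture
client.post('/api/entity/1/add_to_collection/', {'ids': [2, 3]}, format='json')
client.post('/api/entity/1/remove_from_collection/', {'ids': [2]}, format='json')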
Beispiel #29
 def get_total_distance(self):
     return Trace_point.objects.filter(trace=self).aggregate(
         Max("distance"))["distance__max"]
Beispiel #30
def postr(request, r_id, num):
    verification_user_permission_for_write(request)
    s = Settings.objects.filter(field='plantable')[0].value
    table = Plantable.objects.get(pk=int(s))

    if (num > 0) and (request.user.has_perm('plan.view_plan')):
        plans = Plan.objects.filter(plantable_id=table, r_id=r_id)
        count = len(plans)
        if count == 0:
            return render(request, 'plan/post_empty.html')
        if num >= count:
            num = count
        plan = plans[num - 1]
        i_id = plan.id
        form = PlanForm(instance=plan)
        if request.method == "POST":
            # bind the submitted data before saving; an unbound form would
            # silently re-save the unchanged instance
            form = PlanForm(request.POST, instance=plan)
            if form.is_valid():
                form.save()
            context = {
                'num': num,
                'count': count,
                'form': form,
                'r_id': r_id,
                'i_id': i_id
            }
            return render(request, '', context)

        else:
            context = {
                'num': num,
                'count': count,
                'form': form,
                'r_id': r_id,
                'i_id': i_id
            }
            return render(request, 'plan/post.html', context)
    else:
        if request.user.has_perm('plan.add_plan'):
            plan = Plan()
            plan.r_id = Rubric.objects.get(pk=int(r_id))
            plan.content = ''
            plan.responsible = ''
            plan.termin = ''
            plan.generalization = ''
            plan.note = ''
            plan.direction_id = Direction.objects.filter(name='')[0]
            plan.purpose_id = Purpose.objects.filter(name='')[0]
            max_sort_field = Plan.objects.filter(r_id=r_id).aggregate(
                Max('sort'))
            # aggregate() returns None for 'sort__max' when no plans exist yet
            srt = max_sort_field['sort__max'] or 0
            plan.sort = srt + 1
            plan.plantable_id = Plantable.objects.get(pk=12)
            plan.save()
            form = PlanForm(instance=plan)
            i_id = plan.id
            plans = Plan.objects.filter(r_id=r_id, plantable_id=table)
            count = len(plans)
            num = count
            context = {
                'num': num,
                'count': count,
                'form': form,
                'r_id': r_id,
                'i_id': i_id
            }
            return render(request, 'plan/post.html', context)