Example 1
    def test_run_with_sync(self):
        """
        Test running `pdb_wipe` and syncing data from
        test.peeringdb.com
        """

        dates = {}

        for reftag, cls in REFTAG_MAP.items():
            assert cls.objects.all().count() > 1
            dates[reftag] = cls.objects.all().first().created.replace(
                tzinfo=UTC())

        settings.TUTORIAL_MODE = True
        call_command(
            "pdb_wipe",
            commit=True,
            load_data=True,
            load_data_url="https://test.peeringdb.com/api",
        )
        settings.TUTORIAL_MODE = False

        for reftag, cls in REFTAG_MAP.items():
            created = cls.objects.all().first().created.replace(tzinfo=UTC())
            assert created != dates[reftag]
            assert cls.objects.all().count() > 1
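
Toggling `settings.TUTORIAL_MODE` by hand, as this test does, leaves the flag set if the command raises. A sketch of the same call using Django's `override_settings` context manager, which restores the setting automatically:

from django.core.management import call_command
from django.test import override_settings

# Same wipe-and-sync as in the test above, but TUTORIAL_MODE is
# restored even if call_command raises.
with override_settings(TUTORIAL_MODE=True):
    call_command(
        "pdb_wipe",
        commit=True,
        load_data=True,
        load_data_url="https://test.peeringdb.com/api",
    )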
Example 2
def test_org_protection_sponsor(db):
    """
    Test that an organization cannot be deleted while it has
    an active sponsorship.
    """

    now = datetime.datetime.now().replace(tzinfo=UTC())

    org = Organization.objects.create(status="ok", name="SponsorOrg")
    sponsor = Sponsorship.objects.create(
        start_date=now - datetime.timedelta(days=1),
        end_date=now + datetime.timedelta(days=1))
    sponsor.orgs.add(org)

    assert org.sponsorship.active

    assert not org.deletable
    assert "Organization is currently an active sponsor" in org.not_deletable_reason

    with pytest.raises(ProtectedAction):
        org.delete()

    sponsor.delete()

    org.delete()
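
The test exercises a guard pattern: `deletable` reports False with a reason while a sponsorship is active, and `delete()` raises `ProtectedAction` in that state. A minimal self-contained sketch of the pattern, not the actual PeeringDB implementation:

class ProtectedAction(Exception):
    pass

class OrgSketch:
    """Illustrative stand-in for a protected model."""

    def __init__(self):
        self.sponsorship_active = False
        self.not_deletable_reason = None

    @property
    def deletable(self):
        if self.sponsorship_active:
            self.not_deletable_reason = "Organization is currently an active sponsor"
            return False
        return True

    def delete(self):
        if not self.deletable:
            raise ProtectedAction(self.not_deletable_reason)
        # ... perform the actual deletion here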
Example 3
def setup_data():

    call_command("pdb_generate_test_data", limit=3, commit=True)

    date_past = DATE_PAST.replace(tzinfo=UTC())

    # one object of each type moved to the past

    for tag in ["fac", "net", "org", "ix"]:
        for obj in REFTAG_MAP[tag].objects.all():
            obj.created = obj.updated = date_past
            obj.save()
            break

    # create users

    User = get_user_model()

    for i in range(1, 7):
        User.objects.create_user(
            "user_{}".format(i), "user_{}@localhost".format(i), "secret"
        )

    # move users 4, 5, 6 to the past

    User.objects.filter(pk__in=[4, 5, 6]).update(created=date_past)

    # verify all users except 1 and 4

    for user in User.objects.exclude(pk__in=[1, 4]):
        user.set_verified()
Example 4
    def handle(self, *args, **options):
        date = options.get('date', None)
        if date:
            dt = datetime.datetime.strptime(date, "%Y%m%d")
        else:
            dt = datetime.datetime.now()

        # move to end of day
        dt = dt.replace(hour=23, minute=59, second=59, tzinfo=UTC())

        print("{}".format(dt.replace(tzinfo=None).strftime("%Y-%m-%d")))
        print("-------------")

        stats = {"users": 0}

        for tag in self.tags:
            model = REFTAG_MAP[tag]
            stats[tag] = 0
            for obj in model.objects.filter(created__lte=dt):
                if self.status_at_date(obj, dt) == "ok":
                    stats[tag] += 1

            print "{}: {}".format(tag, stats[tag])

        for user in get_user_model().objects.filter(created__lte=dt):
            if user.is_verified:
                stats["users"] += 1

        print "users: {}".format(stats["users"])
Example 5
    def handle(self, *args, **options):
        date = options.get('date', None)
        if date:
            dt = datetime.datetime.strptime(date, "%Y%m%d")
        else:
            dt = datetime.datetime.now()

        # move to end of day
        dt = dt.replace(hour=23, minute=59, second=59, tzinfo=UTC())
        date = dt.replace(tzinfo=None).strftime("%Y-%m-%d")
        stats = {"users": 0}

        for tag in self.tags:
            model = REFTAG_MAP[tag]
            stats[tag] = 0
            for obj in model.objects.filter(created__lte=dt):
                if self.status_at_date(obj, dt) == "ok":
                    stats[tag] += 1

        for user in get_user_model().objects.filter(created__lte=dt):
            if user.is_verified:
                stats["users"] += 1

        codec = options.get("format")
        if codec == "text":
            print(date)
            print("-------------")
            for key, value in stats.items():
                print("{}: {}".format(key, value))

        elif codec == "json":
            print(json.dumps({date: stats}))

        else:
            raise Exception("unknown format {}".format(codec))
Example 6
    def migrate_ixlan_id_sql(self, old_id, new_id):
        """
        Migrate ixlan id so it matches its parent id

        This is called automatically during `migrate_ixlan_id` and should not be
        called manually

        This executes raw sql queries

        Foreign key checks will be temporarily disabled
        """

        now = datetime.datetime.now().replace(tzinfo=UTC())

        # query that updates the ixlan table

        queries = [
            (
                "update {} set id=%s, updated=%s where id=%s".format(
                    IXLan._meta.db_table),
                [new_id, now, old_id],
            ),
        ]

        # queries that update fk relations

        for model in self.fk_relations:
            queries.append((
                "update {} set ixlan_id=%s, updated=%s where ixlan_id=%s".
                format(model._meta.db_table),
                [new_id, now, old_id],
            ))

        # queries that update generic relations

        for model in self.generic_relations:
            queries.append((
                "update {} set object_id=%s where object_id=%s and content_type_id=%s"
                .format(model._meta.db_table),
                [new_id, old_id, self.ixlan_content_type_id],
            ))

        if not self.commit:
            return

        # execute queries

        with connection.cursor() as cursor:

            # since we are updating primary keys that are referenced
            # by foreign key constraints we need to temporarily turn
            # OFF foreign key checks

            cursor.execute("set foreign_key_checks=0")
            for query in queries:
                cursor.execute(query[0], query[1])

            cursor.execute("set foreign_key_checks=1")
Example 7
    def status(self, obj):
        now = datetime.datetime.now().replace(tzinfo=UTC())
        if not obj.start_date or not obj.end_date:
            return _("Not Set")

        if obj.start_date <= now and obj.end_date >= now:
            if not obj.logo:
                return _("Logo Missing")
            return _("Active")
        elif now > obj.end_date:
            return _("Over")
        else:
            return _("Waiting")
Example 8
    def generate_for_current_date(self):
        """
        Generate and return stats for the current date

        Returns

        `dict` with `stats` and `dt` keys
        """

        dt = datetime.datetime.now().replace(tzinfo=UTC())

        stats = self.stats(dt)

        for tag in self.tags:
            model = REFTAG_MAP[tag]
            stats[tag] = model.objects.filter(status="ok").count()

        return {"stats": stats, "dt": dt}
Example 9
def setup_data():

    call_command("pdb_generate_test_data", limit=3, commit=True)

    date_past = DATE_PAST.replace(tzinfo=UTC())

    # one object of each type moved to the past

    for tag in ["fac", "net", "org", "ix"]:
        for obj in REFTAG_MAP[tag].objects.all():
            obj.created = obj.updated = date_past
            obj.save()
            break

    # create users

    User = get_user_model()

    for i in range(1, 7):
        User.objects.create_user(f"user_{i}", f"user_{i}@localhost", "secret")

    # move users 4, 5, 6 to the past

    User.objects.filter(username__in=["user_4", "user_5", "user_6"]).update(
        created=date_past)

    # verify all users except 1 and 4

    user_group, _ = Group.objects.get_or_create(name="user")
    guest_group, _ = Group.objects.get_or_create(name="guest")

    settings.USER_GROUP_ID = user_group.id
    settings.GUEST_GROUP_ID = guest_group.id

    with override_group_id():
        for user in User.objects.exclude(username__in=["user_1", "user_4"]):
            user.set_verified()
Example 10
    def generate_for_past_date(self, dt):
        """
        Generate and return stats for a past date

        Argument(s)

        - dt: `datetime` instance

        Returns

        `dict` with `stats` and `dt` keys
        """

        # move to end of day
        dt = dt.replace(hour=23, minute=59, second=59, tzinfo=UTC())
        stats = self.stats(dt)

        for tag in self.tags:
            model = REFTAG_MAP[tag]
            stats[tag] = 0
            for obj in model.objects.filter(created__lte=dt):
                if self.status_at_date(obj, dt) == "ok":
                    stats[tag] += 1

        return {"stats": stats, "dt": dt}
Example 11
    def get_queryset(self):
        """
        Prepare the queryset
        """

        qset = self.model.handleref.all()

        self.request.meta_response = {}

        if hasattr(self.serializer_class, "prepare_query"):
            try:
                qset, p_filters = self.serializer_class.prepare_query(
                    qset, **self.request.query_params)
            except (ValidationError, ValueError, TypeError) as inst:
                raise RestValidationError({"detail": str(inst)})
            except FieldError:
                raise RestValidationError({"detail": "Invalid query"})

        else:
            p_filters = {}

        try:
            since = int(float(self.request.query_params.get("since", 0)))
        except ValueError:
            raise RestValidationError({
                "detail":
                "'since' needs to be a unix timestamp (epoch seconds)"
            })
        try:
            skip = int(self.request.query_params.get("skip", 0))
        except ValueError:
            raise RestValidationError(
                {"detail": "'skip' needs to be a number"})
        try:
            limit = int(self.request.query_params.get("limit", 0))
        except ValueError:
            raise RestValidationError(
                {"detail": "'limit' needs to be a number"})

        try:
            depth = int(self.request.query_params.get("depth", 0))
        except ValueError:
            raise RestValidationError(
                {"detail": "'depth' needs to be a number"})

        field_names = dict([(fld.name, fld)
                            for fld in self.model._meta.get_fields()] +
                           self.serializer_class.queryable_relations())

        date_fields = ["DateTimeField", "DateField"]

        # filters
        filters = {}
        for k, v in list(self.request.query_params.items()):

            v = unidecode.unidecode(v)

            if k[-3:] == "_id" and k not in field_names:
                k = k[:-3]

            xl = self.serializer_class.queryable_field_xl

            # only apply filter if the field actually exists and uses a
            # valid suffix
            m = re.match("^(.+)__(lt|lte|gt|gte|contains|startswith|in)$", k)

            # run queryable field translation
            # on the targeted field so that the filter is actually run on
            # a field that django orm is aware of - which in most cases is
            # identical to the serializer field anyways, but in some cases it
            # may need to be substituted
            if m:
                flt = xl(m.group(1))
                k = k.replace(m.group(1), flt, 1)
                if flt[-3:] == "_id" and flt not in field_names:
                    flt = flt[:-3]
            else:
                k = xl(k)
                flt = None

            # prepare db filters
            if m and flt in field_names:
                # filter by function provided in suffix
                try:
                    intyp = field_names.get(flt).get_internal_type()
                except AttributeError:
                    intyp = "CharField"

                # for greater-than date checks we want to force the time to
                # 1 millisecond before midnight
                if intyp in date_fields:
                    if m.group(2) in ["gt", "lte"]:
                        if len(v) == 10:
                            v = "%s 23:59:59.999" % v

                    # convert to datetime and make tz aware
                    try:
                        v = DateTimeField().to_python(v)
                    except ValidationError as inst:
                        raise RestValidationError({"detail": str(inst[0])})
                    if timezone.is_naive(v):
                        v = timezone.make_aware(v)
                    if "_ctf" in self.request.query_params:
                        self.request._ctf = {
                            "{}__{}".format(m.group(1), m.group(2)): v
                        }

                # contains should become icontains because we always
                # want it to do case-insensitive checks
                if m.group(2) == "contains":
                    filters["%s__icontains" % flt] = v
                elif m.group(2) == "startswith":
                    filters["%s__istartswith" % flt] = v
                # when the 'in' filter is found, attempt to split the
                # provided search value into a list
                elif m.group(2) == "in":
                    filters[k] = v.split(",")
                else:
                    filters[k] = v
            elif k in field_names:
                # filter exact matches
                try:
                    intyp = field_names.get(k).get_internal_type()
                except AttributeError:
                    intyp = "CharField"
                if intyp == "ForeignKey":
                    filters["%s_id" % k] = v
                elif intyp == "DateTimeField" or intyp == "DateField":
                    filters["%s__startswith" % k] = v
                else:
                    filters["%s__iexact" % k] = v

        if filters:
            try:
                qset = qset.filter(**filters)
            except (ValidationError, ValueError, TypeError) as inst:
                raise RestValidationError({"detail": str(inst)})
            except FieldError:
                raise RestValidationError({"detail": "Invalid query"})

        # check if request qualifies for a cache load
        filters.update(p_filters)
        api_cache = APICacheLoader(self, qset, filters)
        if api_cache.qualifies():
            raise CacheRedirect(api_cache)

        if not self.kwargs:
            if since > 0:
                # .filter(status__in=["ok","deleted"])
                qset = (qset.since(
                    timestamp=datetime.datetime.fromtimestamp(since).replace(
                        tzinfo=UTC()),
                    deleted=True,
                ).order_by("updated").filter(status__in=["ok", "deleted"]))
            else:
                qset = qset.filter(status="ok")
        else:
            qset = qset.filter(status__in=["ok", "pending"])

        if not self.kwargs:
            if limit > 0:
                qset = qset[skip:skip + limit]
            else:
                qset = qset[skip:]

            adrl = getattr(settings, "API_DEPTH_ROW_LIMIT", 250)
            row_count = qset.count()
            if adrl and depth > 0 and row_count > adrl:
                qset = qset[:adrl]
                self.request.meta_response["truncated"] = (
                    "Your search query (with depth %d) returned more than %d rows and has been truncated. Please be more specific in your filters, use the limit and skip parameters to page through the result set, or drop the depth parameter"
                    % (depth, adrl))

        if depth > 0 or self.kwargs:
            return self.serializer_class.prefetch_related(
                qset, self.request, is_list=(len(self.kwargs) == 0))
        else:
            return qset
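
The parameters parsed above map directly onto what an API client sends. A hedged example against the public PeeringDB API, exercising a suffix filter, paging, and nesting depth (the endpoint is real, but treat the exact query as illustrative):

import requests

resp = requests.get(
    "https://www.peeringdb.com/api/net",
    params={
        "name__contains": "exchange",  # becomes an icontains filter
        "limit": 10,
        "skip": 0,
        "depth": 1,  # expand single-level relations
    },
)
resp.raise_for_status()
for row in resp.json()["data"]:
    print(row["id"], row["name"])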
Example 12
def search(term):
    """
    Search searchable objects (ixp, network, facility ...) by term

    Returns result dict
    """

    search_tags = ("fac", "ix", "net")
    ref_dict = peeringdb_server.rest.ref_dict()
    t = time.time()

    if not SEARCH_CACHE.get("search_index"):

        # whole db takes 5ish seconds, too slow to cache inline here
        search_index = {
            tag:
            {obj.id: obj
             for obj in model.objects.filter(status__in=["ok"])}
            for tag, model in ref_dict.items() if tag in search_tags
        }

        for typ, stor in search_index.items():
            print "CACHED: %d items in %s" % (len(stor), typ)

        tag_id_re = re.compile("(" + "|".join(search_tags) + r"|asn|as)(\d+)")

        # FIXME: for now let's force a flush every 120 seconds, might want to
        # look at an event based update solution instead
        SEARCH_CACHE.update(search_index=search_index,
                            time=t,
                            update_t=t,
                            tag_id_re=tag_id_re)
    else:
        search_index = SEARCH_CACHE.get("search_index")
        tag_id_re = SEARCH_CACHE.get("tag_id_re")

    # while we are using signals to make sure that the search index gets updated
    # whenever a model is saved, right now we still have updates from external
    # sources to which those signals cannot be easily connected (importer,
    # fac_merge command etc.)
    #
    # in order to reflect search index changes made by external sources
    # we need to find new / updated objects regularly and update the
    # search index from them
    #
    # FIXME: this can be taken out when we turn the importer off - or just leave
    # it in as a fail-safe as it is fairly unobtrusive
    ut = SEARCH_CACHE.get("update_t", 0)
    if t - ut > 600:
        dut = datetime.datetime.fromtimestamp(ut).replace(tzinfo=UTC())
        print "Updating search index with newly created/updates objects"
        search_index_update = {
            tag: {
                obj.id: obj
                for obj in model.objects.filter(
                    Q(created__gte=dut)
                    | Q(updated__gte=dut)).filter(status="ok")
            }
            for tag, model in ref_dict.items() if tag in search_tags
        }
        for tag, objects in search_index_update.items():
            if tag not in SEARCH_CACHE["search_index"]:
                SEARCH_CACHE["search_index"][tag] = dict([
                    (obj.id, obj)
                    for obj in ref_dict[tag].objects.filter(status="ok")
                ])
            SEARCH_CACHE["search_index"][tag].update(objects)

        SEARCH_CACHE["update_t"] = t

    # FIXME: for some reason this gets unset sometimes - need to figure out
    # why - for now just recreate it when it's missing
    if not tag_id_re:
        tag_id_re = re.compile("(" + "|".join(search_tags) + r"|asn|as)(\d+)")
        SEARCH_CACHE["tag_id_re"] = tag_id_re

    print "Search index retrieval took %.5f seconds" % (time.time() - t)

    result = {tag: [] for tag, model in ref_dict.items()}

    term = unaccent(term)

    # try to convert to int for numeric search matching
    typed_q = {}
    try:
        typed_q["int"] = int(term)
    except ValueError:
        pass

    # check for ref tags
    try:
        match = tag_id_re.match(term)
        if match:
            typed_q[match.group(1)] = match.group(2)

    except ValueError:
        pass

    # FIXME  model should have a search_fields attr on it
    # this whole thing should be replaced with something more modular to get
    # rid of all the ifs
    for tag, index in search_index.items():
        for id, data in index.items():
            if unaccent(data.name).find(term) > -1:
                result[tag].append({
                    "id": id,
                    "name": data.search_result_name,
                    "org_id": data.org_id
                })
                continue

            if hasattr(
                    data,
                    "name_long") and unaccent(data.name_long).find(term) > -1:
                result[tag].append({
                    "id": id,
                    "name": data.search_result_name,
                    "org_id": data.org_id
                })
                continue

            if hasattr(data, "aka") and unaccent(data.aka).find(term) > -1:
                result[tag].append({
                    "id": id,
                    "name": data.search_result_name,
                    "org_id": data.org_id
                })
                continue

            if typed_q:
                if tag in typed_q:
                    if str(data.id).startswith(typed_q[tag]):
                        result[tag].append({
                            "id": id,
                            "name": data.search_result_name,
                            "org_id": data.org_id,
                        })
                        continue

                # search asn on everything? probably just if asn is in search
                # fields
                if hasattr(data, "asn"):
                    asn = typed_q.get(
                        "as", typed_q.get("asn", str(typed_q.get("int", ""))))
                    if asn and str(data.asn).startswith(asn):
                        result[tag].append({
                            "id": id,
                            "name": data.search_result_name,
                            "org_id": data.org_id,
                        })

    for k, items in result.items():
        result[k] = sorted(items, key=lambda row: row.get("name"))

    return result
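
A short usage sketch: `search` returns a dict keyed by ref tag, each value a list of `{id, name, org_id}` rows sorted by name. This assumes it runs inside the application, where the models and module-level caches it references are available:

# Illustrative call; "decix" is an arbitrary search term.
results = search("decix")
for tag, rows in results.items():
    for row in rows:
        print(tag, row["id"], row["name"])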