Example #1
0
        if instance.total_seconds < 0:
            raise ValidationError(
                "Total learning time should always be non-negative.")
        logging.debug("%s: total time (%d): %d seconds" %
                      (instance.user.username, instance.activity_type,
                       instance.total_seconds))

        # Save only completed log items to the UserLogSummary
        UserLogSummary.add_log_to_summary(instance)


@receiver(post_save, sender=UserLog)
def cull_records(sender, **kwargs):
    """
    post_save handler: cap the number of UserLog rows kept per
    (user, activity_type) pair, deleting the oldest rows beyond
    settings.USER_LOG_MAX_RECORDS_PER_USER.

    Runs only when a new record was just created and the limit setting is
    truthy (a None limit disables culling, out of the box).
    """
    # NOTE(review): original docstring said "Listen in to see when videos
    #   become available" — a copy-paste error; this handler culls logs.
    if settings.USER_LOG_MAX_RECORDS_PER_USER and kwargs["created"]:
        current_models = UserLog.objects.filter(
            user=kwargs["instance"].user,
            activity_type=kwargs["instance"].activity_type)
        # Hoist the count: the original queried it twice (two DB hits that
        #   could disagree under concurrent writes).
        n_records = current_models.count()
        if n_records > settings.USER_LOG_MAX_RECORDS_PER_USER:
            # Unfortunately, could not do an aggregate delete when doing a
            #   slice in query, so select the oldest overflow rows by PK
            #   and delete those.
            to_discard = current_models \
                .order_by("start_datetime")[0:n_records - settings.USER_LOG_MAX_RECORDS_PER_USER]
            UserLog.objects.filter(pk__in=to_discard).delete()


engine.add_syncing_models([VideoLog, ExerciseLog, UserLogSummary])
Example #2
0
        assert user.id, "Your user must have an ID before calling this function."

        if not cls.is_enabled():
            # Must delete, to make sure we don't get out of sync.
            cls.invalidate_cached_password(user=user)

        else:
            try:
                # Set the cached password.
                n_cached_iters = cls.iters_for_user_type(user)
                # TODO(bcipolli) Migrate this to an extended django class
                #   that uses get_or_initialize
                cached_password = get_object_or_None(
                    cls, user=user) or cls(user=user)
                cached_password.password = crypt(raw_password,
                                                 iterations=n_cached_iters)
                cached_password.save()
                logging.debug(
                    "Set cached password for user=%s; iterations=%d" %
                    (user.username, n_cached_iters))
            except Exception as e:
                # If we fail to create a cache item... just keep going--functionality
                #   can still move forward.
                logging.error(e)

    class Meta:
        """Django model options."""
        app_label = "securesync"  # for back-compat reasons


engine.add_syncing_models([Facility, FacilityGroup, FacilityUser])
Example #3
0
        cls.objects.filter(user=user).delete()

    @classmethod
    def set_cached_password(cls, user, raw_password):
        """Store a freshly crypted copy of ``raw_password`` for ``user``.

        When caching is disabled, any existing cached entry is deleted
        instead, so the cache can never drift out of sync with the real
        password.  Cache-write failures are logged and swallowed: caching
        is best-effort and must not block the caller.
        """
        assert user.id, "Your user must have an ID before calling this function."

        if not cls.is_enabled():
            # Must delete, to make sure we don't get out of sync.
            cls.invalidate_cached_password(user=user)
            return

        try:
            # Set the cached password, with an iteration count chosen
            #   per user type.
            iterations = cls.iters_for_user_type(user)
            # TODO(bcipolli) Migrate this to an extended django class
            #   that uses get_or_initialize
            entry = get_object_or_None(cls, user=user) or cls(user=user)
            entry.password = crypt(raw_password, iterations=iterations)
            entry.save()
            logging.debug("Set cached password for user=%s; iterations=%d" % (user.username, iterations))
        except Exception as e:
            # If we fail to create a cache item... just keep going--functionality
            #   can still move forward.
            logging.error(e)

    class Meta:
        """Django model options."""
        app_label = "securesync"  # for back-compat reasons


engine.add_syncing_models([Facility, FacilityGroup, FacilityUser])
Example #4
0
        # The bottom computation is more strict: user activity is from start until the last "action"
        #   recorded--in the current case, that means from login until the last moment an exercise or
        #   video log was updated.
        #instance.total_seconds = datediff(instance.end_datetime, instance.start_datetime, units="seconds")
        instance.total_seconds = 0 if not instance.last_active_datetime else datediff(instance.last_active_datetime, instance.start_datetime, units="seconds")

        # Confirm the result (output info first for easier debugging)
        if instance.total_seconds < 0:
            raise ValidationError("Total learning time should always be non-negative.")
        logging.debug("%s: total time (%d): %d seconds" % (instance.user.username, instance.activity_type, instance.total_seconds))

        # Save only completed log items to the UserLogSummary
        UserLogSummary.add_log_to_summary(instance)

@receiver(post_save, sender=UserLog)
def cull_records(sender, **kwargs):
    """
    post_save handler: cap the number of UserLog rows kept per
    (user, activity_type) pair, deleting the oldest rows beyond
    settings.USER_LOG_MAX_RECORDS_PER_USER.

    Runs only when a new record was just created and the limit setting is
    truthy (a None limit disables culling, out of the box).
    """
    # NOTE(review): original docstring said "Listen in to see when videos
    #   become available" — a copy-paste error; this handler culls logs.
    if settings.USER_LOG_MAX_RECORDS_PER_USER and kwargs["created"]:
        current_models = UserLog.objects.filter(user=kwargs["instance"].user, activity_type=kwargs["instance"].activity_type)
        # Hoist the count: the original queried it twice (two DB hits that
        #   could disagree under concurrent writes).
        n_records = current_models.count()
        if n_records > settings.USER_LOG_MAX_RECORDS_PER_USER:
            # Unfortunately, could not do an aggregate delete when doing a
            #   slice in query, so select the oldest overflow rows by PK
            #   and delete those.
            to_discard = current_models \
                .order_by("start_datetime")[0:n_records - settings.USER_LOG_MAX_RECORDS_PER_USER]
            UserLog.objects.filter(pk__in=to_discard).delete()


engine.add_syncing_models([VideoLog, ExerciseLog, UserLogSummary])
Example #5
0
            next_device = next_device or getattr(cur_link["device_zone"],
                                                 "signed_by")
            if next_device in devices_in_chain:
                logging.warn("loop detected.")
                break
            else:
                # So far, we're OK--keep looking for the (valid) end of the chain
                assert next_device.is_trusted() or next_device.get_zone(
                ) == zone
                devices_in_chain.add(next_device)
                chain.append({"device": next_device})

        # Validate the chain of trust to the zone zone_owner
        terminal_link = chain[-1]
        terminal_device = terminal_link["device"]
        obj = terminal_link["zone_invitation"] or terminal_link["device_zone"]
        if obj and not (terminal_device.is_creator(obj)
                        or terminal_device.is_trusted()):
            logging.warn("Could not verify chain of trust.")
        return chain


# No device data gets "synced" through the same sync mechanism as data--it is only synced
#   through the special hand-shaking mechanism
# ... except, now Device, DeviceZone, and Zone do--so that any changes to the models
#    will be synced.  The special handshake is necessary for new Device/Zone objects.
#
# These have circular dependencies, but because they've already been manually added,
#   dependencies aren't any issue.
engine.add_syncing_models([Device, Zone, DeviceZone], dependency_check=False)  # skip dependency check per the note above
Example #6
0
            elif cur_link["device"] == to_device or cur_link["device"].is_trusted():
                logging.debug("Found end of chain!")
                break;
            next_device = getattr(cur_link["zone_invitation"], "invited_by", None)
            next_device = next_device or getattr(cur_link["device_zone"], "signed_by")
            if next_device in devices_in_chain:
                logging.warn("loop detected.")
                break
            else:
                # So far, we're OK--keep looking for the (valid) end of the chain
                assert next_device.is_trusted() or next_device.get_zone() == zone
                devices_in_chain.add(next_device)
                chain.append({"device": next_device})

        # Validate the chain of trust to the zone zone_owner
        terminal_link = chain[-1]
        terminal_device = terminal_link["device"]
        obj = terminal_link["zone_invitation"] or terminal_link["device_zone"]
        if obj and not (terminal_device.is_creator(obj) or terminal_device.is_trusted()):
            logging.warn("Could not verify chain of trust.")
        return chain

# No device data gets "synced" through the same sync mechanism as data--it is only synced
#   through the special hand-shaking mechanism
# ... except, now Device, DeviceZone, and Zone do--so that any changes to the models
#    will be synced.  The special handshake is necessary for new Device/Zone objects.
#
# These have circular dependencies, but because they've already been manually added,
#   dependencies aren't any issue.
engine.add_syncing_models([Device, Zone, DeviceZone], dependency_check=False)  # skip dependency check per the note above