def cleanup(self, quiet=False):
    idle_max = int(aboutconfig('useractivity.idle_max', IDLE_MAX_DEFAULT))
    assert idle_max >= 0
    if not quiet:
        print('Useractivity cleanup, running prune_idlers '
              'with idle_max set to %d seconds.' % idle_max)
    pruned_users = prune_idlers(idle_max)
    if not quiet:
        print('Sent logout signal for %d logged on user(s): %s' % (
            len(pruned_users),
            ', '.join(i.username for i in pruned_users)))

    # Prune the table every now and then. Since we recommend that this
    # command is run every minute, this should make the prune job run
    # only about once or twice per day. (Or sometimes more often, or
    # less often :P)
    if randint(0, 719) == 0:
        keep_days = int(aboutconfig('useractivity.keep_days',
                                    KEEP_DAYS_DEFAULT))
        if keep_days > 0:
            if not quiet:
                print('Useractivity cleanup, running prune_table keeping '
                      '%s days.' % keep_days)
            prune_table(keep_days)
        else:
            if not quiet:
                print('Useractivity cleanup, prune_table disabled.')
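# Quick sanity check on the randint(0, 719) throttle above (illustration
# only, not part of the original module): with the cleanup command run
# once per minute, the prune_table branch fires with probability 1/720
# per run, i.e. about twice per day on average.
runs_per_day = 24 * 60          # cleanup invoked every minute
prune_probability = 1.0 / 720   # randint(0, 719) == 0
expected_prunes_per_day = runs_per_day * prune_probability  # == 2.0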
def process_request(self, request):
    # For speed reasons, we'll bypass the regular request.user.*
    # lookups and get the user_id from the session directly. If we use
    # memcached users, this can save us a query or more.
    auth_userid = request.session.get('_auth_user_id')

    # Store the user_id so we can compare it after the request has been
    # processed.
    try:
        request._useractivitylog_user_pk = auth_userid
    except AttributeError:
        request._useractivitylog_user_pk = None  # AnonymousUser has no pk

    # A cronjob "./manage useractivity cleanup -v0" should be run every
    # minute or so. It cleans up the activitylog, ridding it of open
    # entries whose last_activity is too old. This has to happen or
    # there will never be an "implicit logout".
    if settings.DEBUG:
        # .. or you could use this, sufficient for debug mode.
        prune_idlers(
            int(aboutconfig('useractivity.idle_max', IDLE_MAX_DEFAULT)))

    # Mark users as "active again" before the response is generated.
    # This way the user's logged_in signal can prepare new data before
    # the view is loaded (that is, if it triggers an implicit login).
    if request._useractivitylog_user_pk is not None:
        user_pk = request._useractivitylog_user_pk
        ip_address = request.META['REMOTE_ADDR']
        mark_active(user_pk, ip_address=ip_address, request=request)
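# The comment above refers to a per-minute cronjob. A possible crontab
# entry could look like the line below; the project path is an
# assumption, only the command itself is taken from the comment:
#
#   * * * * *  cd /path/to/project && ./manage useractivity cleanup -v0
#
# (-v0 presumably maps to the quiet mode of the cleanup() command.)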
def __init__(self, *args, **kwargs):
    super(MollieSmsBackend, self).__init__(*args, **kwargs)
    self.url = aboutconfig('sms.backends.sms_mollie.url',
                           URL_CHOICES[0]).encode('utf-8')
    self.default_args = default_args = {}
    default_args['username'] = \
        aboutconfig('sms.backends.sms_mollie.username').encode('utf-8')
    default_args['md5_password'] = \
        aboutconfig('sms.backends.sms_mollie.md5pass').encode('utf-8')
    default_args['gateway'] = \
        aboutconfig('sms.backends.sms_mollie.gateway', '2').encode('utf-8')
    default_args['charset'] = 'UTF-8'
    default_args['type'] = 'normal'  # SMSTYPE_CHOICES
    default_args['replace_illegal_chars'] = 'true'

    # If dlrurl is set, we feed it to mollie. If it's not set, we
    # use the default as set through the mollie interface.
    delivery_report_url = \
        aboutconfig('sms.backends.sms_mollie.dlrurl', '').encode('utf-8')
    if delivery_report_url != '':
        default_args['dlrurl'] = delivery_report_url
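# Illustrative aboutconfig settings for the mollie backend. The keys are
# the ones read in __init__ above; the values are made-up placeholders,
# not real credentials or defaults:
#
#   sms.backends.sms_mollie.url      = <one of URL_CHOICES>
#   sms.backends.sms_mollie.username = example-account
#   sms.backends.sms_mollie.md5pass  = <md5 hex of the password, judging
#                                       by the key name>
#   sms.backends.sms_mollie.gateway  = 2
#   sms.backends.sms_mollie.dlrurl   = https://example.com/sms/dlr/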
def _logpath():
    '''
    Returns the logging path as defined in the aboutconfig autolog.path
    setting.

    The autolog.path setting is a colon-delimited list of paths to try.
    The paths are tried from left to right; a relative path is taken
    relative to the first path found in the PYTHONPATH environment
    variable, or the "current directory" if that is empty. If a path
    does not exist, _logpath attempts to create it. Only when that
    fails does it move on to the next path.
    '''
    pathstr = select_writable_path(aboutconfig('autolog.path').split(':'))
    assert pathstr is not None, ('No usable logging path found in '
                                 'autolog.path setting!')
    return pathstr
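# select_writable_path is defined elsewhere; the sketch below only
# illustrates the behaviour the docstring describes (try each candidate,
# create it if needed, fall through on failure). It is an assumption of
# how such a helper could look, not the actual implementation, and it
# omits the PYTHONPATH-relative resolution mentioned above.
import os


def _example_select_writable_path(paths):
    for path in paths:
        if not path:
            continue
        try:
            if not os.path.isdir(path):
                os.makedirs(path)
        except OSError:
            continue  # cannot create it, try the next candidate
        if os.access(path, os.W_OK):
            return path
    return None


# E.g. with aboutconfig('autolog.path') == '/var/log/myapp:log', the
# absolute path is tried first; the relative 'log' directory is only
# used if /var/log/myapp cannot be created or written to.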
def send_sms(self, message, reply_to=None, tariff_cent=None):
    is_premium = reply_to is not None
    append_meta = {}
    extra_args = {
        'BATCHID': '%s-%s' % (self.batch_prefix or 'default', message.id),
        'N': message.remote_address,
        'M': message.body,
        'O': message.local_address,
    }

    if is_premium:
        if tariff_cent is None:
            extra_args['RATE'] = int(aboutconfig(
                'sms.backends.sms_wireless.tariff', 40))
        else:
            extra_args['RATE'] = tariff_cent

        # session_id is required for the kickback fee if available
        session_id = _get_wireless_session_id(reply_to)
        if session_id is not None:
            append_meta['wireless_session_id'] = session_id
            extra_args['SESSIONID'] = session_id

        # operator is required for the kickback fee if available
        if reply_to.remote_operator is not None:
            extra_args['OPR'] = reply_to.remote_operator.entire_code('')
            message.remote_operator = reply_to.remote_operator

        sent, info = self.request(REQUEST_TYPE_PSMS, extra_args)
    else:
        # message concatenation is only available for normal SMS,
        # otherwise the receiver would be billed tariff_cent for each
        # received message
        # (can't imagine why, isn't that the whole purpose of this
        # business?)
        if len(message.body) > 160:
            extra_args['CONCAT'] = '1'
        sent, info = self.request(REQUEST_TYPE_SMS, extra_args)

    append_meta['wireless_response'] = info
    if sent:
        message.status = 'pnd'
    else:
        message.status = 'nak'
    message.meta_append(append_meta, commit=False)
    message.save()
    return sent
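# Note on the 160-character check above: a single GSM-7 text message
# holds at most 160 characters; longer bodies are sent as concatenated
# parts of 153 characters each (the remainder goes to the UDH header).
# A rough part-count estimate, assuming plain GSM-7 text; this helper is
# an illustration only and is not used by the backend:
def _estimate_sms_parts(body):
    if len(body) <= 160:
        return 1
    return -(-len(body) // 153)  # ceiling division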
def __init__(self, *args, **kwargs):
    super(WirelessSmsBackend, self).__init__(*args, **kwargs)
    self.url = aboutconfig(
        'sms.backends.sms_wireless.url',
        'http://gateway.wireless-services.nl/').rstrip('/').encode('utf-8')
    self.backup_url = aboutconfig(
        'sms.backends.sms_wireless.backup_url',
        'http://gateway2.wireless-services.nl/').rstrip('/').encode('utf-8')
    self.password = aboutconfig(
        'sms.backends.sms_wireless.password').encode('utf-8')
    self.batch_prefix = aboutconfig(
        'sms.backends.sms_wireless.batch_prefix').encode('utf-8')
    self.default_args = {
        'API': API_VERSION,
        'AUTHTYPE': 'sha1',
        'TEST': aboutconfig(
            'sms.backends.sms_wireless.test', '0').encode('utf-8'),
        'UID': aboutconfig(
            'sms.backends.sms_wireless.username').encode('utf-8'),
        'VERSION': WIRELESS_API_VERSION,
        'ONUM': 1,  # 0 is international number or 1 is national shortcode
        'NOT': '1',  # enable notification request
        'MSGID': aboutconfig(
            'sms.backends.sms_wireless.message_id').encode('utf-8'),
    }
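# Illustrative aboutconfig settings for the wireless-services backend.
# The keys are the ones read in __init__ here (plus the tariff key used
# by send_sms above); the values are placeholders, not real credentials:
#
#   sms.backends.sms_wireless.url          = http://gateway.wireless-services.nl/
#   sms.backends.sms_wireless.backup_url   = http://gateway2.wireless-services.nl/
#   sms.backends.sms_wireless.username     = example-account
#   sms.backends.sms_wireless.password     = <secret>
#   sms.backends.sms_wireless.batch_prefix = myproject
#   sms.backends.sms_wireless.message_id   = <gateway-assigned message id>
#   sms.backends.sms_wireless.test         = 0
#   sms.backends.sms_wireless.tariff       = 40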
def mark_active(user_pk, ip_address, request=None):
    '''
    Mark already existing useractivitylog entries as not being expired
    (update the last_activity timestamp). If no entry is found, a new
    one is created as an "implicit login" and a logged_in signal is
    sent. The request object is passed so signal listeners can do
    something with it if needed.
    '''
    # Previously, we did this:
    #   updated = (UserActivityLog.objects
    #              .filter(user__pk=user_pk, explicit_logout=None)
    #              .update(last_activity=datetime.now()))
    #   if updated == 0: mark_login(...)
    #
    # However, that turned out to be very deadlock prone in postgres. A
    # second thread would do the same, and then this first transaction
    # would fail -- first at the end -- because the other one committed
    # it:
    #
    # (It looks like both threads get the SHARE LOCK and then
    # try to get the EXCLUSIVE LOCK.)
    # > 2010-08-26 11:30:45 CEST DETAIL: Process 18487 waits for
    #   ShareLock on transaction 15611944; blocked by process 18488.
    # > Process 18488 waits for ShareLock on transaction 15611943;
    #   blocked by process 18487.
    # > Process 18487:
    #     UPDATE "useractivity_useractivitylog" SET "last_activity" =
    #       E'2010-08-26 11:30:44.699673'
    #     WHERE ("useractivity_useractivitylog"."user_id" = 2 AND
    #            "useractivity_useractivitylog"."explicit_logout" IS NULL)
    # > Process 18488:
    #     UPDATE "useractivity_useractivitylog" SET "last_activity" =
    #       E'2010-08-26 11:30:44.731305'
    #     WHERE ("useractivity_useractivitylog"."user_id" = 2 AND
    #            "useractivity_useractivitylog"."explicit_logout" IS NULL)
    # > 2010-08-26 11:30:45 CEST HINT: See server log for query details.
    # > 2010-08-26 11:30:45 CEST STATEMENT:
    #     UPDATE "useractivity_useractivitylog" SET "last_activity" =
    #       E'2010-08-26 11:30:44.699673'
    #     WHERE ("useractivity_useractivitylog"."user_id" = 2 AND
    #            "useractivity_useractivitylog"."explicit_logout" IS NULL)
    #
    # So, instead, we separate the UPDATE from the SELECT and only
    # update the timestamp if it is older than a certain amount of time.
    #
    # But first, we check the cache to see if we're not overdoing it.
    cache_key = CACHE_KEY_FMT % (user_pk, ip_address)
    log_ids = cache.get(cache_key)
    if log_ids is None:
        log_ids = list(UserActivityLog.objects.filter(
            user__pk=user_pk, ip_address=ip_address, explicit_logout=None
        ).values_list('id', 'last_activity'))

    if not log_ids:
        # There were no results? Implicit login.
        mark_login(user_pk, ip_address=ip_address, explicit_login=False,
                   request=request)
    else:
        update_ids = []
        now = datetime.now()
        idle_max = int(aboutconfig('useractivity.idle_max',
                                   IDLE_MAX_DEFAULT))
        need_refresh_after = idle_max / 2 - 10  # 120 seconds -> 50
        old = now - timedelta(seconds=need_refresh_after)
        for i, (log_id, time) in enumerate(log_ids):
            if time < old:
                update_ids.append(log_id)
                log_ids[i] = (log_id, now)  # overwrite cache with new value

        # Update the last_activity on these items.
        if update_ids:
            rows = UserActivityLog.objects.filter(
                id__in=update_ids, explicit_logout=None
            ).update(last_activity=now)
            # Updating less than expected?
            if rows != len(update_ids):
                mark_login(user_pk, ip_address=ip_address,
                           explicit_login=False, request=request)
            else:
                cache.set(cache_key, log_ids, 900)
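# Worked example for the refresh threshold above (illustration only):
# with an idle_max of 120 seconds, as used in the inline comment,
# entries are considered stale after 120 / 2 - 10 = 50 seconds.
# Refreshing at roughly half the idle_max (minus a safety margin) keeps
# an active user's last_activity comfortably inside the window that
# prune_idlers checks. Note the expression relies on integer division
# (use // to keep that behaviour on Python 3).
idle_max = 120
need_refresh_after = idle_max // 2 - 10  # == 50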
def send_sms(self, message):
    '''
    Use the mollie gateway to send out a message.

    The only difference between "premium" and "regular" sms is really
    whether we can charge the user or not. So the tariff_cent decides
    which type we'll use. Like the other extra parameters, this should
    be found in the TextMessageExtra addon. If there is no such item,
    the tariff_cent will be assumed to be 0.

    For all premium sms, we must add a couple of parameters: tariff,
    (member), shortcode, keyword, (mid). These are required to be found
    in the TextMessageExtra add-on. For non-subscription premium sms
    you need the Mollie mid parameter for replies. For subscription
    premium sms, you do not need the mid, but must use the correct
    shortcode and keyword.
    '''
    try:
        extra = message.extra
    except TextMessageExtra.DoesNotExist:
        is_premium = False
    else:
        is_premium = bool(extra.tariff_cent)

    # Select optional parameters from the metadata. For now we only
    # accept 'gateway'.
    gateway = None  # none means default, other means override
    meta = message.meta
    if meta and isinstance(meta, list) and isinstance(meta[0], dict):
        gateway = meta[0].get('gateway')
    elif not is_premium and message.remote_operator:
        # Optionally use custom gateway for specific countries, like
        # 1 (business+) for Vodafone which is unreliable with 2
        # (regular). Same for Belgium where 50 is more reliable and
        # it doesn't unnecessarily quote-enclose the shortcode.
        #   sms.backends.sms_mollie.gateway.204.04 = 1
        #   sms.backends.sms_mollie.gateway.206 = 50
        oper_cc, oper_oc = (
            message.remote_operator.entire_code().split('-'))  # quick
        gateway = (
            aboutconfig('sms.backends.sms_mollie.gateway.%s.%s' %
                        (oper_cc, oper_oc)) or
            aboutconfig('sms.backends.sms_mollie.gateway.%s' %
                        (oper_cc,)) or
            None  # aboutconfig returns '' by default if not found
        )

    # Compile premium_args
    if is_premium:
        # Attempt to match the tariff to the available choices:
        # select one of the TARIFF_CHOICES, rounding upwards if
        # there is no exact match.
        tariff_cent = int(extra.tariff_cent)
        tariff_cent = ([
            j for i, j in enumerate(TARIFF_CHOICES)
            if ((-1, ) + TARIFF_CHOICES)[i] < tariff_cent <= j]  # noqa
            or [TARIFF_CHOICES[-1]])[0]

        # It is either a reply or a subscription message.
        mollie_id = extra.foreign_reference or None
        subscribed = not mollie_id

        premium_args = {
            'tariff': '%03d' % tariff_cent,  # a three-char tariff
            'member': ('false', 'true')[bool(subscribed)],
        }
        if extra.shortcode is not None:
            premium_args['shortcode'] = \
                unicode(extra.shortcode).encode('utf-8')
        if extra.keyword is not None:
            premium_args['keyword'] = \
                unicode(extra.keyword).encode('utf-8')
        if mollie_id:
            premium_args['mid'] = unicode(mollie_id).encode('utf-8')
    else:
        premium_args = None

    # Send it on
    new_status, body_count = self._send(
        body=message.body,
        recipient_list=[message.remote_address],
        local_address=message.local_address,
        gateway=gateway,
        reference=message.id,
        premium_args=premium_args)

    # Update info and return status
    assert body_count == message.body_count or message.body_count == 1, \
        ("Expected lazy man's 1 or correct body count (%d != %d for %d)" %
         (body_count, message.body_count, message.id))
    message.body_count = body_count
    if new_status != 'retry':  # some things you have to try again
        message.status = new_status
    message.save()

    return new_status == 'pnd'  # moved to pending => success
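# Worked example of the tariff rounding in send_sms above. The choices
# tuple used here is hypothetical (the real TARIFF_CHOICES is defined
# elsewhere in this module); it only demonstrates the round-up rule.
def _round_up_tariff(tariff_cent, choices=(25, 40, 55, 70)):
    # Pick the first choice that the requested tariff fits under,
    # falling back to the highest choice when it exceeds all of them.
    return ([
        j for i, j in enumerate(choices)
        if ((-1, ) + choices)[i] < tariff_cent <= j
    ] or [choices[-1]])[0]


# _round_up_tariff(25) == 25   (exact match)
# _round_up_tariff(35) == 40   (rounded up to the next choice)
# _round_up_tariff(999) == 70  (clamped to the highest choice)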