def socket_post_save_handler(sender, instance, created, using, **kwargs):
    if instance.is_live:
        if instance.has_changed('zip_file') and instance.old_value('zip_file'):
            instance.old_value('zip_file').delete(save=False)

        if instance.status == Socket.STATUSES.CHECKING:
            add_post_transaction_success_operation(SocketCheckerTask.delay, using=using,
                                                   instance_pk=get_current_instance().pk)
        elif instance.status == Socket.STATUSES.PROCESSING:
            add_post_transaction_success_operation(SocketProcessorTask.delay, using=using,
                                                   instance_pk=get_current_instance().pk)
def hosting_post_save_handler(sender, instance, created, using, **kwargs):
    syncano_instance_pk = get_current_instance().pk
    new_cname = Hosting.find_cname(instance.domains)
    if created:
        old_cname = None
    else:
        old_cname = Hosting.find_cname(instance.old_value('domains'))

    if new_cname != old_cname:
        with transaction.atomic():
            syncano_instance = Instance.objects.select_for_update().get(pk=syncano_instance_pk)
            if new_cname is not None:
                add_domains_to_syncano_instance(syncano_instance, domains=[new_cname])
            if old_cname is not None:
                remove_domains_from_syncano_instance(syncano_instance, domains=[old_cname])
            syncano_instance.save(update_fields=['domains'])

    if instance.ssl_status == Hosting.SSL_STATUSES.CHECKING:
        add_post_transaction_success_operation(
            HostingAddSecureCustomDomainTask.delay,
            using=using,
            hosting_pk=instance.id,
            domain=instance.get_cname(),
            instance_pk=syncano_instance_pk,
        )
def process(self):
    name = self.dependency['name']
    # user profile case
    if name == 'user':
        name = 'user_profile'
    self.name = name

    try:
        klass = Klass.objects.select_for_update().get(name=name)
        if klass.is_locked:
            raise SocketLockedClass(name)
    except Klass.DoesNotExist:
        # Create fresh class
        with Instance.lock(get_current_instance().pk):
            klass = self.create_class()
    else:
        klass = self.update_class(klass)

    # Save class
    self.add_installed(klass)
    return {name.lower(): {f['name']: f['type'] for f in self.dependency['schema']}}
def create_schedule(self):
    # Create a fresh schedule if it does not exist yet
    try:
        script = self.get_script()
    except CodeBox.DoesNotExist:
        script = self.create_script()

    schedule_limit = AdminLimit.get_for_admin(get_current_instance().owner_id).get_schedules_count()
    if CodeBoxSchedule.objects.count() >= schedule_limit:
        raise ScheduleCountExceeded(schedule_limit)

    schedule_data = {
        'label': 'Script dependency of {}'.format(self.socket.name),
        'description': 'Schedule created as a dependency of '
                       'socket: "{}".'.format(self.socket.name),
        'script': script.pk,
    }
    schedule_data.update(self.schedule_params)

    schedule_serializer = CodeBoxScheduleSerializer(data=schedule_data)
    schedule_serializer.is_valid(raise_exception=True)
    return schedule_serializer.save(
        socket=self.socket, codebox=script,
        event_handler=self.dependency['handler_name'])
def validate_domains(self, value):
    value_set = set(value)
    only_domains = [v for v in value_set if re.match(VALID_DOMAIN_REGEX, v)]

    # Only 1 domain per hosting is allowed so that we can process SSL properly.
    if len(only_domains) > 1:
        raise OnlyOneDomainAllowed()

    # Check the domains globally, against every other instance.
    if Instance.objects.exclude(pk=get_current_instance().pk).filter(
            domains__overlap=only_domains).exists():
        raise DomainAlreadyUsed()

    # Prevent creating a hosting with an already used domain,
    # but allow updating the same object with the same domains.
    validate_queryset = Hosting.objects.all()

    # Update case; if self.instance is empty, it is a create case.
    current_hosting = self.instance
    if current_hosting:
        validate_queryset = validate_queryset.exclude(pk=current_hosting.pk)

    # Use all values here: this checks that no two hosting objects exist
    # with the same instance name and suffix combination.
    if validate_queryset.filter(domains__overlap=list(value_set)).exists():
        raise DomainAlreadyUsed()
    return list(value_set)
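# Illustrative behavior (domain values are made up, and it is an assumption
# that VALID_DOMAIN_REGEX matches only fully qualified custom domains): for
# value=['myapp.example.com', 'staging'], only 'myapp.example.com' is treated
# as a custom domain and checked for global uniqueness; passing two matching
# domains, e.g. ['a.example.com', 'b.example.com'], raises OnlyOneDomainAllowed.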
def indicator_post_save_handler(sender, instance, created, **kwargs):
    tenant = get_current_instance() or instance.instance
    if not created and instance.type == InstanceIndicator.TYPES.STORAGE_SIZE:
        storage_limit = AdminLimit.get_for_admin(tenant.owner_id).get_storage()
        # Enforce the limit only when it is non-negative: the chained comparison
        # requires storage_limit >= 0, so a negative limit means "unlimited".
        if instance.value > instance.old_value('value') and instance.value > storage_limit >= 0:
            raise StorageLimitReached()
def after_lock_released(self, args, kwargs):
    if get_current_instance() and self.model_class.objects.filter(**self.query).exists():
        options = {}
        if self.countdown is not None:
            options['countdown'] = self.countdown
        self.apply_async(args, kwargs, **options)
@contextmanager  # requires: from contextlib import contextmanager
def instance_context(instance):
    previous_instance = get_current_instance()
    try:
        set_current_instance(instance)
        yield
    finally:
        set_current_instance(previous_instance)
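# A minimal usage sketch (hypothetical caller): run ORM queries against
# another tenant's context, with the previous instance restored even if the
# block raises.
#
#     with instance_context(other_instance):
#         klass_count = Klass.objects.count()  # executed in other_instance's context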
def create_class(self):
    klass_limit = AdminLimit.get_for_admin(get_current_instance().owner_id).get_classes_count()
    if Klass.objects.count() >= klass_limit:
        raise KlassCountExceeded(klass_limit)

    klass_data = {'name': self.name,
                  'description': 'Class created as a dependency of '
                                 'socket: "{}".'.format(self.socket.name),
                  'schema': self.dependency['schema'],
                  'ignored_target_classes': self.ignored_class_names(),
                  'metadata': self.dependency['metadata']}

    # Run validation first
    serializer = KlassSerializer(data=klass_data)
    serializer.is_valid(raise_exception=True)

    fields = {}
    field_props = {}
    for field in self.dependency['schema']:
        fields[field['name']] = [self.socket.pk]
        field_props[field['name']] = {f_prop: [self.socket.pk]
                                      for f_prop, val in field.items() if val is True}

    refs = {
        'managed_by': [self.socket.pk],
        'fields': fields,
        'props': field_props,
    }
    return serializer.save(refs=refs)
def upload_custom_socketenvironment_file_to(instance, filename):
    _, ext = os.path.splitext(filename)
    return '{instance_prefix}/env/{filename}{ext}'.format(
        instance_prefix=get_current_instance().get_storage_prefix(),
        filename=generate_key(),
        ext=ext.lower()[:16]  # extensions longer than 16 would be kinda strange
    )
def _get_lookup_key(self, key, type_, target):
    schema = '0'

    if type_ == 'model':
        app_label = target._meta.app_label
        lookup_name = target._meta.db_table

        if apps.get_app_config(app_label).name in settings.TENANT_APPS:
            from apps.instances.helpers import get_current_instance

            instance = get_current_instance()
            if instance is None:
                raise target.DoesNotExist()
            schema = str(instance.id)
        return lookup_name, schema

    lookup_name = key
    if key is None:
        lookup_name = target.__module__
        if hasattr(target, '__self__') and target.__self__ and hasattr(target.__self__, '__name__'):
            lookup_name += '.%s' % target.__self__.__name__
        lookup_name += '.%s' % target.__name__
    if self.version_key is not None:
        lookup_name += ':%s' % self.version_key
    return lookup_name, schema
def add_indicators(apps, schema_editor):
    APNSDevice = apps.get_model('push_notifications.APNSDevice')
    instance = get_current_instance()
    InstanceIndicator.objects.update_or_create(
        type=InstanceIndicator.TYPES.APNS_DEVICES_COUNT,
        instance=instance,
        defaults={'value': APNSDevice.objects.filter(is_active=True).count()})
def upload_hosting_file_to(instance, filename):
    _, ext = os.path.splitext(filename)
    return '{instance_prefix}/{hosting_id}h/{filename}{ext}'.format(
        instance_prefix=get_current_instance().get_storage_prefix(),
        hosting_id=instance.hosting_id,
        filename=generate_key(),
        ext=ext.lower()[:16]  # extensions longer than 16 would be kinda strange
    )
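# Illustrative output (values are made up; the get_storage_prefix() format is
# an assumption): for hosting_id=7 and filename 'index.HTML', this returns
# something like '<prefix>/7h/<random-key>.html' -- a generated key plus the
# lowercased extension, capped at 16 characters.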
def emit(self, request, *args, **kwargs):
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)

    event = {'source': 'custom'}
    signal = serializer.data['signal']
    payload = serializer.data['payload']

    if Trigger.match(get_current_instance().pk, event, signal):
        HandleTriggerEventTask.delay(instance_pk=get_current_instance().pk,
                                     event=event, signal=signal, data=payload)
        ret = status.HTTP_202_ACCEPTED
    else:
        ret = status.HTTP_204_NO_CONTENT
    return Response(status=ret)
def get_storage_path_for_key(cls, key, path=''):
    if path:
        file_root, file_ext = os.path.splitext(path)
        path = '{}_{}{}'.format(file_root, get_random_string(7), file_ext)
    return '{instance_prefix}/sockets/{socket_key}/{path}'.format(
        instance_prefix=get_current_instance().get_storage_prefix(),
        socket_key=key,
        path=path)
def _get_current_db(self):
    instance = get_current_instance()
    if instance:
        db = get_instance_db(instance)
        # Check if we are in atomic block for that connection
        if connections[db].in_atomic_block:
            return db
    return DEFAULT_DB_ALIAS
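# Behavior sketch: outside a tenant context (get_current_instance() is None)
# or outside an atomic block on the tenant connection, this falls back to
# DEFAULT_DB_ALIAS. A hypothetical call site:
#
#     with instance_context(instance):
#         with transaction.atomic(get_instance_db(instance)):
#             ...  # _get_current_db() returns the tenant db alias here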
def get_file_cached(cls, hosting_id, path):
    def _get_file():
        try:
            return cls.objects.filter(path=path, hosting=hosting_id).get()
        except cls.DoesNotExist:
            return 'DoesNotExist'

    return Cached(_get_file, key='Hosting.GetFile',
                  version_key='i=%d;h=%d;p=%s' % (get_current_instance().id, hosting_id, path))
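# Usage sketch (the calling class name is an assumption): note the
# 'DoesNotExist' string sentinel, cached in place of the exception so that
# negative lookups are served from cache too.
#
#     hosting_file = HostingFile.get_file_cached(hosting_id=7, path='index.html').get()
#     if hosting_file == 'DoesNotExist':
#         ...  # treat as a 404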
def save_object(self, obj):
    # Install socket(s) in database
    db = get_instance_db(get_current_instance())
    with transaction.atomic(db):
        # Process in reverse order so that we process in FIFO
        for socket, dependencies, is_partial in self.socket_install['data'][::-1]:
            socket.zip_file = None
            if socket.id is None:
                socket.save()
            self.install_socket(socket, dependencies, partial=is_partial)
            super().save_object(socket)
def update_instance_storage_indicator(change):
    if not change:
        return

    instance = get_current_instance()
    indicator_type = InstanceIndicator.TYPES.STORAGE_SIZE
    # Get it first before update as we need to validate limits in post_save signal
    with transaction.atomic():
        indicator = InstanceIndicator.objects.filter(
            instance=instance, type=indicator_type).select_for_update().get()
        indicator.value += change
        indicator.save()
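# Hypothetical call sites: pass the signed size delta after storage changes.
# The select_for_update() above serializes concurrent updates, and the
# indicator_post_save_handler defined earlier enforces the storage limit.
#
#     update_instance_storage_indicator(uploaded_file.size)    # on upload
#     update_instance_storage_indicator(-deleted_file.size)    # on delete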
def livemodel_post_soft_delete_handler(sender, instance, using, **kwargs):
    sender_meta = sender._meta
    instance_pk = None
    if is_model_in_tenant_apps(sender):
        instance_pk = get_current_instance().pk

    # queue actual cleanup job
    add_post_transaction_success_operation(
        DeleteLiveObjectTask.delay, using=using,
        model_class_name='%s.%s' % (sender_meta.app_label, sender_meta.model_name),
        object_pk=instance.pk,
        instance_pk=instance_pk)
def process_object(self, socket, **kwargs):
    if socket.install_url and not socket.zip_file:
        self.download_socket_zip(socket)

    self.socket_install = {
        'endpoints_count': 0,
        'dependencies_count': 0,
        'data': [],
    }
    is_trusted = Cached(Admin, kwargs={'id': get_current_instance().owner_id}).get().is_trusted
    dependencies, is_partial = self.importer(socket, is_trusted=is_trusted).process()
    self.add_socket_for_installation(socket, dependencies, is_partial)
def process(self):
    self.schedule_params = self.create_schedule_params()
    try:
        schedule = CodeBoxSchedule.objects.filter(
            socket=self.socket, **self.schedule_params).select_related('codebox').get()
    except CodeBoxSchedule.DoesNotExist:
        with Instance.lock(get_current_instance().pk):
            schedule = self.create_schedule()
    else:
        schedule = self.update_schedule(schedule)

    schedule.schedule_next()
    self.add_installed(schedule)
    return self.process_handler(schedule)
def update_class(self, klass):
    schema = copy.deepcopy(klass.schema)
    refs = klass.refs
    socket_pk = self.socket.pk

    # Validate schema first
    KlassSerializer(instance=klass, data={
        'schema': self.dependency['schema'],
        'ignored_target_classes': self.ignored_class_names(),
    }, partial=True).is_valid(raise_exception=True)

    # Add field references.
    ref_fields = refs.get('fields', {})
    ref_props = refs.get('props', {})
    refs['fields'] = ref_fields
    refs['props'] = ref_props

    if 'managed_by' in refs and socket_pk not in refs['managed_by']:
        refs['managed_by'].append(socket_pk)

    # Check if klass is compatible
    dep_fields = {f['name']: f for f in self.dependency['schema']}
    self.merge_class_schema(schema, dep_fields, ref_fields, ref_props)

    # Cleanup klass references.
    klass.schema = schema
    cleanup_data_klass_ref(klass, using=get_instance_db(get_current_instance()))

    metadata = klass.metadata
    metadata.update(self.dependency['metadata'])

    # Run last validation
    serializer = KlassSerializer(instance=klass, data={
        'schema': klass.schema,
        'ignored_target_classes': self.ignored_class_names(),
        'metadata': metadata,
    }, partial=True)
    serializer.is_valid(raise_exception=True)
    return serializer.save(refs=refs)
def launch_trigger(instance, serializer_class, event, signal, changes=None, **context):
    instance_pk = get_current_instance().pk
    if Trigger.match(instance_pk, event, signal):
        data = serializer_class(instance, excluded_fields=('links',), context=context).data
        if changes is not None:
            changes = changes.intersection(set(data.keys()))

        add_post_transaction_success_operation(
            HandleTriggerEventTask.delay,
            using=router.db_for_write(instance.__class__),
            instance_pk=instance_pk,
            event=event,
            signal=signal,
            data=data,
            changes=list(changes) if changes else None)
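# Usage sketch (serializer, event, and signal values are made up): emit a
# trigger event after an object update, narrowing `changes` to fields that
# actually appear in the serialized payload.
#
#     launch_trigger(data_object, DataObjectSerializer,
#                    event={'source': 'dataobject'}, signal='update',
#                    changes={'title', 'score'})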
def cleanup_data_klass_ref(klass, save=False, using=None):
    # If klass is no longer managed by any socket, delete it.
    if 'managed_by' in klass.refs and not klass.refs['managed_by']:
        if klass.is_locked:
            add_post_transaction_success_operation(
                KlassOperationQueue.delay, using=using,
                instance_pk=get_current_instance().pk,
                klass_pk=klass.pk, op='delete')
        else:
            klass.delete()
        return

    if klass.is_locked:
        add_post_transaction_success_operation(
            KlassOperationQueue.delay, using=using,
            instance_pk=get_current_instance().pk,
            klass_pk=klass.pk, op='cleanup_refs')
        return
    klass.cleanup_refs(save=save)
def cleanup(cls, socket, installed_objects):
    # Skip cleanup for new sockets.
    if socket.pk is None:
        return

    installed_klasses = {klass.name for klass in installed_objects[Klass.__name__]}

    # Clean up classes that are no longer referenced.
    for class_name, field_dict in socket.old_value('installed').get(cls.yaml_type, {}).items():
        if class_name not in installed_klasses:
            unref_data_klass(socket.pk, class_name, field_dict,
                             using=get_instance_db(get_current_instance()))
def db_for_model(self, model, for_read=False, instance=None, context=None, **hints):
    if model is self.TENANT_MODEL:
        if context == 'new':
            return INSTANCES_NEW_DB_ALIAS
        if context == 'contents':
            # Instance objects are cached and `database` may be unset (None),
            # so fall back to the default instances alias in that case.
            return instance.database or INSTANCES_DB_ALIAS
    elif is_model_in_tenant_apps(model):
        return get_instance_db(get_current_instance(), for_read=for_read)
    return DEFAULT_DB_ALIAS
def authenticate(self, request):
    api_key = self.get_api_key(request)
    admin = getattr(request._request, 'user', AnonymousAdmin())
    auth = getattr(request._request, 'auth', None)
    auth_user = getattr(request._request, 'auth_user', None)
    staff_user = getattr(request._request, 'staff_user', None)
    instance = getattr(request._request, 'instance', None)

    # Initialize default auth_user first so that permission classes are sane
    request.auth_user = None
    request._request.auth_user = None

    if api_key:
        if not API_KEY_REGEX.match(api_key):
            # Verify if we're dealing with a token
            admin = self.get_admin_from_token(api_key, instance)
            if not admin:
                raise exceptions.AuthenticationFailed('No such API Key.')
        elif check_parity(api_key):
            admin = self.get_admin_by_key(api_key)
        else:
            auth = self.get_auth(api_key, instance)

    if auth and get_current_instance():
        auth_user = self.get_auth_user(request)

    staff_key = self.get_staff_key(request)
    if staff_key and API_KEY_REGEX.match(staff_key) and check_parity(staff_key):
        staff_user = self.get_admin_by_key(staff_key)
        staff_user = staff_user if staff_user.is_staff else None

    # Save auth user manually
    request.auth_user = auth_user
    request.staff_user = staff_user

    # Save user inside wrapped request so that middlewares also see it
    request._request.user = admin
    request._request.auth = auth
    request._request.auth_user = auth_user
    request._request.staff_user = staff_user
    return admin, auth
def _format_key(cls, key, **kwargs):
    if cls.tenant_model and 'instance' not in kwargs:
        kwargs['instance'] = get_current_instance()
    return key.format(**kwargs)
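# Illustrative template (the key format itself is an assumption): when the
# cache is tenant-aware and no instance is passed, the current one is
# injected into the format kwargs automatically.
#
#     cls._format_key('{instance.pk}:hosting:{hosting_id}', hosting_id=7)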
def klass_post_save_handler(sender, instance, using, **kwargs):
    if instance.index_changes:
        add_post_transaction_success_operation(IndexKlassTask.delay, using=using,
                                               instance_pk=get_current_instance().pk)