def dataset_edit(request, dataset_name):
    """Edit ZFS properties of an existing dataset through the middleware.

    POST applies each property individually via zfs_set_option /
    zfs_inherit_option and maps per-property failures back onto the
    corresponding form fields; GET renders an unbound form.
    """
    if request.method == "POST":
        dataset_form = forms.ZFSDataset(request.POST, fs=dataset_name, create=False)
        if dataset_form.is_valid():
            # A quota of "0" means "unlimited" in the UI; ZFS spells that "none".
            if dataset_form.cleaned_data["dataset_quota"] == "0":
                dataset_form.cleaned_data["dataset_quota"] = "none"
            if dataset_form.cleaned_data["dataset_refquota"] == "0":
                dataset_form.cleaned_data["dataset_refquota"] = "none"
            error = False
            errors = {}
            # Apply each property separately so a single failure does not
            # abort the rest; collect errors per form field.
            for attr in ("compression", "atime", "dedup", "reservation", "refreservation", "quota", "refquota"):
                formfield = "dataset_%s" % attr
                if dataset_form.cleaned_data[formfield] == "inherit":
                    success, err = notifier().zfs_inherit_option(dataset_name, attr)
                else:
                    success, err = notifier().zfs_set_option(
                        dataset_name, attr, dataset_form.cleaned_data[formfield])
                error |= not success
                if not success:
                    errors[formfield] = err
            if not error:
                return JsonResp(request, message=_("Dataset successfully edited."))
            else:
                # Surface middleware errors on their originating fields.
                for field, err in errors.items():
                    dataset_form._errors[field] = dataset_form.error_class([err])
                return JsonResp(request, form=dataset_form)
        else:
            return JsonResp(request, form=dataset_form)
    else:
        dataset_form = forms.ZFSDataset(fs=dataset_name, create=False)
    return render(request, "storage/dataset_edit.html", {"dataset_name": dataset_name, "form": dataset_form})
def save(self):
    """Persist global network settings, reload the affected service, and
    keep the process proxy environment in sync with gc_httpproxy.

    Returns whatever the parent form's save() returns.
    """
    # TODO: new IP address should be added in a side-by-side manner
    # or the interface wouldn't appear once IP was changed.
    retval = super(GlobalConfigurationForm, self).save()

    # A gateway change needs a full network reload, not just hostname.
    whattoreload = "hostname"
    if self.instance._orig_gc_ipv4gateway != self.cleaned_data.get('gc_ipv4gateway'):
        whattoreload = "networkgeneral"
    if self.instance._orig_gc_ipv6gateway != self.cleaned_data.get('gc_ipv6gateway'):
        whattoreload = "networkgeneral"
    notifier().reload(whattoreload)

    http_proxy = self.cleaned_data.get('gc_httpproxy')
    if http_proxy:
        os.environ['http_proxy'] = http_proxy
        os.environ['https_proxy'] = http_proxy
    else:
        # Proxy cleared: drop any stale entries.  (The original
        # "elif not http_proxy:" was always true here; plain else.)
        if 'http_proxy' in os.environ:
            del os.environ['http_proxy']
        if 'https_proxy' in os.environ:
            del os.environ['https_proxy']

    # Reset global opener so ProxyHandler can be recalculated
    urllib2.install_opener(None)

    return retval
def smb4_map_groups():
    """Add an NT group mapping for every local Unix group that is not yet
    mapped and whose name does not collide with a username."""
    existing_maps = notifier().groupmap_list()
    for unix_group in get_groups():
        already_mapped = smb4_group_mapped(existing_maps, unix_group)
        clashes_with_user = smb4_groupname_is_username(unix_group)
        if not already_mapped and not clashes_with_user:
            notifier().groupmap_add(unixgroup=unix_group, ntgroup=unix_group)
def zfsvolume_edit(request, object_id):
    """Edit ZFS properties on a volume's root ("native") dataset.

    Properties are applied one by one; failures are attached to the
    corresponding form fields and the form is re-rendered.
    """
    mp = models.MountPoint.objects.get(pk=object_id)
    volume_form = forms.ZFSVolume_EditForm(mp=mp)
    if request.method == "POST":
        volume_form = forms.ZFSVolume_EditForm(request.POST, mp=mp)
        if volume_form.is_valid():
            # The dataset name is the mount path minus the /mnt/ prefix.
            # (Removed the dead assignments from mp.mp_volume.vol_name that
            # were immediately overwritten by this line.)
            volume_name = mp.mp_path.replace("/mnt/", "")
            # "0" means "unlimited" in the UI; ZFS spells that "none".
            if volume_form.cleaned_data["volume_refquota"] == "0":
                volume_form.cleaned_data["volume_refquota"] = "none"
            error, errors = False, {}
            for attr in ("compression", "atime", "dedup", "refquota", "refreservation"):
                formfield = "volume_%s" % attr
                if volume_form.cleaned_data[formfield] == "inherit":
                    success, err = notifier().zfs_inherit_option(volume_name, attr)
                else:
                    success, err = notifier().zfs_set_option(
                        volume_name, attr, volume_form.cleaned_data[formfield])
                if not success:
                    error = True
                    errors[formfield] = err
            if not error:
                return JsonResp(
                    request,
                    message=_("Native dataset successfully edited."))
            else:
                # Surface middleware errors on their originating fields.
                for field, err in errors.items():
                    volume_form._errors[field] = volume_form.error_class([err])
    return render(request, "storage/volume_edit.html", {"mp": mp, "form": volume_form})
def save(self):
    """Save the keytab, roll back if its principals cannot be stored, and
    restart the kerberos service.

    Returns the saved model instance (Django ModelForm convention; the
    original returned None).
    """
    obj = super(KerberosKeytabCreateForm, self).save()
    if not self.save_principals(obj):
        # Principals could not be extracted/stored: undo the keytab row.
        obj.delete()
        log.debug("save(): unable to save principals")
    notifier().start("ix-kerberos")
    return obj
def done(self, request, events):
    """After the GUI settings form saves: if any GUI binding option changed,
    compute the new base URL and queue a client-side httpd restart event.
    """
    if (
        self.instance._original_stg_guiprotocol != self.instance.stg_guiprotocol or
        self.instance._original_stg_guiaddress != self.instance.stg_guiaddress or
        self.instance._original_stg_guiport != self.instance.stg_guiport or
        self.instance._original_stg_guihttpsport != self.instance.stg_guihttpsport or
        self.instance._original_stg_guihttpsredirect != self.instance.stg_guihttpsredirect
    ):
        if self.instance.stg_guiaddress == "0.0.0.0":
            # Bound to all interfaces: reuse whatever host the browser used.
            address = request.META['HTTP_HOST'].split(':')[0]
        else:
            address = self.instance.stg_guiaddress
        if self.instance.stg_guiprotocol == 'httphttps':
            # Both protocols enabled: send the client to plain http.
            protocol = 'http'
        else:
            protocol = self.instance.stg_guiprotocol
        newurl = "%s://%s" % (
            protocol,
            address
        )
        # Append the explicit port matching the chosen protocol, if set.
        if self.instance.stg_guiport and protocol == 'http':
            newurl += ":" + str(self.instance.stg_guiport)
        elif self.instance.stg_guihttpsport and protocol == 'https':
            newurl += ":" + str(self.instance.stg_guihttpsport)
        if self.instance._original_stg_guiprotocol == 'http':
            # Moving off plain http: make sure nginx has an SSL setup first.
            notifier().start_ssl("nginx")
        events.append("restartHttpd('%s')" % newurl)
def save(self):
    """Save NIS settings, then start/restart or stop the service to match
    the nis_enable flag.

    Raises MiddlewareError when the status check or any service transition
    fails.  The bare `except:` clauses were narrowed to `except Exception:`
    so SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    enable = self.cleaned_data.get("nis_enable")

    # XXX: We need to have a method to test server connection.
    try:
        started = notifier().started("nis")
    except Exception:
        raise MiddlewareError(_("Failed to check NIS status."))
    finally:
        # Persist the form regardless of the status-check outcome.
        super(NISForm, self).save()

    if enable:
        if started is True:
            try:
                started = notifier().restart("nis")
                log.debug("Try to restart: %s", started)
            except Exception:
                raise MiddlewareError(_("NIS failed to restart."))
        if started is False:
            try:
                started = notifier().start("nis")
                log.debug("Try to start: %s", started)
            except Exception:
                raise MiddlewareError(_("NIS failed to start."))
        if started is False:
            # Service still down: disable and persist, then report.
            self.instance.ad_enable = False
            super(NISForm, self).save()
            raise MiddlewareError(_("NIS failed to reload."))
    else:
        if started is True:
            started = notifier().stop("nis")
def plugin_update(request, plugin_id):
    """Handle the PBI upload form that updates an installed plugin.

    On invalid POST the rendered form is wrapped in a textarea because the
    upload is submitted through an iframe.
    """
    plugin_id = int(plugin_id)
    plugin = models.Plugins.objects.get(id=plugin_id)

    plugin_upload_path = notifier().get_plugin_upload_path()
    notifier().change_upload_location(plugin_upload_path)

    if request.method == "POST":
        form = forms.PBIUpdateForm(request.POST, request.FILES, plugin=plugin)
        if form.is_valid():
            form.done()
            return JsonResp(
                request,
                message=_('Plugin successfully updated'),
                events=['reloadHttpd()'],
            )
        else:
            resp = render(request, "plugins/plugin_update.html", {
                'form': form,
            })
            # Fixed closing tag: was the malformed "</boby>".
            resp.content = (
                "<html><body><textarea>" +
                resp.content +
                "</textarea></body></html>"
            )
            return resp
    else:
        form = forms.PBIUpdateForm(plugin=plugin)

    return render(request, "plugins/plugin_update.html", {
        'form': form,
    })
def backup_database():
    """Copy the configuration database into the system dataset, after pruning
    backups left over from the legacy flat naming scheme."""
    from freenasUI.middleware.notifier import notifier
    systemdataset, basename = notifier().system_dataset_settings()
    systempath = notifier().system_dataset_path()
    if not systempath or not systemdataset:
        return

    # Legacy format
    legacy_re = re.compile(r'.*(\d{4}-\d{2}-\d{2})-(\d+)\.db$')
    for stale in glob.glob('%s/*.db' % systempath):
        if not legacy_re.match(stale):
            continue
        try:
            os.unlink(stale)
        except OSError:
            pass

    today = datetime.now().strftime("%Y%m%d")
    newfile = os.path.join(
        systempath,
        'configs-%s' % systemdataset.get_sys_uuid(),
        get_sw_version(),
        '%s.db' % today,
    )

    dirname = os.path.dirname(newfile)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    shutil.copy('/data/freenas-v1.db', newfile)
def delete(self, *args, **kwargs):
    """Delete this plugin; when it is the last plugin in its jail, delete
    the jail as well.  The duplicated _do_delete() call in both branches of
    the original if/else has been hoisted.
    """
    qs = Plugins.objects.filter(plugin_jail=self.plugin_jail).exclude(
        id__exact=self.id
    )
    with transaction.atomic():
        jc = JailsConfiguration.objects.order_by('-id')[0]
        jaildir = "%s/%s" % (jc.jc_path, self.plugin_jail)
        notifier()._stop_plugins(
            jail=self.plugin_jail,
            plugin=self.plugin_name,
        )
        force = kwargs.pop('force', False)
        # Decide before deleting: do other plugins still use this jail?
        last_plugin = qs.count() == 0
        self._do_delete(force=force)
        if last_plugin and os.path.exists("%s/.plugins/PLUGIN" % jaildir):
            # This was a plugin jail and nothing else lives there: remove it.
            try:
                jail = Jails.objects.get(jail_host=self.plugin_jail)
                jail.delete(force=True)
            except Jails.DoesNotExist:
                log.debug('Could not delete jail %s', self.plugin_jail,
                          exc_info=True)
        super(Plugins, self).delete(*args, **kwargs)
    self.plugin_secret.delete()
def get_sys_uuid(self):
    """Return the system UUID, selecting the B-node field when running as
    the standby node of a TrueNAS failover pair."""
    if not self.__sys_uuid_field:
        on_node_b = (
            not notifier().is_freenas() and
            notifier().failover_node() == "B"
        )
        self.__sys_uuid_field = "sys_uuid_b" if on_node_b else "sys_uuid"
    return getattr(self, self.__sys_uuid_field)
def delete(self):
    """Remove the LAGG row, any VLANs riding on its interface, the backing
    interface record, and finally destroy the OS device."""
    super(LAGGInterface, self).delete()
    parent_nic = self.lagg_interface.int_interface
    VLAN.objects.filter(vlan_pint=parent_nic).delete()
    self.lagg_interface.delete()
    notifier().iface_destroy(parent_nic)
def delete(self, *args, **kwargs):
    """Delete the database row, then best-effort remove the route from the
    OS routing table."""
    super(StaticRoute, self).delete(*args, **kwargs)
    try:
        # TODO: async user notification
        notifier().staticroute_delete(self)
    except MiddlewareError:
        # The kernel route may already be gone; the row removal stands.
        pass
def delete(self, using=None, reload=True):
    """Delete a user: remove the system account, its Samba4 counterpart when
    acting as a domain controller, its now-empty non-builtin primary group,
    and any CIFS guest reference to it.

    Raises ValueError for built-in users.
    """
    from freenasUI.services.models import CIFS
    if self.bsdusr_builtin is True:
        raise ValueError(_(
            "User %s is built-in and can not be deleted!"
        ) % (self.bsdusr_username))
    notifier().user_deleteuser(self.bsdusr_username.encode('utf-8'))
    if domaincontroller_enabled():
        Samba4().user_delete(self.bsdusr_username.encode('utf-8'))
    try:
        # Drop the primary group if nothing else references it.
        gobj = self.bsdusr_group
        count = bsdGroupMembership.objects.filter(
            bsdgrpmember_group=gobj).count()
        count2 = bsdUsers.objects.filter(bsdusr_group=gobj).exclude(
            id=self.id).count()
        if not gobj.bsdgrp_builtin and count == 0 and count2 == 0:
            gobj.delete(reload=False, pwdelete=False)
    except Exception:
        # Best-effort group cleanup (was a bare except).
        pass
    # CIFS.objects.latest() raises DoesNotExist on an empty table, so the
    # previous "if cifs:" guard could never take effect; handle explicitly.
    try:
        cifs = CIFS.objects.latest('id')
    except CIFS.DoesNotExist:
        cifs = None
    if cifs:
        if cifs.cifs_srv_guest == self.bsdusr_username:
            cifs.cifs_srv_guest = 'nobody'
            cifs.save()
    super(bsdUsers, self).delete(using)
    if reload:
        notifier().reload("user")
def done(self, *args, **kwargs):
    """Install the uploaded PBI into the selected jail; on success restart
    that jail's plugin service."""
    installed = []
    target_jail = self.cleaned_data.get('pjail')
    # install_pbi appends the created Plugins instance to the list on success.
    if notifier().install_pbi(target_jail, installed):
        plugin = installed[0]
        notifier()._restart_plugins(plugin.plugin_jail, plugin.plugin_name)
def bootenv_scrub(request): if request.method == "POST": try: notifier().zfs_scrub('freenas-boot') return JsonResp(request, message=_("Scrubbing the Boot Pool...")) except Exception, e: return JsonResp(request, error=True, message=repr(e))
def dataset_delete(request, name):
    """Confirm and recursively destroy a ZFS dataset, first releasing any
    replication hold that would block the destroy."""
    datasets = zfs.list_datasets(path=name, recursive=True)
    if request.method == 'POST':
        form = forms.Dataset_Destroy(request.POST, fs=name, datasets=datasets)
        if form.is_valid():
            if form.hold:
                # Argument-list invocation avoids shell interpretation of the
                # snapshot name (previously a shell=True formatted string).
                proc = subprocess.Popen(
                    ['zfs', 'release', 'freenas:repl', form.hold],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                retval = proc.communicate()[1]
                if proc.returncode != 0 and retval:
                    raise MiddlewareError(retval)
            retval = notifier().destroy_zfs_dataset(path=name, recursive=True)
            if retval == '':
                # collectd may have been graphing this dataset.
                notifier().restart("collectd")
                return JsonResp(
                    request,
                    message=_("Dataset successfully destroyed."))
            else:
                return JsonResp(request, error=True, message=retval)
    else:
        form = forms.Dataset_Destroy(fs=name, datasets=datasets)
    return render(request, 'storage/dataset_confirm_delete.html', {
        'name': name,
        'form': form,
        'datasets': datasets,
    })
def terminal(request):
    """Web-shell AJAX bridge: keep the session alive, forward keystrokes,
    and return the current screen dump as XML.

    POST params: s = session id, k = keystrokes, w/h = terminal size.
    """
    sid = int(request.POST.get("s", 0))
    k = request.POST.get("k")
    w = int(request.POST.get("w", 80))
    h = int(request.POST.get("h", 24))

    multiplex = MyServer("/var/run/webshell.sock")
    alive = False
    # The multiplexer may be down; retry a few times, restarting it between
    # attempts.
    for i in range(3):
        try:
            alive = multiplex.proc_keepalive(sid, w, h)
            break
        except:
            notifier().restart("webshell")
            time.sleep(0.5)

    try:
        if alive:
            if k:
                multiplex.proc_write(sid, xmlrpclib.Binary(bytearray(k.encode("utf-8"))))
            # Give the shell a moment to produce output before dumping.
            time.sleep(0.002)
            content_data = '<?xml version="1.0" encoding="UTF-8"?>' + multiplex.proc_dump(sid)
            response = HttpResponse(content_data, content_type="text/xml")
            return response
        else:
            response = HttpResponse("Disconnected")
            response.status_code = 400
            return response
    except (KeyError, ValueError, IndexError, xmlrpclib.Fault), e:
        response = HttpResponse("Invalid parameters: %s" % e)
        response.status_code = 400
        return response
def update_save(request):
    """AJAX endpoint persisting the update auto-check flag and train name."""
    assert request.method == 'POST'

    try:
        update = models.Update.objects.order_by('-id')[0]
    except IndexError:
        update = models.Update.objects.create()

    autocheck = request.POST.get('autocheck')
    if autocheck:
        update.upd_autocheck = autocheck == 'true'
        update.save()
        notifier().restart("cron")

    train = request.POST.get('train')
    if train:
        update.upd_train = train
        update.save()

    return HttpResponse(
        json.dumps(True),
        content_type='application/json',
    )
def save(self):
    """Persist the e-mail change and best-effort reload the user service."""
    user = super(bsdUserEmailForm, self).save(commit=True)
    try:
        notifier().reload("user", timeout=_fs().account.user.timeout.reload)
    except Exception as e:
        log.debug("ERROR: failed to reload user: %s", e)
    return user
def is_valid(self):
    """Validate the form; on success, set the root password (when root has
    no password yet) and authenticate the session."""
    valid = super(NewPasswordForm, self).is_valid()
    if not valid:
        return valid

    qs = models.bsdUsers.objects.filter(
        bsdusr_uid=0,
        bsdusr_unixhash='*',
    )
    if qs.exists():
        root = qs[0]
        root.set_password(self.cleaned_data['password'])
        root.save()
        self.user_cache = authenticate(
            username=root.bsdusr_username,
            password=self.cleaned_data['password'],
        )
        #
        # XXX hackity hackness XXX
        # Catch call timeout exceptions. We should really return this to the user
        # in the UI, but there is no easy way to currently do this. For now this
        # prevents a stack trace in the UI, which is slightly better than nothing ;-)
        # This same try/except structure is littered throughout this code.
        #
        try:
            notifier().reload("user", timeout=_fs().account.user.timeout.reload)
        except Exception as e:
            log.debug("ERROR: failed to reload user: %s", e)
    return valid
def smb4_setup():
    """Prepare the samba4 runtime environment: create the run/log/private
    directories and (re)point /var/db/samba4 at the system dataset copy when
    one is available and decrypted.
    """
    statedir = "/var/db/samba4"

    smb4_mkdir("/var/run/samba")
    smb4_mkdir("/var/db/samba")

    smb4_mkdir("/var/run/samba4")

    smb4_mkdir("/var/log/samba4")
    os.chmod("/var/log/samba4", 0755)

    smb4_mkdir("/var/etc/private")
    os.chmod("/var/etc/private", 0700)

    smb4_unlink("/usr/local/etc/smb.conf")
    smb4_unlink("/usr/local/etc/smb4.conf")

    # On an HA standby node the active node owns the samba state; stop here.
    if hasattr(notifier, 'failover_status') and notifier().failover_status() == 'BACKUP':
        return

    systemdataset, volume, basename = notifier().system_dataset_settings()
    if not volume or not volume.is_decrypted():
        # No usable system dataset: fall back to a plain local directory,
        # replacing any leftover symlink.
        if os.path.islink(statedir):
            smb4_unlink(statedir)
            smb4_mkdir(statedir)
        return

    systemdataset_path = notifier().system_dataset_path() or statedir

    basename_realpath = os.path.join(systemdataset_path, 'samba4')
    statedir_realpath = os.path.realpath(statedir)

    if os.path.islink(statedir) and not os.path.exists(statedir):
        # Dangling symlink (dataset moved or unmounted): drop it.
        smb4_unlink(statedir)

    if basename_realpath != statedir_realpath and os.path.exists(basename_realpath):
        smb4_unlink(statedir)
        if os.path.exists(statedir):
            # A real directory is in the way; keep it as a timestamped backup.
            olddir = "%s.%s" % (statedir, time.strftime("%Y%m%d%H%M%S"))
            try:
                os.rename(statedir, olddir)
            except Exception as e:
                print >> sys.stderr, "Unable to rename '%s' to '%s' (%s)" % (
                    statedir, olddir, e)
                sys.exit(1)
        try:
            os.symlink(basename_realpath, statedir)
        except Exception as e:
            print >> sys.stderr, "Unable to create symlink '%s' -> '%s' (%s)" % (
                basename_realpath, statedir, e)
            sys.exit(1)

    if os.path.islink(statedir) and not os.path.exists(statedir_realpath):
        # Symlink target vanished after all: revert to a local directory.
        smb4_unlink(statedir)
        smb4_mkdir(statedir)

    smb4_mkdir("/var/db/samba4/private")
    os.chmod("/var/db/samba4/private", 0700)
def save(self, *args, **kwargs):
    """Save the interface: auto-assign a free CARP vhid and a random CARP
    password when a VIP is configured, then restore the default MTU if an
    explicit 'mtu N' option was removed.
    """
    if not self.int_vip:
        # No virtual IP -> no CARP vhid.
        self.int_carp = None
    elif self.int_carp in (None, ""):
        used = [i[0] for i in Interfaces.objects.values_list("int_carp")]
        # More than 50 CARPs? I dont think so, but lets be safe
        # and avoid counting how many interfaces we have got.
        # FIXME: concurrency? Two CARP with same number
        for i in xrange(50):
            # vhids 1 and 2 are skipped -- presumably reserved; confirm.
            if i in (1, 2):
                continue
            if i in used:
                continue
            self.int_carp = i
            break

    if self.int_vip and not self.int_pass:
        # Generate a random 16-character CARP password.
        self.int_pass = "".join(
            [random.SystemRandom().choice(string.ascii_letters + string.digits) for n in xrange(16)]
        )

    super(Interfaces, self).save(*args, **kwargs)

    if (
        self._original_int_options != self.int_options and
        re.search(r"mtu \d+", self._original_int_options) and
        self.int_options.find("mtu") == -1
    ):
        # 'mtu N' disappeared from the options: reset to the default MTU.
        notifier().interface_mtu(self.int_interface, "1500")
def index(request):
    """Support landing page; the production-status form is only offered on
    TrueNAS (non-FreeNAS) systems."""
    sw_name = get_sw_name().lower()

    license_info, reason = utils.get_license()

    # Updates are only permitted on the active node (or non-HA systems).
    allow_update = True
    if hasattr(notifier, 'failover_status'):
        if notifier().failover_status() not in ('MASTER', 'SINGLE'):
            allow_update = False

    context = {
        'sw_name': sw_name,
        'license': license_info,
        'fc_enabled': utils.fc_enabled(),
        'allow_update': allow_update,
    }

    for hook_context in appPool.hook_view_context('support.index', request):
        context.update(hook_context)

    if not notifier().is_freenas():
        form = forms.ProductionForm()
        if request.method == 'POST':
            form = forms.ProductionForm(request.POST)
            if form.is_valid():
                form.save()
                return JsonResp(
                    request,
                    message='Production status successfully updated.'
                )
        context['production_form'] = form

    return render(request, 'support/home.html', context)
def zvol_create(request, parent):
    """Create a ZFS zvol under *parent* with the requested size, blocksize,
    compression and description."""
    defaults = {'zvol_compression': 'inherit', }
    if request.method == 'POST':
        zvol_form = forms.ZVol_CreateForm(request.POST, vol_name=parent)
        if zvol_form.is_valid():
            props = {}
            cleaned_data = zvol_form.cleaned_data
            zvol_volsize = cleaned_data.get('zvol_volsize')
            zvol_blocksize = cleaned_data.get("zvol_blocksize")
            zvol_name = "%s/%s" % (parent, cleaned_data.get('zvol_name'))
            zvol_comments = cleaned_data.get('zvol_comments')
            zvol_compression = cleaned_data.get('zvol_compression')
            props['compression'] = str(zvol_compression)
            if zvol_blocksize:
                props['volblocksize'] = zvol_blocksize
            errno, errmsg = notifier().create_zfs_vol(
                name=str(zvol_name),
                size=str(zvol_volsize),
                sparse=cleaned_data.get("zvol_sparse", False),
                props=props)
            if errno == 0:
                # Only set the description once creation succeeded; doing it
                # unconditionally (as before) targeted a nonexistent zvol
                # when create_zfs_vol had failed.
                notifier().zfs_set_option(
                    name=str(zvol_name),
                    item="org.freenas:description",
                    value=zvol_comments)
                return JsonResp(
                    request,
                    message=_("ZFS Volume successfully added."))
            else:
                zvol_form.set_error(errmsg)
    else:
        zvol_form = forms.ZVol_CreateForm(
            initial=defaults,
            vol_name=parent)
    return render(request, 'storage/zvols.html', {
        'form': zvol_form,
        'volume_name': parent,
    })
def ipmi(request):
    """Configure the BMC's LAN settings; on GET, prefill the form from the
    current IPMI configuration."""
    if request.method == "POST":
        form = IPMIForm(request.POST)
        if form.is_valid():
            rv = notifier().ipmi_set_lan(form.cleaned_data)
            if rv == 0:
                return JsonResp(request, message=_("IPMI successfully edited"))
            else:
                return JsonResp(request, error=True, message=_("IPMI failed"))
    else:
        try:
            ipmi = notifier().ipmi_get_lan()

            #TODO: There might be a better way to convert netmask to CIDR
            mask = ipmi.get("SubnetMask")
            num, cidr = struct.unpack('>I', socket.inet_aton(mask))[0], 0
            # Count the set bits from the top by shifting until none remain.
            while num > 0:
                num = num << 1 & 0xffffffff
                cidr += 1

            initial = {
                'dhcp': False if ipmi.get("IpAddressSource") == "Static Address" else True,
                'ipv4address': ipmi.get("IpAddress"),
                'ipv4gw': ipmi.get("DefaultGatewayIp"),
                'ipv4netmaskbit': str(cidr),
            }
        except Exception:
            # ipmitool output unavailable/unparseable: start with a blank form.
            initial = {}
        form = IPMIForm(initial=initial)
    return render(request, 'network/ipmi.html', {
        'form': form,
    })
def ipmi(request):
    """Configure the BMC's LAN settings on the selected channel; on GET,
    prefill the form (including the 802.1q VLAN id) from the current IPMI
    configuration."""
    if request.method == "POST":
        form = IPMIForm(request.POST)
        if form.is_valid():
            rv = notifier().ipmi_set_lan(form.cleaned_data, channel=int(form.cleaned_data.get("channel")))
            if rv == 0:
                return JsonResp(request, message=_("IPMI successfully edited"))
            else:
                return JsonResp(request, error=True, message=_("IPMI failed"))
    else:
        try:
            ipmi = notifier().ipmi_get_lan()

            # TODO: There might be a better way to convert netmask to CIDR
            mask = ipmi.get("SubnetMask")
            num, cidr = struct.unpack(">I", socket.inet_aton(mask))[0], 0
            # Count the set bits from the top by shifting until none remain.
            while num > 0:
                num = num << 1 & 0xFFFFFFFF
                cidr += 1

            initial = {
                "dhcp": False if ipmi.get("IpAddressSource") == "Static Address" else True,
                "ipv4address": ipmi.get("IpAddress"),
                "ipv4gw": ipmi.get("DefaultGatewayIp"),
                "ipv4netmaskbit": str(cidr),
                # The BMC reports "Disabled" when no VLAN is tagged.
                "vlanid": ipmi.get("8021qVlanId") if ipmi.get("8021qVlanId") != "Disabled" else "",
            }
        except Exception:
            # ipmitool output unavailable/unparseable: start with a blank form.
            initial = {}
        form = IPMIForm(initial=initial)
    return render(request, "network/ipmi.html", {"form": form})
def run(self):
    """Alert check: warn when no system-dataset pool is configured, or when
    the samba4 auto-migration marker file is present."""
    # Nothing to report when no volumes exist at all.
    if not Volume.objects.all().exists():
        return None
    # Standby HA node: alert checks run on the active node only.
    if (
        hasattr(notifier, 'failover_status') and
        notifier().failover_status() == 'BACKUP'
    ):
        return None
    systemdataset, basename = notifier().system_dataset_settings()
    if not systemdataset.sys_pool:
        return [
            Alert(
                Alert.WARN,
                "No system pool configured, please configure one in "
                "Settings->System Dataset->Pool"
            ),
        ]
    # Marker left by the migration code when it found several legacy samba4
    # datasets and refused to pick one automatically.
    if os.path.exists('/var/db/samba4/.alert_cant_migrate'):
        return [
            Alert(
                Alert.WARN,
                "Multiple legacy samba4 datasets detected. Auto-migration "
                "to /mnt/%s/.system/samba4 cannot be done. Please perform "
                "this step manually and then delete the now-obsolete "
                "samba4 datasets and /var/db/samba4/.alert_cant_migrate" % systemdataset.sys_pool
            ),
        ]
def disk_wipe(request, devname):
    """Wipe a disk, refusing when any of its consumers is mounted or the
    disk belongs to a ZFS pool."""
    form = forms.DiskWipeForm()
    if request.method == "POST":
        form = forms.DiskWipeForm(request.POST)
        if form.is_valid():
            # Everything that blocks the wipe: mounted GEOM consumers of the
            # device plus ZFS volumes using the disk.
            in_use = []
            for geom in notifier().disk_get_consumers(devname):
                consumer = geom.xpathEval("./name")[0].content
                dev = "/dev/%s" % (consumer,)
                if dev not in in_use and is_mounted(device=dev):
                    in_use.append(dev)
            for vol in models.Volume.objects.filter(vol_fstype="ZFS"):
                if devname in vol.get_disks():
                    in_use.append(vol.vol_name)
            if in_use:
                form._errors["__all__"] = form.error_class(
                    ["Umount the following mount points before proceeding:"
                     "<br /> %s" % ("<br /> ".join(in_use),)]
                )
            else:
                notifier().disk_wipe(devname, form.cleaned_data["method"])
                return JsonResp(request, message=_("Disk successfully wiped"))
        return JsonResp(request, form=form)
    return render(request, "storage/disk_wipe.html", {"devname": devname, "form": form})
def volume_unlock(request, object_id):
    """Unlock an encrypted volume: key-only volumes (vol_encrypt < 2) are
    imported directly; passphrase volumes go through UnlockPassphraseForm."""
    volume = models.Volume.objects.get(id=object_id)
    if volume.vol_encrypt < 2:
        if request.method == "POST":
            notifier().start("geli")
            zimport = notifier().zfs_import(
                volume.vol_name, id=volume.vol_guid)
            # is_decrypted is a method (see its other call sites): the bare
            # attribute reference used previously was always truthy, so
            # failed unlocks were reported as successes.
            if zimport and volume.is_decrypted():
                return JsonResp(
                    request,
                    message=_("Volume unlocked"))
            else:
                return JsonResp(
                    request,
                    message=_("Volume failed unlocked"))
        return render(request, "storage/unlock.html", {
        })

    if request.method == "POST":
        form = forms.UnlockPassphraseForm(request.POST, request.FILES)
        if form.is_valid():
            form.done(volume=volume)
            return JsonResp(
                request,
                message=_("Volume unlocked"))
        else:
            return JsonResp(request, form=form)
    else:
        form = forms.UnlockPassphraseForm()
    return render(request, "storage/unlock_passphrase.html", {
        'volume': volume,
        'form': form,
    })
def save(self, *args, **kwargs):
    """Encrypt a newly set plaintext mail password before persisting."""
    needs_encryption = self.em_pass and not self._em_pass_encrypted
    if needs_encryption:
        self.em_pass = notifier().pwenc_encrypt(self.em_pass)
        self._em_pass_encrypted = True
    return super(Email, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
    """Remove the static route row, then tear the route down in the OS."""
    super(StaticRoute, self).delete(*args, **kwargs)
    notifier().staticroute_delete(self)
def delete(self):
    """Detach this member NIC from its LAGG device, then delete the row."""
    group = self.lagg_interfacegroup
    notifier().lagg_remove_port(
        group.lagg_interface.int_interface,
        self.lagg_physnic,
    )
    super(LAGGInterfaceMembers, self).delete()
def delete(self):
    """Delete the VLAN row, its interface record, and destroy the device."""
    device = self.vlan_vint
    super(VLAN, self).delete()
    Interfaces.objects.filter(int_interface=device).delete()
    notifier().iface_destroy(device)
def delete(self):
    """Delete the alias, then bounce networking so the address is removed."""
    super(Alias, self).delete()
    notifier().stop("netif")
    notifier().start("network")
def save(self, *args, **kwargs):
    """Save; if an explicit 'mtu N' option was removed from int_options,
    restore the interface to the default MTU."""
    super(Interfaces, self).save(*args, **kwargs)
    options_changed = self._original_int_options != self.int_options
    had_mtu = re.search(r'mtu \d+', self._original_int_options)
    still_has_mtu = self.int_options.find("mtu") != -1
    if options_changed and had_mtu and not still_has_mtu:
        notifier().interface_mtu(self.int_interface, "1500")
def delete(self):
    """Delete the interface, cascading through its LAGGs first, then bounce
    networking so the OS state matches the database."""
    for lagg in self.lagginterface_set.all():
        lagg.delete()
    super(Interfaces, self).delete()
    notifier().stop("netif")
    notifier().start("network")
def get_datagrid_context(self, request):
    """Provide the parsed zpool of the requested volume to the datagrid."""
    vol = models.Volume.objects.get(id=request.GET.get('id'))
    parsed_pool = notifier().zpool_parse(vol.vol_name)
    return {'pool': parsed_pool}
def get_media_status(self):
    # Delegate to the middleware for this interface's link/media status.
    return notifier().iface_media_status(self.int_interface)
def save(self, *args, **kwargs):
    """Encrypt a newly set plaintext SED password before persisting."""
    must_encrypt = self.adv_sed_passwd and not self._adv_sed_passwd_encrypted
    if must_encrypt:
        self.adv_sed_passwd = notifier().pwenc_encrypt(self.adv_sed_passwd)
        self._adv_sed_passwd_encrypted = True
    return super().save(*args, **kwargs)
def save(self, *args, **kwargs):
    """Persist the disk; restart smartd when the S.M.A.R.T. toggle changed
    on an existing row."""
    toggle_changed = (
        self.id and
        self._original_state.get("disk_togglesmart", None) !=
        self.__dict__.get("disk_togglesmart")
    )
    if toggle_changed:
        notifier().restart("smartd")
    super(Disk, self).save(*args, **kwargs)
def delete(self):
    """Tear down the link aggregation: the row itself, VLANs that ride on
    its interface, the interface record, and finally the OS device."""
    super(LAGGInterface, self).delete()
    iface = self.lagg_interface.int_interface
    VLAN.objects.filter(vlan_pint=iface).delete()
    self.lagg_interface.delete()
    notifier().iface_destroy(iface)
def get_serial(self):
    """Resolve this disk's identifier to a device and return its serial."""
    middleware = notifier()
    device = middleware.identifier_to_device(self.disk_identifier)
    return middleware.serial_from_device(device)
def delete(self):
    """Delete the replication remote, then reload ssh to refresh its state."""
    result = super(ReplRemote, self).delete()
    notifier().reload("ssh")
    return result
def delete(self, destroy=True, cascade=True):
    """Delete the volume: attachments, scheduled snapshot/replication tasks,
    system-dataset usage, geli key file, and the database row itself.

    BUG FIX: do_reload was only ever assigned in the "mount point missing"
    branch of the finally clause, so every read of it below raised NameError
    whenever the mount point still existed.  It now defaults to True.
    """
    from freenasUI.system.models import SystemDataset

    try:
        systemdataset = SystemDataset.objects.filter(
            sys_pool=self.vol_name)[0]
    except IndexError:
        systemdataset = None

    do_reload = True
    with transaction.atomic():
        try:
            svcs, reloads = Volume._delete(
                self,
                destroy=destroy,
                cascade=cascade,
                systemdataset=systemdataset,
            )
        finally:
            if not os.path.isdir(self.vol_path):
                # Mount point gone: nothing left to detach/reload against.
                do_reload = False

    if do_reload:
        reloads = self.delete_attachments()

    if self.vol_fstype == 'ZFS':
        # Remove scheduled snapshot and replication tasks for this pool.
        Task.objects.filter(
            task_filesystem=self.vol_name).delete()
        Replication.objects.filter(
            repl_filesystem=self.vol_name).delete()

    if do_reload:
        svcs = ('cifs', 'afp', 'nfs', 'iscsitarget')
        for (svc, dirty) in zip(svcs, reloads):
            if dirty:
                notifier().restart(svc)

    n = notifier()

    # The framework would cascade delete all database items
    # referencing this volume.
    super(Volume, self).delete()

    # If there's a system dataset on this pool, stop using it.
    if systemdataset:
        systemdataset.sys_pool = ''
        systemdataset.save()
        n.restart('system_datasets')

    # Refresh the fstab
    n.reload("disk")
    # For scrub tasks
    n.restart("cron")

    # Django signal could have been used instead
    # Do it this way to make sure its ran in the time we want
    self.post_delete()

    if self.vol_encryptkey:
        keyfile = self.get_geli_keyfile()
        if os.path.exists(keyfile):
            try:
                os.unlink(keyfile)
            except Exception:
                # Was a bare except; narrowed.
                log.warn("Unable to delete geli key file: %s" % keyfile)

    for (svc, dirty) in zip(svcs, reloads):
        if dirty:
            n.start(svc)
def identifier_to_device(self):
    """
    Get the corresponding device name from disk_identifier field

    Delegates resolution to the middleware; the return value is whatever
    notifier().identifier_to_device yields (presumably a device node name,
    or None when it cannot be resolved -- TODO confirm).
    """
    return notifier().identifier_to_device(self.disk_identifier)
def get_zvols(self):
    # Only ZFS volumes can contain zvols; other fstypes fall through to None.
    if self.vol_fstype == 'ZFS':
        return notifier().list_zfs_vols(self.vol_name)
def delete(self):
    """Delete the scrub schedule and best-effort resync cron."""
    super(Scrub, self).delete()
    try:
        notifier().restart("cron")
    except Exception:
        # Best-effort: a cron restart failure must not undo the delete.
        # (Was a bare except, which also swallowed SystemExit/KeyboardInterrupt.)
        pass
def set_password(self, passwd):
    # Store only the middleware-encrypted form of the password.
    self.password = notifier().pwenc_encrypt(passwd)
def _delete(self, destroy=True, cascade=True, systemdataset=None):
    """
    Some places reference a path which will not cascade delete
    We need to manually find all paths within this volume mount point
    """
    from freenasUI.services.models import iSCSITargetExtent

    # If we are using this volume to store collectd data
    # the service needs to be restarted
    if systemdataset and systemdataset.sys_rrd_usedataset:
        reload_collectd = True
    else:
        reload_collectd = False

    # TODO: This is ugly.
    # svcs and reloads are parallel sequences: reloads accumulates (via
    # element-wise sum) how many attachments require each service to be
    # stopped/reloaded.
    svcs = ('cifs', 'afp', 'nfs', 'iscsitarget', 'jails', 'collectd')
    reloads = (False, False, False, False, False, reload_collectd)

    n = notifier()
    if cascade:
        reloads = map(sum, zip(reloads, self.delete_attachments()))

        zvols = n.list_zfs_vols(self.vol_name)
        for zvol in zvols:
            qs = iSCSITargetExtent.objects.filter(
                iscsi_target_extent_path='zvol/' + zvol,
                iscsi_target_extent_type='ZVOL')
            if qs.exists():
                if destroy:
                    notifier().destroy_zfs_vol(zvol)
                qs.delete()
                # An extent was removed: mark iscsitarget dirty.
                reloads = map(sum, zip(
                    reloads,
                    (False, False, False, True, False, reload_collectd)))
    else:
        attachments = self.has_attachments()
        reloads = map(sum, zip(
            reloads,
            [len(attachments[svc]) for svc in svcs]))

    # Delete scheduled snapshots for this volume
    Task.objects.filter(
        models.Q(task_filesystem=self.vol_name) |
        models.Q(task_filesystem__startswith="%s/" % self.vol_name)).delete()

    for (svc, dirty) in zip(svcs, reloads):
        if dirty:
            n.stop(svc)

    n.detach_volume_swaps(self)

    # Ghosts volumes, does not exists anymore but is in database
    ghost = False
    try:
        status = n.get_volume_status(self.vol_name, self.vol_fstype)
        ghost = status == 'UNKNOWN'
    except:
        ghost = True

    if ghost:
        # Nothing on disk to act on; only the DB row will be removed.
        pass
    elif destroy:
        n.destroy("volume", self)
    else:
        n.volume_detach(self)

    return (svcs, reloads)
def run(self):
    """Run all registered alert modules, merging with cached results that
    have not yet expired, sync dismissals, e-mail new CRITs, optionally open
    a support ticket for hardware alerts, and persist state to ALERT_FILE.
    """
    _n = notifier()
    # Skip for standby node
    if (
        not _n.is_freenas() and _n.failover_licensed() and
        _n.failover_status() != 'MASTER'
    ):
        return []
    obj = None
    if os.path.exists(self.ALERT_FILE):
        with open(self.ALERT_FILE, 'rb') as f:
            try:
                obj = pickle.load(f)
            except Exception:
                log.debug('Failed to load alert state file', exc_info=True)
    if not obj:
        results = {}
    else:
        results = obj['results']
    rvs = []
    node = alert_node()
    dismisseds = [a.message_id for a in mAlert.objects.filter(node=node)]
    ids = []
    for instance in self.mods:
        try:
            if instance.name in results:
                # fire_once modules never re-run after their first pass.
                if instance.fire_once:
                    continue
                # Within the module's interval: replay the cached alerts
                # instead of re-running it.
                if results.get(instance.name).get(
                    'lastrun'
                ) > time.time() - (instance.interval * 60):
                    if results.get(instance.name).get('alerts'):
                        for alert in results.get(instance.name).get('alerts'):
                            ids.append(alert.getId())
                            rvs.append(alert)
                    continue
            rv = instance.run()
            if rv:
                # Drop falsy entries a module may have returned.
                alerts = [_f for _f in rv if _f]
                for alert in alerts:
                    ids.append(alert.getId())
                    if instance.name in results:
                        # Preserve the first-seen timestamp of a repeated alert.
                        found = False
                        for i in (results[instance.name]['alerts'] or []):
                            if alert == i:
                                found = i
                                break
                        if found is not False:
                            alert.setTimestamp(found.getTimestamp())
                    if alert.getId() in dismisseds:
                        alert.setDismiss(True)
                rvs.extend(alerts)
            results[instance.name] = {
                'lastrun': int(time.time()),
                'alerts': rv,
            }
        except Exception as e:
            log.debug("Alert module '%s' failed: %s", instance, e, exc_info=True)
            log.error("Alert module '%s' failed: %s", instance, e)
    # Dismissal rows for alerts that no longer fire are stale: remove them.
    qs = mAlert.objects.exclude(message_id__in=ids, node=node)
    if qs.exists():
        qs.delete()
    crits = sorted([a for a in rvs if a and a.getLevel() == Alert.CRIT])
    if obj and crits:
        # Only e-mail when the CRIT set actually changed since last run.
        lastcrits = sorted([
            a for a in obj['alerts'] if a and a.getLevel() == Alert.CRIT
        ])
        if crits == lastcrits:
            crits = []
    if crits:
        self.email(crits)
    if not notifier().is_freenas():
        # Automatically create ticket for new alerts tagged as possible
        # hardware problem
        hardware = sorted([a for a in rvs if a and a.getHardware()])
        if obj and hardware:
            lasthardware = sorted([
                a for a in obj['alerts'] if a and a.getHardware()
            ])
            if hardware == lasthardware:
                hardware = []
        try:
            support = Support.objects.order_by('-id')[0]
        except IndexError:
            support = Support.objects.create()
        if hardware and support.is_enabled():
            self.ticket(support, hardware)
    with open(self.ALERT_FILE, 'wb') as f:
        pickle.dump({
            'last': time.time(),
            'alerts': rvs,
            'results': results,
        }, f)
    return rvs
def get_password(self):
    # Return the decrypted form of the stored (encrypted) password.
    return notifier().pwenc_decrypt(self.password)
def delete(self, *args, **kwargs):
    """Remove the share, then reload Samba so it disappears from clients."""
    super(CIFS_Share, self).delete(*args, **kwargs)
    notifier().reload("cifs")
def delete(self, *args, **kwargs):
    """Delete the periodic task and best-effort resync cron."""
    super(Task, self).delete(*args, **kwargs)
    try:
        notifier().restart("cron")
    except Exception:
        # Best-effort: a cron restart failure must not undo the delete.
        # (Was a bare except, which also swallowed SystemExit/KeyboardInterrupt.)
        pass
def delete(self):
    """Delete the S.M.A.R.T. test schedule and best-effort restart smartd."""
    super(SMARTTest, self).delete()
    try:
        notifier().restart("smartd")
    except Exception:
        # Best-effort: an smartd restart failure must not undo the delete.
        # (Was a bare except, which also swallowed SystemExit/KeyboardInterrupt.)
        pass
def generate(self, request=None):
    """
    Tree Menu Auto Generate

    Every app listed at INSTALLED_APPS is scanned
    1st - app_name.forms is imported. All its objects/classes are
          scanned looking for ModelForm classes
    2nd - app_name.nav is imported. TreeNode classes are scanned for
          hard-coded menu entries or overwriting
    3rd - app_name.models is imported. models.Model classes are scanned,
          if a related ModelForm is found several entries are Added to
          the Menu
            - Objects
              - Add (Model)
              - View (Model)
    """
    self._generated = True
    self._navs.clear()
    tree_roots.clear()
    childs_of = []
    # failover_status only exists on HA-capable builds; fall back to
    # SINGLE otherwise.
    if hasattr(notifier, 'failover_status'):
        fstatus = notifier().failover_status()
    else:
        fstatus = 'SINGLE'
    for app in settings.INSTALLED_APPS:
        # If the app is listed at settings.BLACKLIST_NAV, skip it!
        if app in getattr(settings, 'BLACKLIST_NAV', []):
            continue
        try:
            self._generate_app(app, request, tree_roots, childs_of, fstatus)
        except Exception as e:
            # One broken app must not take down the whole menu.
            log.error(
                "Failed to generate navtree for app %s: %s", app, e)
            log_traceback(log=log)
    # Fixed (non-app) menu entries; `order` controls display position.
    nav = TreeRoot(
        'documentation',
        name=_('Guide'),
        icon=get_sw_name() + 'GuideIcon',
        action='opendocumentation',
        order=970,
    )
    tree_roots.register(nav)
    nav = TreeRoot(
        'display',
        name=_('Display System Processes'),
        action='displayprocs',
        icon='TopIcon',
        order=985,
    )
    tree_roots.register(nav)
    # The wizard is only offered on the active (or only) node.
    if fstatus in ('MASTER', 'SINGLE'):
        nav = TreeRoot(
            'initialwizard',
            name=_('Wizard'),
            icon='WizardIcon',
            action='wizard',
            order=980,
        )
        tree_roots.register(nav)
    nav = TreeRoot(
        'shell',
        name=_('Shell'),
        icon='ShellIcon',
        action='shell',
        order=990,
    )
    tree_roots.register(nav)
    nav = TreeRoot(
        'logout',
        name=_('Log Out'),
        icon='LogOutIcon',
        action='logout',
        order=995,
    )
    tree_roots.register(nav)
    nav = TreeRoot(
        'reboot',
        name=_('Reboot'),
        action='reboot',
        icon='RebootIcon',
        type='scary_dialog',
        view='system_reboot_dialog',
        order=999,
    )
    tree_roots.register(nav)
    nav = TreeRoot(
        'shutdown',
        name=_('Shutdown'),
        icon='ShutdownIcon',
        type='scary_dialog',
        view='system_shutdown_dialog',
        order=1000,
    )
    tree_roots.register(nav)
    # Attach deferred child entries (models declaring menu_child_of)
    # under their parent node, wherever it ended up in the tree.
    for opt, model in childs_of:
        for nav in tree_roots:
            exists = nav.find_gname(model._admin.menu_child_of)
            if exists is not False:
                exists.append_child(opt)
                break
        if exists is False:
            log.debug(
                "Could not find %s to attach %r",
                model._admin.menu_child_of, opt)
    self.replace_navs(tree_roots)
def __init__(self, nolagg=False, novlan=False, exclude_configured=True,
             include_vlan_parent=False, with_alias=False, nobridge=True,
             noepair=True):
    """Build the list of selectable network interfaces.

    Starts from ``ifconfig -l`` and progressively removes pseudo
    devices, failover-internal interfaces, lagg members, vlan parents,
    already-configured interfaces, bridges and epairs according to the
    keyword flags.  The result is left in ``self._NIClist`` and its
    length in ``self.max_choices``.
    """
    pipe = popen("/sbin/ifconfig -l")
    try:
        self._NIClist = pipe.read().strip().split(' ')
    finally:
        # Fix: the pipe was never closed (descriptor leak).
        pipe.close()
    # Remove loopback/pseudo devices from choices.
    # Fix: materialize with list() -- on Python 3, filter() returns a
    # lazy iterator and every .remove() call below would raise
    # AttributeError.  list() is a no-op change on Python 2.
    self._NIClist = list(filter(
        lambda y: y not in ('lo0', 'pfsync0', 'pflog0', 'ipfw0'),
        self._NIClist))
    from freenasUI.middleware.notifier import notifier
    # Remove internal interfaces for failover
    if (
        hasattr(notifier, 'failover_status') and
        notifier().failover_licensed()
    ):
        for iface in notifier().failover_internal_interfaces():
            if iface in self._NIClist:
                self._NIClist.remove(iface)
    conn = sqlite3.connect(freenasUI.settings.DATABASES['default']['NAME'])
    c = conn.cursor()
    # Remove interfaces that are parent devices of a lagg
    # Database queries are wrapped in try/except as this is run
    # before the database is created during syncdb and the queries
    # will fail
    try:
        c.execute("SELECT lagg_physnic FROM network_lagginterfacemembers")
    except sqlite3.OperationalError:
        pass
    else:
        for interface in c:
            if interface[0] in self._NIClist:
                self._NIClist.remove(interface[0])
    if nolagg:
        # vlan devices are not valid parents of laggs
        self._NIClist = [
            nic for nic in self._NIClist if not nic.startswith("lagg")]
        self._NIClist = [
            nic for nic in self._NIClist if not nic.startswith("vlan")]
    if novlan:
        self._NIClist = [
            nic for nic in self._NIClist if not nic.startswith("vlan")]
    else:
        # This removes devices that are parents of vlans.  We don't
        # remove these devices if we are adding a vlan since multiple
        # vlan devices may share the same parent.
        # The exception to this case is when we are getting the NIC
        # list for the GUI, in which case we want the vlan parents
        # as they may have a valid config on them.
        if not include_vlan_parent:
            try:
                c.execute("SELECT vlan_pint FROM network_vlan")
            except sqlite3.OperationalError:
                pass
            else:
                for interface in c:
                    if interface[0] in self._NIClist:
                        self._NIClist.remove(interface[0])
    if with_alias:
        # Keep only interfaces that have at least one alias configured.
        try:
            sql = """
                SELECT
                    int_interface

                FROM
                    network_interfaces as ni

                INNER JOIN
                    network_alias as na

                ON
                    na.alias_interface_id = ni.id
            """
            c.execute(sql)
        except sqlite3.OperationalError:
            pass
        else:
            aliased_nics = [x[0] for x in c]
            # Iterate over a shallow copy so we can mutate the original
            # (deepcopy is unnecessary for a list of strings).
            for interface in list(self._NIClist):
                if interface not in aliased_nics:
                    self._NIClist.remove(interface)
    if exclude_configured:
        try:
            # Exclude any configured interfaces
            c.execute("SELECT int_interface FROM network_interfaces "
                      "WHERE int_ipv4address != '' OR int_dhcp != '0' "
                      "OR int_ipv6auto != '0' OR int_ipv6address != ''")
        except sqlite3.OperationalError:
            pass
        else:
            for interface in c:
                if interface[0] in self._NIClist:
                    self._NIClist.remove(interface[0])
    if nobridge:
        self._NIClist = [
            nic for nic in self._NIClist if not nic.startswith("bridge")]
    if noepair:
        for nic in list(self._NIClist):
            if nic.startswith('epair'):
                self._NIClist.remove(nic)
    # Fix: the sqlite connection was never closed.
    conn.close()
    self.max_choices = len(self._NIClist)
def delete(self, *args, **kwargs): super(AFP_Share, self).delete(*args, **kwargs) notifier().reload("afp")
def dataset_edit(request, dataset_name): if request.method == 'POST': dataset_form = forms.ZFSDataset( request.POST, fs=dataset_name, create=False ) if dataset_form.is_valid(): if dataset_form.cleaned_data["dataset_quota"] == "0": dataset_form.cleaned_data["dataset_quota"] = "none" if dataset_form.cleaned_data["dataset_refquota"] == "0": dataset_form.cleaned_data["dataset_refquota"] = "none" error = False errors = {} for attr in ( 'compression', 'atime', 'dedup', 'reservation', 'refreservation', 'quota', 'refquota', 'share_type' ): formfield = 'dataset_%s' % attr val = dataset_form.cleaned_data[formfield] if val == "inherit": success, err = notifier().zfs_inherit_option( dataset_name, attr) else: if attr == "share_type": notifier().change_dataset_share_type( dataset_name, val) else: success, err = notifier().zfs_set_option( dataset_name, attr, val) error |= not success if not success: errors[formfield] = err if not error: return JsonResp( request, message=_("Dataset successfully edited.")) else: for field, err in errors.items(): dataset_form._errors[field] = dataset_form.error_class([ err, ]) return JsonResp(request, form=dataset_form) else: return JsonResp(request, form=dataset_form) else: dataset_form = forms.ZFSDataset(fs=dataset_name, create=False) return render(request, 'storage/dataset_edit.html', { 'dataset_name': dataset_name, 'form': dataset_form })
def new_default_plugin_jail(basename):
    """Create, configure and start a new plugin jail.

    Picks the first free name of the form ``basename_N``, ensures the
    'pluginjail' warden template exists (creating it if necessary),
    creates the jail with the configured IPv4/IPv6 addressing, marks it
    as a plugin jail, enables VNET and starts it.

    Returns the new Jails model instance.
    Raises MiddlewareError on any failure along the way.
    """
    jc = JailsConfiguration.objects.order_by("-id")[0]
    logfile = "%s/warden.log" % jc.jc_path
    # Resolve static addresses only when DHCP/autoconf is not in use.
    if not jc.jc_ipv4_dhcp or not jc.jc_ipv6_autoconf:
        addrs = guess_addresses()
        if not jc.jc_ipv4_dhcp:
            if not addrs['high_ipv4']:
                raise MiddlewareError(
                    _("Unable to determine IPv4 for plugin"))
        if (jc.jc_ipv6_autoconf or jc.jc_ipv6_network):
            if not jc.jc_ipv6_autoconf:
                if not addrs['high_ipv6']:
                    raise MiddlewareError(
                        _("Unable to determine IPv6 for plugin"))
    # Find the first unused jail name basename_1 .. basename_999.
    jailname = None
    for i in range(1, 1000):
        tmpname = "%s_%d" % (basename, i)
        jails = Jails.objects.filter(jail_host=tmpname)
        if not jails:
            jailname = tmpname
            break
    # Fix: previously jailname could remain None here and the failure
    # surfaced obscurely deep inside warden.
    if jailname is None:
        raise MiddlewareError(
            _("Unable to allocate a name for the plugin jail"))
    w = warden.Warden()
    # Arguments for creating the 'pluginjail' template, should it be
    # missing below.
    template_create_args = {}
    template = JailTemplate.objects.get(jt_name='pluginjail')
    template_create_args['nick'] = template.jt_name
    template_create_args['tar'] = template.jt_url
    template_create_args['flags'] = warden.WARDEN_TEMPLATE_FLAGS_CREATE | \
        warden.WARDEN_TEMPLATE_CREATE_FLAGS_NICK | \
        warden.WARDEN_TEMPLATE_CREATE_FLAGS_TAR
    if template.jt_mtree:
        template_create_args['mtree'] = template.jt_mtree
        template_create_args['flags'] = template_create_args['flags'] | \
            warden.WARDEN_TEMPLATE_CREATE_FLAGS_MTREE
    # Check whether warden already knows the template.
    template = None
    template_list_flags = {}
    template_list_flags['flags'] = warden.WARDEN_TEMPLATE_FLAGS_LIST
    templates = w.template(**template_list_flags)
    for t in templates:
        if t['nick'] == template_create_args['nick']:
            template = t
            break
    os.environ['EXTRACT_TARBALL_STATUSFILE'] = \
        warden.WARDEN_EXTRACT_STATUS_FILE
    createfile = "/var/tmp/.templatecreate"
    if not template:
        # If for some reason warden does not list the template but the path
        # exists, we shall try to nuke it
        template_path = '{}/.warden-template-pluginjail'.format(jc.jc_path)
        if os.path.exists(template_path):
            # Best-effort cleanup of the stale template.
            # Fix: were bare `except:` clauses, which also swallow
            # SystemExit/KeyboardInterrupt.
            try:
                notifier().destroy_zfs_dataset(
                    template_path.replace('/mnt/', ''))
            except Exception:
                pass
            try:
                shutil.rmtree(template_path)
            except OSError:
                pass
        try:
            # Touch the status file, then (re)create the template.
            with open(createfile, "a+"):
                pass
            w.template(**template_create_args)
        except Exception as e:
            if os.path.exists(createfile):
                os.unlink(createfile)
            raise MiddlewareError(e)
        # Re-list to confirm the template now exists.
        template_list_flags = {}
        template_list_flags['flags'] = warden.WARDEN_TEMPLATE_FLAGS_LIST
        templates = w.template(**template_list_flags)
        for t in templates:
            if t['nick'] == template_create_args['nick']:
                template = t
                break
        if not template:
            raise MiddlewareError(_('Unable to find template!'))
    try:
        high_ipv4 = "DHCP"
        if not jc.jc_ipv4_dhcp:
            high_ipv4 = addrs['high_ipv4']
        high_ipv6 = "AUTOCONF"
        if not jc.jc_ipv6_autoconf:
            high_ipv6 = addrs['high_ipv6']
        create_args = {
            'jail': jailname,
            'ipv4': high_ipv4,
            'ipv6': high_ipv6,
            'flags': (
                warden.WARDEN_CREATE_FLAGS_LOGFILE |
                warden.WARDEN_CREATE_FLAGS_TEMPLATE |
                warden.WARDEN_CREATE_FLAGS_VANILLA |
                warden.WARDEN_CREATE_FLAGS_SYSLOG |
                warden.WARDEN_CREATE_FLAGS_IPV4 |
                warden.WARDEN_CREATE_FLAGS_IPV6
            ),
            'template': 'pluginjail',
            'logfile': logfile
        }
        w.create(**create_args)
    except Exception as e:
        raise MiddlewareError(_("Failed to install plugin: %s") % e)
    # Mark the jail as a plugin jail (empty marker file).
    jaildir = "%s/%s" % (jc.jc_path, jailname)
    with open('%s/.plugins/PLUGIN' % jaildir, 'w'):
        pass
    w.auto(jail=jailname)
    w.set(
        jail=jailname,
        flags=(
            warden.WARDEN_SET_FLAGS_VNET_ENABLE
        )
    )
    w.start(jail=jailname)
    obj = Jails.objects.get(jail_host=jailname)
    add_media_user_and_group(obj.jail_path)
    return obj