def __init__(self, domain_sid, invocationid=None, schemadn=None, files=None, override_prefixmap=None, additional_prefixmap=None):
    """Load schema for the SamDB from the AD schema files and samba4_schema.ldif.

    Builds an in-memory SamDB (not a real SAM) holding the Windows Server
    2012 R2 AD schema, plus any extra schema LDIF files, and records the
    LDIF fragments (schema_data, schema_dn_add, schema_dn_modify,
    prefixmap_data) needed to later add the schema to a real database
    without re-parsing.

    :param domain_sid: SID used to build the schema security descriptor
    :param invocationid: optional invocation id to set on the scratch ldb
    :param schemadn: DN of the schema
    :param files: optional list of extra schema LDIF file paths to append
    :param override_prefixmap: optional prefixMap contents replacing the
        default prefixMap.txt
    :param additional_prefixmap: optional list of extra prefixMap lines
    """
    # Imported lazily to avoid a circular import with samba.provision.
    from samba.provision import setup_path
    self.schemadn = schemadn
    # We need to have the am_rodc=False just to keep some warnings quiet -
    # this isn't a real SAM, so it's meaningless.
    self.ldb = SamDB(global_schema=False, am_rodc=False)
    if invocationid is not None:
        self.ldb.set_invocation_id(invocationid)

    self.schema_data = read_ms_schema(
        # Older schema snapshots, kept for reference:
        # setup_path('ad-schema/MS-AD_Schema_2K8_R2_Attributes.txt'),
        # setup_path('ad-schema/MS-AD_Schema_2K8_R2_Classes.txt'),
        # setup_path('ad-schema/Attributes_for_AD_DS__Windows_Server_2012.ldf'),
        # setup_path('ad-schema/Classes_for_AD_DS__Windows_Server_2012.ldf'))
        setup_path('ad-schema/AD_DS_Attributes__Windows_Server_2012_R2.ldf'),
        setup_path('ad-schema/AD_DS_Classes__Windows_Server_2012_R2.ldf'))

    # Append caller-supplied schema extensions verbatim.
    if files is not None:
        for file in files:
            self.schema_data += open(file, 'r').read()

    self.schema_data = substitute_var(self.schema_data,
                                      {"SCHEMADN": schemadn})
    check_all_substituted(self.schema_data)

    self.schema_dn_modify = read_and_sub_file(
        setup_path("provision_schema_basedn_modify.ldif"),
        {"SCHEMADN": schemadn})

    # NOTE(review): b64encode returns bytes on Python 3 — confirm
    # read_and_sub_file accepts the value used for DESCRIPTOR.
    descr = b64encode(get_schema_descriptor(domain_sid))
    self.schema_dn_add = read_and_sub_file(
        setup_path("provision_schema_basedn.ldif"),
        {"SCHEMADN": schemadn, "DESCRIPTOR": descr})

    # Prefix map: either fully overridden, or the stock file plus any
    # additional caller-supplied lines.
    if override_prefixmap is not None:
        self.prefixmap_data = override_prefixmap
    else:
        self.prefixmap_data = open(setup_path("prefixMap.txt"), 'r').read()

    if additional_prefixmap is not None:
        for map in additional_prefixmap:
            self.prefixmap_data += "%s\n" % map

    self.prefixmap_data = b64encode(self.prefixmap_data)

    # We don't actually add this ldif, just parse it
    prefixmap_ldif = "dn: %s\nprefixMap:: %s\n\n" % (self.schemadn,
                                                     self.prefixmap_data)
    self.set_from_ldif(prefixmap_ldif, self.schema_data, self.schemadn)
def __init__(self, setup_path, domain_sid, schemadn=None, serverdn=None, files=None, prefixmap=None):
    """Load schema for the SamDB from the AD schema files and samba4_schema.ldif.

    Builds a scratch Ldb holding the Windows Server 2008 AD schema plus any
    extra schema LDIF files, and records the LDIF fragments (schema_data,
    schema_dn_add, schema_dn_modify, prefixmap_data) needed to later add the
    schema to a real database without re-parsing.

    :param setup_path: Setup path function.
    :param domain_sid: SID used to build the schema security descriptor
    :param schemadn: DN of the schema
    :param serverdn: DN of the server
    :param files: optional list of extra schema LDIF file paths to append
    :param prefixmap: optional list of extra prefixMap lines
    """
    self.schemadn = schemadn
    self.ldb = Ldb()
    self.schema_data = read_ms_schema(
        setup_path('ad-schema/MS-AD_Schema_2K8_Attributes.txt'),
        setup_path('ad-schema/MS-AD_Schema_2K8_Classes.txt'))

    # Append caller-supplied schema extensions verbatim.
    if files is not None:
        for file in files:
            self.schema_data += open(file, 'r').read()

    self.schema_data = substitute_var(self.schema_data,
                                      {"SCHEMADN": schemadn})
    check_all_substituted(self.schema_data)

    self.schema_dn_modify = read_and_sub_file(
        setup_path("provision_schema_basedn_modify.ldif"), {
            "SCHEMADN": schemadn,
            "SERVERDN": serverdn,
        })

    # NOTE(review): b64encode returns bytes on Python 3 — confirm
    # read_and_sub_file accepts the value used for DESCRIPTOR.
    descr = b64encode(get_schema_descriptor(domain_sid))
    self.schema_dn_add = read_and_sub_file(
        setup_path("provision_schema_basedn.ldif"), {
            "SCHEMADN": schemadn,
            "DESCRIPTOR": descr
        })

    # Stock prefix map plus any additional caller-supplied lines.
    self.prefixmap_data = open(setup_path("prefixMap.txt"), 'r').read()
    if prefixmap is not None:
        for map in prefixmap:
            self.prefixmap_data += "%s\n" % map
    self.prefixmap_data = b64encode(self.prefixmap_data)

    # We don't actually add this ldif, just parse it
    prefixmap_ldif = "dn: cn=schema\nprefixMap:: %s\n\n" % self.prefixmap_data
    self.ldb.set_schema_from_ldif(prefixmap_ldif, self.schema_data)
def __init__(self, setup_path, domain_sid, schemadn=None, serverdn=None, files=None, prefixmap=None):
    """Load schema for the SamDB from the AD schema files and samba4_schema.ldif.

    Builds a scratch Ldb holding the Windows Server 2008 R2 AD schema plus
    any extra schema LDIF files, and records the LDIF fragments
    (schema_data, schema_dn_add, schema_dn_modify, prefixmap_data) needed to
    later add the schema to a real database without re-parsing.

    :param setup_path: Setup path function.
    :param domain_sid: SID used to build the schema security descriptor
    :param schemadn: DN of the schema
    :param serverdn: DN of the server
    :param files: optional list of extra schema LDIF file paths to append
    :param prefixmap: optional list of extra prefixMap lines
    """
    self.schemadn = schemadn
    self.ldb = Ldb()
    self.schema_data = read_ms_schema(
        setup_path('ad-schema/MS-AD_Schema_2K8_R2_Attributes.txt'),
        setup_path('ad-schema/MS-AD_Schema_2K8_R2_Classes.txt'))

    # Append caller-supplied schema extensions verbatim.
    if files is not None:
        for file in files:
            self.schema_data += open(file, 'r').read()

    self.schema_data = substitute_var(self.schema_data,
                                      {"SCHEMADN": schemadn})
    check_all_substituted(self.schema_data)

    self.schema_dn_modify = read_and_sub_file(
        setup_path("provision_schema_basedn_modify.ldif"),
        {"SCHEMADN": schemadn,
         "SERVERDN": serverdn,
         })

    # NOTE(review): b64encode returns bytes on Python 3 — confirm
    # read_and_sub_file accepts the value used for DESCRIPTOR.
    descr = b64encode(get_schema_descriptor(domain_sid))
    self.schema_dn_add = read_and_sub_file(
        setup_path("provision_schema_basedn.ldif"),
        {"SCHEMADN": schemadn,
         "DESCRIPTOR": descr
         })

    # Stock prefix map plus any additional caller-supplied lines.
    self.prefixmap_data = open(setup_path("prefixMap.txt"), 'r').read()
    if prefixmap is not None:
        for map in prefixmap:
            self.prefixmap_data += "%s\n" % map
    self.prefixmap_data = b64encode(self.prefixmap_data)

    # We don't actually add this ldif, just parse it
    prefixmap_ldif = "dn: cn=schema\nprefixMap:: %s\n\n" % self.prefixmap_data
    self.ldb.set_schema_from_ldif(prefixmap_ldif, self.schema_data)
def provision_schema(sam_db, setup_path, names, reporter, ldif, msg, modify_mode=False):
    """Provision/modify schema using LDIF specified file.

    In add mode the LDIF is split into individual entries and each one is
    added separately (with the relax control), so a failure can be pinned to
    the exact entry; in modify mode the whole file is applied via
    setup_modify_ldif. Everything runs inside a single transaction which is
    cancelled on any failure.

    :param sam_db: sam db where to provision the schema
    :param setup_path: Path to the setup directory.
    :param names: provision names object.
    :param reporter: A progress reporter instance (subclass of
        AbstractProgressReporter)
    :param ldif: path to the LDIF file
    :param msg: reporter message
    :param modify_mode: whether entries are added or modified
    """
    sam_db.transaction_start()
    try:
        reporter.reportNextStep(msg)

        ldif_params = {
            "FIRSTORG": names.firstorg,
            "FIRSTORGDN": names.firstorgdn,
            "FIRSTOU": names.firstou,
            "CONFIGDN": names.configdn,
            "SCHEMADN": names.schemadn,
            "DOMAINDN": names.domaindn,
            "DOMAIN": names.domain,
            "DNSDOMAIN": names.dnsdomain,
            "NETBIOSNAME": names.netbiosname,
            "HOSTNAME": names.hostname
        }

        if modify_mode:
            setup_modify_ldif(sam_db, setup_path(ldif), ldif_params)
        else:
            full_path = setup_path(ldif)
            ldif_data = read_and_sub_file(full_path, ldif_params)
            # schemaIDGUID can raise error if not match what is expected by
            # the schema master; if not present it will be automatically
            # filled in by the schema master.
            ldif_data = re.sub(r"^schemaIDGUID:", '#schemaIDGUID:',
                               ldif_data, flags=re.M)

            # we add elements one by one only to control better the exact
            # position of the error
            ldif_elements = re.split("\n\n+", ldif_data)
            elements_to_add = []
            for element in ldif_elements:
                if not element:
                    continue
                # this match is to assure we have a legit element
                match = re.search(r'^\s*dn:\s+(.*)$', element, flags=re.M)
                if match:
                    elements_to_add.append(element)

            if elements_to_add:
                for el in elements_to_add:
                    try:
                        sam_db.add_ldif(el, ['relax:0'])
                    except Exception as ex:
                        # py2/py3-compatible print call (was a py2 print
                        # statement)
                        print('Error: "' + str(ex) +
                              '" when adding element:\n' + el + '\n')
                        raise
            else:
                raise Exception('No elements to add found in ' + full_path)
    except:
        # Bare except is deliberate: the transaction must be cancelled on
        # ANY failure; the exception is always re-raised.
        sam_db.transaction_cancel()
        raise

    sam_db.transaction_commit()
    force_schemas_update(sam_db, setup_path)
def setup_modify_ldif(ldb, ldif_path, subst_vars=None, controls=None):
    """Modify a ldb in the private dir.

    :param ldb: LDB object.
    :param ldif_path: LDIF file path.
    :param subst_vars: Optional dictionary with substitution variables.
    :param controls: Optional list of ldb controls; defaults to ["relax:0"].
    """
    # Build the default inside the call instead of using a mutable default
    # argument, which would be shared (and mutable) across all calls.
    if controls is None:
        controls = ["relax:0"]
    data = read_and_sub_file(ldif_path, subst_vars)
    ldb.modify_ldif(data, controls)
def setup_modify_ldif(ldb, ldif_path, subst_vars=None, controls=None):
    """Modify a ldb in the private dir.

    :param ldb: LDB object.
    :param ldif_path: LDIF file path.
    :param subst_vars: Optional dictionary with substitution variables.
    :param controls: Optional list of ldb controls; defaults to ["relax:0"].
    """
    # Build the default inside the call instead of using a mutable default
    # argument, which would be shared (and mutable) across all calls.
    if controls is None:
        controls = ["relax:0"]
    data = read_and_sub_file(ldif_path, subst_vars)
    ldb.modify_ldif(data, controls)
def setup_add_ldif(ldb, ldif_path, subst_vars=None, controls=None):
    """Setup a ldb in the private dir.

    :param ldb: LDB file to import data into
    :param ldif_path: Path of the LDIF file to load
    :param subst_vars: Optional variables to substitute in LDIF.
    :param controls: Optional list of ldb controls; defaults to ["relax:0"].
    """
    assert isinstance(ldif_path, str)
    # Build the default inside the call instead of using a mutable default
    # argument, which would be shared (and mutable) across all calls.
    if controls is None:
        controls = ["relax:0"]
    data = read_and_sub_file(ldif_path, subst_vars)
    ldb.add_ldif(data, controls)
def setup_add_ldif(ldb, ldif_path, subst_vars=None, controls=None):
    """Setup a ldb in the private dir.

    :param ldb: LDB file to import data into
    :param ldif_path: Path of the LDIF file to load
    :param subst_vars: Optional variables to substitute in LDIF.
    :param controls: Optional list of ldb controls; defaults to ["relax:0"].
    """
    assert isinstance(ldif_path, str)
    # Build the default inside the call instead of using a mutable default
    # argument, which would be shared (and mutable) across all calls.
    if controls is None:
        controls = ["relax:0"]
    data = read_and_sub_file(ldif_path, subst_vars)
    ldb.add_ldif(data, controls)
def ldif_to_samdb(dburl, lp, ldif_file, forced_local_dsa=None):
    """Build a temporary abbreviated sam database from an exported LDIF file.

    Imports every object and attribute relevant to the KCC algorithms from
    an LDIF file previously extracted from a DC database (containing no
    security-relevant information). The resulting abbreviated database lets
    a programmer/debugger run the KCC and verify that the generated topology
    is computationally the same across OSes and algorithm variants.

    :param dburl: path to the temporary abbreviated db to create
    :param lp: loadparm context
    :param ldif_file: path to the ldif file to import
    :param forced_local_dsa: optional NTDS settings DN overriding dsServiceName
    :raises LdifError: if dburl already exists or the import fails
    :return: the re-opened SamDB
    """
    if os.path.exists(dburl):
        raise LdifError("Specify a database (%s) that doesn't already exist." %
                        dburl)

    # Use ["modules:"] as we are attempting to build a sam
    # database as opposed to start it here.
    scratch = Ldb(url=dburl, session_info=system_session(), lp=lp,
                  options=["modules:"])

    scratch.transaction_start()
    try:
        scratch.add_ldif(read_and_sub_file(ldif_file, None), None)

        if forced_local_dsa:
            scratch.modify_ldif("""dn: @ROOTDSE
changetype: modify
replace: dsServiceName
dsServiceName: CN=NTDS Settings,%s
""" % forced_local_dsa)

        scratch.add_ldif("""dn: @MODULES
@LIST: rootdse,extended_dn_in,extended_dn_out_ldb,objectguid
-
""")
    except Exception as estr:
        scratch.transaction_cancel()
        raise LdifError("Failed to import %s: %s" % (ldif_file, estr))
    scratch.transaction_commit()

    # We have an abbreviated list of options here because we have built
    # an abbreviated database. We use the rootdse and extended-dn
    # modules only during this re-open
    return SamDB(url=dburl, session_info=system_session(), lp=lp)
def provision(self):
    """Provision an OpenLDAP backend for Samba.

    Generates slapd.conf (memberof/refint overlays, indexes, optional
    multi-master replication blocks), creates the per-partition database
    directories, converts the configuration to cn=config style with
    ``slapd -Ttest``, and loads the cn=samba entries. Also prepares
    self.slapd_provision_command / self.slapd_command for later use.

    Fixes vs. the previous revision:
    - the MMR server_port_string was built as ``"fmt" (args)`` — a call on a
      str object (TypeError) — instead of ``"fmt" % (args)``;
    - ``is not ""`` identity test replaced by ``!=``;
    - ``filter()`` result is materialised with list() so it can be
      length-checked and iterated twice on Python 3.
    """
    from samba.provision import ProvisioningError, setup_path
    # Wipe the directories so we can start
    shutil.rmtree(os.path.join(self.ldapdir, "db"), True)

    # Allow the test scripts to turn off fsync() for OpenLDAP as for TDB
    # and LDB
    nosync_config = ""
    if self.nosync:
        nosync_config = "dbnosync"

    lnkattr = self.schema.linked_attributes()

    refint_attributes = ""
    memberof_config = "# Generated from Samba4 schema\n"
    for att in lnkattr.keys():
        if lnkattr[att] is not None:
            refint_attributes = refint_attributes + " " + att
            memberof_config += read_and_sub_file(
                setup_path("memberof.conf"), {
                    "MEMBER_ATTR": att,
                    "MEMBEROF_ATTR": lnkattr[att]})

    refint_config = read_and_sub_file(setup_path("refint.conf"),
                                      {"LINK_ATTRS": refint_attributes})

    # Index every attribute the schema marks as searchable.
    attrs = ["linkID", "lDAPDisplayName"]
    res = self.schema.ldb.search(
        expression="(&(objectclass=attributeSchema)"
                   "(searchFlags:1.2.840.113556.1.4.803:=1))",
        base=self.names.schemadn, scope=SCOPE_ONELEVEL, attrs=attrs)
    index_config = ""
    for i in range(0, len(res)):
        index_attr = res[i]["lDAPDisplayName"][0]
        if index_attr == "objectGUID":
            index_attr = "entryUUID"
        index_config += "index " + index_attr + " eq\n"

    # generate serverids, ldap-urls and syncrepl-blocks for mmr hosts
    mmr_on_config = ""
    mmr_replicator_acl = ""
    mmr_serverids_config = ""
    mmr_syncrepl_schema_config = ""
    mmr_syncrepl_config_config = ""
    mmr_syncrepl_domaindns_config = ""
    mmr_syncrepl_forestdns_config = ""
    mmr_syncrepl_user_config = ""
    mmr_pass = ""
    if self.ol_mmr_urls is not None:
        # For now, make these equal
        mmr_pass = self.ldapadminpass

        # list() so the URLs survive len() and repeated iteration (py3
        # filter() returns a one-shot iterator).
        url_list = list(filter(None, self.ol_mmr_urls.split(',')))
        for url in url_list:
            self.logger.info("Using LDAP-URL: " + url)
        if len(url_list) == 1:
            raise ProvisioningError("At least 2 LDAP-URLs needed for MMR!")

        mmr_on_config = "MirrorMode On"
        mmr_replicator_acl = " by dn=cn=replicator,cn=samba read"
        serverid = 0
        for url in url_list:
            serverid = serverid + 1
            mmr_serverids_config += read_and_sub_file(
                setup_path("mmr_serverids.conf"), {
                    "SERVERID": str(serverid),
                    "LDAPSERVER": url})
            # Each partition gets its own replica id derived from the
            # server id.
            rid = serverid * 10
            rid = rid + 1
            mmr_syncrepl_schema_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": self.names.schemadn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_config_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": self.names.configdn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_domaindns_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": "dc=DomainDNSZones," + self.names.domaindn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_forestdns_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": "dc=ForestDNSZones," + self.names.domaindn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_user_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": self.names.domaindn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})

    # OpenLDAP cn=config initialisation
    olc_syncrepl_config = ""
    olc_mmr_config = ""
    # if mmr = yes, generate cn=config-replication directives
    # and olc_seed.lif for the other mmr-servers
    if self.ol_mmr_urls is not None:
        serverid = 0
        olc_serverids_config = ""
        olc_syncrepl_seed_config = ""
        olc_mmr_config += read_and_sub_file(setup_path("olc_mmr.conf"), {})
        rid = 500
        for url in url_list:
            serverid = serverid + 1
            olc_serverids_config += read_and_sub_file(
                setup_path("olc_serverid.conf"), {
                    "SERVERID": str(serverid),
                    "LDAPSERVER": url})
            rid = rid + 1
            olc_syncrepl_config += read_and_sub_file(
                setup_path("olc_syncrepl.conf"), {
                    "RID": str(rid),
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})
            olc_syncrepl_seed_config += read_and_sub_file(
                setup_path("olc_syncrepl_seed.conf"), {
                    "RID": str(rid),
                    "LDAPSERVER": url})
        setup_file(
            setup_path("olc_seed.ldif"), self.olcseedldif, {
                "OLC_SERVER_ID_CONF": olc_serverids_config,
                "OLC_PW": self.ldapadminpass,
                "OLC_SYNCREPL_CONF": olc_syncrepl_seed_config})
    # end olc

    setup_file(
        setup_path("slapd.conf"), self.slapdconf, {
            "DNSDOMAIN": self.names.dnsdomain,
            "LDAPDIR": self.ldapdir,
            "DOMAINDN": self.names.domaindn,
            "CONFIGDN": self.names.configdn,
            "SCHEMADN": self.names.schemadn,
            "MEMBEROF_CONFIG": memberof_config,
            "MIRRORMODE": mmr_on_config,
            "REPLICATOR_ACL": mmr_replicator_acl,
            "MMR_SERVERIDS_CONFIG": mmr_serverids_config,
            "MMR_SYNCREPL_SCHEMA_CONFIG": mmr_syncrepl_schema_config,
            "MMR_SYNCREPL_CONFIG_CONFIG": mmr_syncrepl_config_config,
            "MMR_SYNCREPL_DOMAINDNS_CONFIG": mmr_syncrepl_domaindns_config,
            "MMR_SYNCREPL_FORESTDNS_CONFIG": mmr_syncrepl_forestdns_config,
            "MMR_SYNCREPL_USER_CONFIG": mmr_syncrepl_user_config,
            "OLC_SYNCREPL_CONFIG": olc_syncrepl_config,
            "OLC_MMR_CONFIG": olc_mmr_config,
            "REFINT_CONFIG": refint_config,
            "INDEX_CONFIG": index_config,
            "ADMIN_UID": str(os.getuid()),
            "NOSYNC": nosync_config, })

    self.setup_db_dir(os.path.join(self.ldapdir, "db", "forestdns"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "domaindns"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "user"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "config"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "schema"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "samba"))

    # "#" comments the MMR-only lines out of cn=samba.ldif.
    if self.ol_mmr_urls is not None:
        mmr = ""
    else:
        mmr = "#"
    cn_samba = read_and_sub_file(
        setup_path("cn=samba.ldif"), {
            "LDAPADMINPASS": self.ldapadminpass,
            "MMR_PASSWORD": mmr_pass,
            "MMR": mmr})

    mapping = "schema-map-openldap-2.3"
    backend_schema = "backend-schema.schema"

    f = open(setup_path(mapping), 'r')
    try:
        backend_schema_data = self.schema.convert_to_openldap(
            "openldap", f.read())
    finally:
        f.close()
    assert backend_schema_data is not None
    f = open(os.path.join(self.ldapdir, backend_schema), 'w')
    try:
        f.write(backend_schema_data)
    finally:
        f.close()

    # now we generate the needed strings to start slapd automatically,
    if self.ldap_backend_extra_port is not None:
        # When we use MMR, we can't use 0.0.0.0 as it uses the name
        # specified there as part of it's clue as to it's own name,
        # and not to replicate to itself
        if self.ol_mmr_urls is None:
            server_port_string = ("ldap://0.0.0.0:%d" %
                                  self.ldap_backend_extra_port)
        else:
            # BUGFIX: the "%" operator was missing here, which made this a
            # (failing) call on a str object.
            server_port_string = "ldap://%s.%s:%d" % (
                self.names.hostname, self.names.dnsdomain,
                self.ldap_backend_extra_port)
    else:
        server_port_string = ""

    # Prepare the 'result' information - the commands to return in
    # particular
    self.slapd_provision_command = [
        self.slapd_path, "-F" + self.olcdir, "-h"]

    # copy this command so we have two version, one with -d0 and only
    # ldapi (or the forced ldap_uri), and one with all the listen commands
    self.slapd_command = list(self.slapd_provision_command)

    self.slapd_provision_command.extend([self.ldap_uri, "-d0"])
    uris = self.ldap_uri
    if server_port_string != "":
        uris = uris + " " + server_port_string
    self.slapd_command.append(uris)

    # Wipe the old sam.ldb databases away
    shutil.rmtree(self.olcdir, True)
    os.makedirs(self.olcdir, 0o770)

    # If we were just looking for crashes up to this point, it's a
    # good time to exit before we realise we don't have OpenLDAP on
    # this system
    if self.ldap_dryrun_mode:
        sys.exit(0)

    slapd_cmd = [self.slapd_path, "-Ttest", "-n", "0", "-f",
                 self.slapdconf, "-F", self.olcdir]

    retcode = subprocess.call(slapd_cmd, close_fds=True, shell=False)

    if retcode != 0:
        self.logger.error(
            "conversion from slapd.conf to cn=config failed slapd started with: %s" %
            "\'" + "\' \'".join(slapd_cmd) + "\'")
        raise ProvisioningError(
            "conversion from slapd.conf to cn=config failed")

    if not os.path.exists(os.path.join(self.olcdir, "cn=config.ldif")):
        raise ProvisioningError(
            "conversion from slapd.conf to cn=config failed")

    # Don't confuse the admin by leaving the slapd.conf around
    os.remove(self.slapdconf)

    cn_samba_cmd = [self.slapd_path, "-Tadd", "-b", "cn=samba",
                    "-F", self.olcdir]
    p = subprocess.Popen(cn_samba_cmd, stdin=subprocess.PIPE, shell=False)
    # NOTE(review): on Python 3 p.stdin is binary by default — confirm
    # cn_samba is bytes (or that stdin is reopened in text mode) here.
    p.stdin.write(cn_samba)
    p.communicate()
def provision(self):
    """Provision a Fedora DS backend for Samba.

    Writes the setup-ds answer file (fedorads.inf) and the partition, SASL,
    DNA, PAM, referential-integrity, linked-attribute, index and cn=samba
    LDIF fragments; converts the AD schema into Fedora DS format; runs the
    setup-ds utility to create the instance; and finally loads the samba
    partition with ldif2db. Also prepares self.slapd_provision_command /
    self.slapd_command for later use.

    :raises ProvisioningError: if setup-ds-path was not given, or setup-ds
        or ldif2db exits non-zero
    """
    from samba.provision import ProvisioningError, setup_path
    if self.ldap_backend_extra_port is not None:
        serverport = "ServerPort=%d" % self.ldap_backend_extra_port
    else:
        serverport = ""

    # Answer file consumed by setup-ds.pl below.
    setup_file(
        setup_path("fedorads.inf"), self.fedoradsinf, {
            "ROOT": self.root,
            "HOSTNAME": self.hostname,
            "DNSDOMAIN": self.names.dnsdomain,
            "LDAPDIR": self.ldapdir,
            "DOMAINDN": self.names.domaindn,
            "LDAP_INSTANCE": self.ldap_instance,
            "LDAPMANAGERDN": self.names.ldapmanagerdn,
            "LDAPMANAGERPASS": self.ldapadminpass,
            "SERVERPORT": serverport})

    setup_file(
        setup_path("fedorads-partitions.ldif"), self.partitions_ldif, {
            "CONFIGDN": self.names.configdn,
            "SCHEMADN": self.names.schemadn,
            "SAMBADN": self.sambadn,
        })

    setup_file(setup_path("fedorads-sasl.ldif"), self.sasl_ldif, {
        "SAMBADN": self.sambadn,
    })

    setup_file(
        setup_path("fedorads-dna.ldif"), self.dna_ldif, {
            "DOMAINDN": self.names.domaindn,
            "SAMBADN": self.sambadn,
            "DOMAINSID": str(self.domainsid),
        })

    setup_file(setup_path("fedorads-pam.ldif"), self.pam_ldif)

    lnkattr = self.schema.linked_attributes()

    f = open(setup_path("fedorads-refint-delete.ldif"), 'r')
    try:
        refint_config = f.read()
    finally:
        f.close()
    memberof_config = ""
    index_config = ""
    argnum = 3

    # One refint/memberof/index fragment per linked attribute; argnum
    # numbers the refint plugin arguments (first two slots are fixed).
    for attr in lnkattr.keys():
        if lnkattr[attr] is not None:
            refint_config += read_and_sub_file(
                setup_path("fedorads-refint-add.ldif"), {
                    "ARG_NUMBER": str(argnum),
                    "LINK_ATTR": attr})
            memberof_config += read_and_sub_file(
                setup_path("fedorads-linked-attributes.ldif"), {
                    "MEMBER_ATTR": attr,
                    "MEMBEROF_ATTR": lnkattr[attr]})
            index_config += read_and_sub_file(
                setup_path("fedorads-index.ldif"), {"ATTR": attr})
            argnum += 1

    f = open(self.refint_ldif, 'w')
    try:
        f.write(refint_config)
    finally:
        f.close()
    f = open(self.linked_attrs_ldif, 'w')
    try:
        f.write(memberof_config)
    finally:
        f.close()

    # Index every attribute the schema marks as searchable.
    attrs = ["lDAPDisplayName"]
    res = self.schema.ldb.search(
        expression="(&(objectclass=attributeSchema)"
                   "(searchFlags:1.2.840.113556.1.4.803:=1))",
        base=self.names.schemadn, scope=SCOPE_ONELEVEL, attrs=attrs)

    for i in range(0, len(res)):
        attr = res[i]["lDAPDisplayName"][0]
        if attr == "objectGUID":
            attr = "nsUniqueId"
        index_config += read_and_sub_file(
            setup_path("fedorads-index.ldif"), {"ATTR": attr})

    f = open(self.index_ldif, 'w')
    try:
        f.write(index_config)
    finally:
        f.close()

    setup_file(setup_path("fedorads-samba.ldif"), self.samba_ldif, {
        "SAMBADN": self.sambadn,
        "LDAPADMINPASS": self.ldapadminpass
    })

    mapping = "schema-map-fedora-ds-1.0"
    backend_schema = "99_ad.ldif"

    # Build a schema file in Fedora DS format
    f = open(setup_path(mapping), 'r')
    try:
        backend_schema_data = self.schema.convert_to_openldap(
            "fedora-ds", f.read())
    finally:
        f.close()
    assert backend_schema_data is not None
    f = open(os.path.join(self.ldapdir, backend_schema), 'w')
    try:
        f.write(backend_schema_data)
    finally:
        f.close()

    self.credentials.set_bind_dn(self.names.ldapmanagerdn)

    # Destroy the target directory, or else setup-ds.pl will complain
    fedora_ds_dir = \
        os.path.join(self.ldapdir, "slapd-" + self.ldap_instance)
    shutil.rmtree(fedora_ds_dir, True)

    self.slapd_provision_command = [
        self.slapd_path, "-D", fedora_ds_dir, "-i", self.slapd_pid]
    # In the 'provision' command line, stay in the foreground so we can
    # easily kill it
    self.slapd_provision_command.append("-d0")

    # the command for the final run is the normal script
    self.slapd_command = \
        [os.path.join(self.ldapdir, "slapd-" + self.ldap_instance,
                      "start-slapd")]

    # If we were just looking for crashes up to this point, it's a
    # good time to exit before we realise we don't have Fedora DS on
    # this system
    if self.ldap_dryrun_mode:
        sys.exit(0)

    # Try to print helpful messages when the user has not specified the
    # path to the setup-ds tool
    if self.setup_ds_path is None:
        raise ProvisioningError(
            "Fedora DS LDAP-Backend must be setup with path to setup-ds, e.g. --setup-ds-path=\"/usr/sbin/setup-ds.pl\"!"
        )
    if not os.path.exists(self.setup_ds_path):
        self.logger.warning("Path (%s) to slapd does not exist!",
                            self.setup_ds_path)

    # Run the Fedora DS setup utility
    retcode = subprocess.call(
        [self.setup_ds_path, "--silent", "--file", self.fedoradsinf],
        close_fds=True, shell=False)
    if retcode != 0:
        raise ProvisioningError("setup-ds failed")

    # Load samba-admin
    retcode = subprocess.call([
        os.path.join(self.ldapdir, "slapd-" + self.ldap_instance, "ldif2db"),
        "-s", self.sambadn, "-i", self.samba_ldif],
        close_fds=True, shell=False)
    if retcode != 0:
        raise ProvisioningError("ldif2db failed")
def deprovision_schema(setup_path, names, lp, creds, reporter, ldif, msg, modify_mode=False):
    """Deprovision/unmodify schema using LDIF specified file, by reverting the
    modifications contained therein.

    In modify mode the LDIF is parsed entry by entry, each "add:" is turned
    into a "delete:", "replace:" sections are dropped, and the resulting
    reversal entries are applied in reverse order (best-effort: individual
    modify failures are ignored). Otherwise every "dn:" line is collected
    and the corresponding entries are deleted in reverse order. Everything
    runs inside a single transaction which is cancelled on failure.

    :param setup_path: Path to the setup directory.
    :param names: provision names object.
    :param lp: Loadparm context
    :param creds: Credentials Context
    :param reporter: A progress reporter instance (subclass of
        AbstractProgressReporter)
    :param ldif: path to the LDIF file
    :param msg: reporter message
    :param modify_mode: whether entries are added or modified
    """
    session_info = system_session()
    db = SamDB(url=get_ldb_url(lp, creds, names), session_info=session_info,
               credentials=creds, lp=lp)
    db.transaction_start()
    try:
        reporter.reportNextStep(msg)
        ldif_content = read_and_sub_file(setup_path(ldif),
                                         {"FIRSTORG": names.firstorg,
                                          "FIRSTORGDN": names.firstorgdn,
                                          "CONFIGDN": names.configdn,
                                          "SCHEMADN": names.schemadn,
                                          "DOMAINDN": names.domaindn,
                                          "DOMAIN": names.domain,
                                          "DNSDOMAIN": names.dnsdomain,
                                          "NETBIOSNAME": names.netbiosname,
                                          "HOSTNAME": names.hostname
                                          })
        if modify_mode:
            lines = ldif_content.splitlines()
            keep_line = False
            entries = []
            current_entry = []
            entries.append(current_entry)
            for line in lines:
                skip_this_line = False
                if line.startswith("dn:") or line == "":
                    # Start a new entry at each dn:/blank-line boundary.
                    current_entry = []
                    entries.append(current_entry)
                    keep_line = True
                elif line.startswith("add:"):
                    keep_line = True
                    # Revert the original addition.
                    line = "delete:" + line[4:]
                elif line.startswith("replace:"):
                    keep_line = False
                elif line.startswith("#") or line.strip() == "":
                    skip_this_line = True
                if keep_line and not skip_this_line:
                    current_entry.append(line)
            # Apply the reversals in reverse order of the original file.
            entries.reverse()
            for entry in entries:
                ldif_content = "\n".join(entry)
                # py2/py3-compatible print call (was a py2 print statement)
                print(ldif_content)
                try:
                    db.modify_ldif(ldif_content)
                except Exception:
                    # Deliberate best-effort: a reversal that no longer
                    # applies is skipped.  Narrowed from a bare except so
                    # KeyboardInterrupt/SystemExit are not swallowed.
                    pass
        else:
            lines = ldif_content.splitlines()
            lines.reverse()
            for line in lines:
                if line.startswith("dn:"):
                    db.delete(line[4:])
    except:
        # Bare except is deliberate: the transaction must be cancelled on
        # ANY failure; the exception is always re-raised.
        db.transaction_cancel()
        raise
    db.transaction_commit()
def provision(self):
    """Provision an OpenLDAP backend for Samba (older paths-based variant).

    Generates slapd.conf (memberof/refint overlays, indexes, optional
    multi-master replication blocks), sets up the per-partition db configs,
    writes the cn=samba seed entries, converts the AD schema to OpenLDAP
    format, and converts the configuration to cn=config style with
    ``slapd -Ttest``. Also prepares self.slapd_provision_command /
    self.slapd_command for later use.

    Fixes vs. the previous revision:
    - Python-2-only octal literals 0700/0770 replaced with 0o700/0o770
      (same value; valid on Python 2.6+ and Python 3);
    - ``is not ""`` identity test replaced by ``!=``;
    - ``filter()`` results materialised with list() so they can be
      length-checked and iterated twice on Python 3.
    """
    # Wipe the directories so we can start
    shutil.rmtree(os.path.join(self.paths.ldapdir, "db"), True)

    # Allow the test scripts to turn off fsync() for OpenLDAP as for TDB
    # and LDB
    nosync_config = ""
    if self.nosync:
        nosync_config = "dbnosync"

    lnkattr = self.schema.linked_attributes()

    refint_attributes = ""
    memberof_config = "# Generated from Samba4 schema\n"
    for att in lnkattr.keys():
        if lnkattr[att] is not None:
            refint_attributes = refint_attributes + " " + att
            memberof_config += read_and_sub_file(
                self.setup_path("memberof.conf"),
                {"MEMBER_ATTR": att, "MEMBEROF_ATTR": lnkattr[att]})

    refint_config = read_and_sub_file(self.setup_path("refint.conf"),
                                      {"LINK_ATTRS": refint_attributes})

    # Index every attribute the schema marks as searchable.
    attrs = ["linkID", "lDAPDisplayName"]
    res = self.schema.ldb.search(
        expression="(&(objectclass=attributeSchema)(searchFlags:1.2.840.113556.1.4.803:=1))",
        base=self.names.schemadn, scope=SCOPE_ONELEVEL, attrs=attrs)
    index_config = ""
    for i in range(0, len(res)):
        index_attr = res[i]["lDAPDisplayName"][0]
        if index_attr == "objectGUID":
            index_attr = "entryUUID"
        index_config += "index " + index_attr + " eq\n"

    # generate serverids, ldap-urls and syncrepl-blocks for mmr hosts
    mmr_on_config = ""
    mmr_replicator_acl = ""
    mmr_serverids_config = ""
    mmr_syncrepl_schema_config = ""
    mmr_syncrepl_config_config = ""
    mmr_syncrepl_user_config = ""

    if self.ol_mmr_urls is not None:
        # For now, make these equal
        mmr_pass = self.ldapadminpass

        # list() so the URLs survive len() and repeated iteration (py3
        # filter() returns a one-shot iterator).  URLs may be separated by
        # spaces or commas.
        url_list = list(filter(None, self.ol_mmr_urls.split(' ')))
        if (len(url_list) == 1):
            url_list = list(filter(None, self.ol_mmr_urls.split(',')))

        mmr_on_config = "MirrorMode On"
        mmr_replicator_acl = " by dn=cn=replicator,cn=samba read"
        serverid = 0
        for url in url_list:
            serverid = serverid + 1
            mmr_serverids_config += read_and_sub_file(
                self.setup_path("mmr_serverids.conf"),
                {"SERVERID": str(serverid), "LDAPSERVER": url})
            # Each partition gets its own replica id derived from the
            # server id.
            rid = serverid * 10
            rid = rid + 1
            mmr_syncrepl_schema_config += read_and_sub_file(
                self.setup_path("mmr_syncrepl.conf"),
                {"RID": str(rid), "MMRDN": self.names.schemadn,
                 "LDAPSERVER": url, "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_config_config += read_and_sub_file(
                self.setup_path("mmr_syncrepl.conf"),
                {"RID": str(rid), "MMRDN": self.names.configdn,
                 "LDAPSERVER": url, "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_user_config += read_and_sub_file(
                self.setup_path("mmr_syncrepl.conf"),
                {"RID": str(rid), "MMRDN": self.names.domaindn,
                 "LDAPSERVER": url, "MMR_PASSWORD": mmr_pass})

    # OpenLDAP cn=config initialisation
    olc_syncrepl_config = ""
    olc_mmr_config = ""
    # if mmr = yes, generate cn=config-replication directives
    # and olc_seed.lif for the other mmr-servers
    if self.ol_mmr_urls is not None:
        serverid = 0
        olc_serverids_config = ""
        olc_syncrepl_seed_config = ""
        olc_mmr_config += read_and_sub_file(
            self.setup_path("olc_mmr.conf"), {})
        rid = 1000
        for url in url_list:
            serverid = serverid + 1
            olc_serverids_config += read_and_sub_file(
                self.setup_path("olc_serverid.conf"),
                {"SERVERID": str(serverid), "LDAPSERVER": url})
            rid = rid + 1
            olc_syncrepl_config += read_and_sub_file(
                self.setup_path("olc_syncrepl.conf"),
                {"RID": str(rid), "LDAPSERVER": url,
                 "MMR_PASSWORD": mmr_pass})
            olc_syncrepl_seed_config += read_and_sub_file(
                self.setup_path("olc_syncrepl_seed.conf"),
                {"RID": str(rid), "LDAPSERVER": url})
        setup_file(self.setup_path("olc_seed.ldif"), self.paths.olcseedldif,
                   {"OLC_SERVER_ID_CONF": olc_serverids_config,
                    "OLC_PW": self.ldapadminpass,
                    "OLC_SYNCREPL_CONF": olc_syncrepl_seed_config})
    # end olc

    setup_file(self.setup_path("slapd.conf"), self.paths.slapdconf,
               {"DNSDOMAIN": self.names.dnsdomain,
                "LDAPDIR": self.paths.ldapdir,
                "DOMAINDN": self.names.domaindn,
                "CONFIGDN": self.names.configdn,
                "SCHEMADN": self.names.schemadn,
                "MEMBEROF_CONFIG": memberof_config,
                "MIRRORMODE": mmr_on_config,
                "REPLICATOR_ACL": mmr_replicator_acl,
                "MMR_SERVERIDS_CONFIG": mmr_serverids_config,
                "MMR_SYNCREPL_SCHEMA_CONFIG": mmr_syncrepl_schema_config,
                "MMR_SYNCREPL_CONFIG_CONFIG": mmr_syncrepl_config_config,
                "MMR_SYNCREPL_USER_CONFIG": mmr_syncrepl_user_config,
                "OLC_SYNCREPL_CONFIG": olc_syncrepl_config,
                "OLC_MMR_CONFIG": olc_mmr_config,
                "REFINT_CONFIG": refint_config,
                "INDEX_CONFIG": index_config,
                "NOSYNC": nosync_config})

    setup_db_config(self.setup_path,
                    os.path.join(self.paths.ldapdir, "db", "user"))
    setup_db_config(self.setup_path,
                    os.path.join(self.paths.ldapdir, "db", "config"))
    setup_db_config(self.setup_path,
                    os.path.join(self.paths.ldapdir, "db", "schema"))

    if not os.path.exists(os.path.join(self.paths.ldapdir, "db", "samba",
                                       "cn=samba")):
        os.makedirs(os.path.join(self.paths.ldapdir, "db", "samba",
                                 "cn=samba"), 0o700)

    setup_file(self.setup_path("cn=samba.ldif"),
               os.path.join(self.paths.ldapdir, "db", "samba",
                            "cn=samba.ldif"),
               {"UUID": str(uuid.uuid4()),
                "LDAPTIME": timestring(int(time.time()))})
    setup_file(self.setup_path("cn=samba-admin.ldif"),
               os.path.join(self.paths.ldapdir, "db", "samba", "cn=samba",
                            "cn=samba-admin.ldif"),
               {"LDAPADMINPASS_B64": b64encode(self.ldapadminpass),
                "UUID": str(uuid.uuid4()),
                "LDAPTIME": timestring(int(time.time()))})

    if self.ol_mmr_urls is not None:
        setup_file(self.setup_path("cn=replicator.ldif"),
                   os.path.join(self.paths.ldapdir, "db", "samba",
                                "cn=samba", "cn=replicator.ldif"),
                   {"MMR_PASSWORD_B64": b64encode(mmr_pass),
                    "UUID": str(uuid.uuid4()),
                    "LDAPTIME": timestring(int(time.time()))})

    mapping = "schema-map-openldap-2.3"
    backend_schema = "backend-schema.schema"

    backend_schema_data = self.schema.ldb.convert_schema_to_openldap(
        "openldap", open(self.setup_path(mapping), 'r').read())
    assert backend_schema_data is not None
    open(os.path.join(self.paths.ldapdir, backend_schema),
         'w').write(backend_schema_data)

    # now we generate the needed strings to start slapd automatically,
    # first ldapi_uri...
    if self.ldap_backend_extra_port is not None:
        # When we use MMR, we can't use 0.0.0.0 as it uses the name
        # specified there as part of it's clue as to it's own name,
        # and not to replicate to itself
        if self.ol_mmr_urls is None:
            server_port_string = ("ldap://0.0.0.0:%d" %
                                  self.ldap_backend_extra_port)
        else:
            server_port_string = ("ldap://" + self.names.hostname + "." +
                                  self.names.dnsdomain +
                                  ":%d" % self.ldap_backend_extra_port)
    else:
        server_port_string = ""

    # Prepare the 'result' information - the commands to return in
    # particular
    self.slapd_provision_command = [self.slapd_path]
    self.slapd_provision_command.append("-F" + self.paths.olcdir)
    self.slapd_provision_command.append("-h")

    # copy this command so we have two version, one with -d0 and only
    # ldapi, and one with all the listen commands
    self.slapd_command = list(self.slapd_provision_command)

    self.slapd_provision_command.append(self.ldapi_uri)
    self.slapd_provision_command.append("-d0")

    uris = self.ldapi_uri
    if server_port_string != "":
        uris = uris + " " + server_port_string
    self.slapd_command.append(uris)

    # Set the username - done here because Fedora DS still uses the admin
    # DN and simple bind
    self.credentials.set_username("samba-admin")

    # If we were just looking for crashes up to this point, it's a
    # good time to exit before we realise we don't have OpenLDAP on
    # this system
    if self.ldap_dryrun_mode:
        sys.exit(0)

    # Finally, convert the configuration into cn=config style!
    if not os.path.isdir(self.paths.olcdir):
        os.makedirs(self.paths.olcdir, 0o770)
    retcode = subprocess.call([self.slapd_path, "-Ttest", "-f",
                               self.paths.slapdconf, "-F",
                               self.paths.olcdir],
                              close_fds=True, shell=False)

    # We can't do this, as OpenLDAP is strange. It gives an error
    # output to the above, but does the conversion sucessfully...
    #
    # if retcode != 0:
    #     raise ProvisioningError("conversion from slapd.conf to "
    #                             "cn=config failed")

    if not os.path.exists(os.path.join(self.paths.olcdir,
                                       "cn=config.ldif")):
        raise ProvisioningError(
            "conversion from slapd.conf to cn=config failed")

    # Don't confuse the admin by leaving the slapd.conf around
    os.remove(self.paths.slapdconf)
def provision(self):
    """Provision a Fedora DS (389) LDAP backend for Samba.

    Generates the setup-ds .inf file and the partition/SASL/DNA/PAM/
    refint/memberof/index LDIF fragments from templates, converts the AD
    schema to Fedora DS format, runs the setup-ds utility and finally
    loads the samba partition with ldif2db.

    Raises ProvisioningError if no setup-ds path was configured, or if
    setup-ds or ldif2db exit non-zero.
    """
    from samba.provision import ProvisioningError, setup_path
    # Optional extra LDAP port ends up as a ServerPort= directive in the
    # setup-ds .inf file; empty string means "use the default".
    if self.ldap_backend_extra_port is not None:
        serverport = "ServerPort=%d" % self.ldap_backend_extra_port
    else:
        serverport = ""

    # Main setup-ds answer file.
    setup_file(setup_path("fedorads.inf"), self.fedoradsinf,
               {"ROOT": self.root,
                "HOSTNAME": self.hostname,
                "DNSDOMAIN": self.names.dnsdomain,
                "LDAPDIR": self.ldapdir,
                "DOMAINDN": self.names.domaindn,
                "LDAP_INSTANCE": self.ldap_instance,
                "LDAPMANAGERDN": self.names.ldapmanagerdn,
                "LDAPMANAGERPASS": self.ldapadminpass,
                "SERVERPORT": serverport})

    # Partition layout (config/schema/samba suffixes).
    setup_file(setup_path("fedorads-partitions.ldif"),
               self.partitions_ldif,
               {"CONFIGDN": self.names.configdn,
                "SCHEMADN": self.names.schemadn,
                "SAMBADN": self.sambadn,
                })

    setup_file(setup_path("fedorads-sasl.ldif"), self.sasl_ldif,
               {"SAMBADN": self.sambadn,
                })

    setup_file(setup_path("fedorads-dna.ldif"), self.dna_ldif,
               {"DOMAINDN": self.names.domaindn,
                "SAMBADN": self.sambadn,
                "DOMAINSID": str(self.domainsid),
                })

    setup_file(setup_path("fedorads-pam.ldif"), self.pam_ldif)

    # Linked attributes from the AD schema drive the referential
    # integrity and memberOf plugin configuration below.
    lnkattr = self.schema.linked_attributes()

    f = open(setup_path("fedorads-refint-delete.ldif"), 'r')
    try:
        refint_config = f.read()
    finally:
        f.close()
    memberof_config = ""
    index_config = ""
    # refint plugin argument slots start at 3 (slots 0-2 are reserved by
    # the template's fixed arguments).
    argnum = 3

    for attr in lnkattr.keys():
        if lnkattr[attr] is not None:
            refint_config += read_and_sub_file(
                setup_path("fedorads-refint-add.ldif"),
                { "ARG_NUMBER" : str(argnum),
                  "LINK_ATTR" : attr })
            memberof_config += read_and_sub_file(
                setup_path("fedorads-linked-attributes.ldif"),
                { "MEMBER_ATTR" : attr,
                  "MEMBEROF_ATTR" : lnkattr[attr] })
            index_config += read_and_sub_file(
                setup_path("fedorads-index.ldif"), { "ATTR" : attr })
            argnum += 1

    f = open(self.refint_ldif, 'w')
    try:
        f.write(refint_config)
    finally:
        f.close()
    f = open(self.linked_attrs_ldif, 'w')
    try:
        f.write(memberof_config)
    finally:
        f.close()

    # Index every attribute the AD schema flags for indexing
    # (searchFlags bit 1, matched with the LDAP_MATCHING_RULE_BIT_AND
    # OID 1.2.840.113556.1.4.803).
    attrs = ["lDAPDisplayName"]
    res = self.schema.ldb.search(
        expression="(&(objectclass=attributeSchema)"
        "(searchFlags:1.2.840.113556.1.4.803:=1))",
        base=self.names.schemadn,
        scope=SCOPE_ONELEVEL, attrs=attrs)

    for i in range(0, len(res)):
        attr = res[i]["lDAPDisplayName"][0]

        # Fedora DS stores the object GUID as nsUniqueId.
        if attr == "objectGUID":
            attr = "nsUniqueId"

        index_config += read_and_sub_file(
            setup_path("fedorads-index.ldif"), { "ATTR" : attr })

    f = open(self.index_ldif, 'w')
    try:
        f.write(index_config)
    finally:
        f.close()

    setup_file(setup_path("fedorads-samba.ldif"), self.samba_ldif, {
        "SAMBADN": self.sambadn,
        "LDAPADMINPASS": self.ldapadminpass
        })

    mapping = "schema-map-fedora-ds-1.0"
    backend_schema = "99_ad.ldif"

    # Build a schema file in Fedora DS format
    f = open(setup_path(mapping), 'r')
    try:
        backend_schema_data = self.schema.convert_to_openldap("fedora-ds",
            f.read())
    finally:
        f.close()
    assert backend_schema_data is not None
    f = open(os.path.join(self.ldapdir, backend_schema), 'w')
    try:
        f.write(backend_schema_data)
    finally:
        f.close()

    self.credentials.set_bind_dn(self.names.ldapmanagerdn)

    # Destroy the target directory, or else setup-ds.pl will complain
    fedora_ds_dir = os.path.join(self.ldapdir,
        "slapd-" + self.ldap_instance)
    shutil.rmtree(fedora_ds_dir, True)

    self.slapd_provision_command = [self.slapd_path, "-D", fedora_ds_dir,
            "-i", self.slapd_pid]
    # In the 'provision' command line, stay in the foreground so we can
    # easily kill it
    self.slapd_provision_command.append("-d0")

    # the command for the final run is the normal script
    self.slapd_command = [os.path.join(self.ldapdir,
        "slapd-" + self.ldap_instance, "start-slapd")]

    # If we were just looking for crashes up to this point, it's a
    # good time to exit before we realise we don't have Fedora DS on
    if self.ldap_dryrun_mode:
        sys.exit(0)

    # Try to print helpful messages when the user has not specified the
    # path to the setup-ds tool
    if self.setup_ds_path is None:
        raise ProvisioningError("Fedora DS LDAP-Backend must be setup with path to setup-ds, e.g. --setup-ds-path=\"/usr/sbin/setup-ds.pl\"!")
    if not os.path.exists(self.setup_ds_path):
        self.logger.warning("Path (%s) to slapd does not exist!",
            self.setup_ds_path)

    # Run the Fedora DS setup utility
    retcode = subprocess.call([self.setup_ds_path, "--silent", "--file",
        self.fedoradsinf], close_fds=True, shell=False)
    if retcode != 0:
        raise ProvisioningError("setup-ds failed")

    # Load samba-admin
    retcode = subprocess.call([
        os.path.join(self.ldapdir, "slapd-" + self.ldap_instance, "ldif2db"),
        "-s", self.sambadn, "-i", self.samba_ldif],
        close_fds=True, shell=False)
    if retcode != 0:
        raise ProvisioningError("ldif2db failed")
def __init__(self, domain_sid, invocationid=None, schemadn=None,
             files=None, override_prefixmap=None, additional_prefixmap=None,
             base_schema=None):
    """Load schema for the SamDB from the AD schema files and
    samba4_schema.ldif.

    :param domain_sid: domain SID used to build the schema partition
        security descriptor
    :param invocationid: optional invocation id to stamp on the SamDB
    :param schemadn: DN of the schema
    :param files: optional list of extra schema LDIF files to append
    :param override_prefixmap: optional prefixMap blob replacing the
        default prefixMap.txt contents (assumed bytes, like the file
        contents it replaces -- TODO confirm against callers)
    :param additional_prefixmap: optional list of extra prefixMap lines
    :param base_schema: name of the base schema set to load (defaults
        to Schema.default_base_schema())

    The schema data is loaded once here, to avoid double-parsing when
    it is later added to the database.
    """
    from samba.provision import setup_path

    if base_schema is None:
        base_schema = Schema.default_base_schema()
    self.base_schema = base_schema

    self.schemadn = schemadn
    # We need to have the am_rodc=False just to keep some warnings quiet -
    # this isn't a real SAM, so it's meaningless.
    self.ldb = SamDB(global_schema=False, am_rodc=False)
    if invocationid is not None:
        self.ldb.set_invocation_id(invocationid)

    self.schema_data = read_ms_schema(
        setup_path('ad-schema/%s' % Schema.base_schemas[base_schema][0]),
        setup_path('ad-schema/%s' % Schema.base_schemas[base_schema][1]))

    if files is not None:
        for file in files:
            # schema_data is text, so decode the raw bytes; the
            # original appended bytes onto str (a TypeError on
            # Python 3) and leaked the file handle.
            with open(file, 'rb') as f:
                self.schema_data += f.read().decode('utf8')

    self.schema_data = substitute_var(self.schema_data,
                                      {"SCHEMADN": schemadn})
    check_all_substituted(self.schema_data)

    schema_version = str(Schema.get_version(base_schema))
    self.schema_dn_modify = read_and_sub_file(
        setup_path("provision_schema_basedn_modify.ldif"),
        {"SCHEMADN": schemadn, "OBJVERSION": schema_version})

    descr = b64encode(get_schema_descriptor(domain_sid)).decode('utf8')
    self.schema_dn_add = read_and_sub_file(
        setup_path("provision_schema_basedn.ldif"),
        {"SCHEMADN": schemadn, "DESCRIPTOR": descr})

    if override_prefixmap is not None:
        self.prefixmap_data = override_prefixmap
    else:
        with open(setup_path("prefixMap.txt"), 'rb') as f:
            self.prefixmap_data = f.read()

    if additional_prefixmap is not None:
        # prefixmap_data holds bytes, so encode each additional line
        # before appending (str onto bytes raises on Python 3).
        for map in additional_prefixmap:
            self.prefixmap_data += ("%s\n" % map).encode('utf8')

    self.prefixmap_data = b64encode(self.prefixmap_data).decode('utf8')

    # We don't actually add this ldif, just parse it
    prefixmap_ldif = "dn: %s\nprefixMap:: %s\n\n" % (self.schemadn,
                                                     self.prefixmap_data)
    self.set_from_ldif(prefixmap_ldif, self.schema_data, self.schemadn)
def provision(self):
    """Provision an OpenLDAP backend for Samba.

    Generates slapd.conf (including memberof/refint/index and optional
    multi-master replication settings) from templates, converts the AD
    schema to OpenLDAP format, converts the configuration to cn=config
    style, and prepares the slapd start commands.

    Raises ProvisioningError if fewer than two LDAP URLs are supplied
    for MMR or if the slapd.conf -> cn=config conversion fails.
    """
    from samba.provision import ProvisioningError, setup_path
    # Wipe the directories so we can start
    shutil.rmtree(os.path.join(self.ldapdir, "db"), True)

    # Allow the test scripts to turn off fsync() for OpenLDAP as for TDB
    # and LDB
    nosync_config = ""
    if self.nosync:
        nosync_config = "dbnosync"

    lnkattr = self.schema.linked_attributes()

    refint_attributes = ""
    memberof_config = "# Generated from Samba4 schema\n"
    for att in lnkattr.keys():
        if lnkattr[att] is not None:
            refint_attributes = refint_attributes + " " + att

            memberof_config += read_and_sub_file(
                setup_path("memberof.conf"), {
                    "MEMBER_ATTR": att,
                    "MEMBEROF_ATTR": lnkattr[att]})

    refint_config = read_and_sub_file(
        setup_path("refint.conf"), {"LINK_ATTRS": refint_attributes})

    # Index every attribute the AD schema flags for indexing
    # (searchFlags bit 1, matched with OID 1.2.840.113556.1.4.803).
    attrs = ["linkID", "lDAPDisplayName"]
    res = self.schema.ldb.search(
        expression="(&(objectclass=attributeSchema)"
                   "(searchFlags:1.2.840.113556.1.4.803:=1))",
        base=self.names.schemadn, scope=SCOPE_ONELEVEL, attrs=attrs)
    index_config = ""
    for i in range(0, len(res)):
        index_attr = res[i]["lDAPDisplayName"][0]
        # OpenLDAP stores the object GUID as entryUUID.
        if index_attr == "objectGUID":
            index_attr = "entryUUID"

        index_config += "index " + index_attr + " eq\n"

    # generate serverids, ldap-urls and syncrepl-blocks for mmr hosts
    mmr_on_config = ""
    mmr_replicator_acl = ""
    mmr_serverids_config = ""
    mmr_syncrepl_schema_config = ""
    mmr_syncrepl_config_config = ""
    mmr_syncrepl_domaindns_config = ""
    mmr_syncrepl_forestdns_config = ""
    mmr_syncrepl_user_config = ""
    mmr_pass = ""

    if self.ol_mmr_urls is not None:
        # For now, make these equal
        mmr_pass = self.ldapadminpass

        # BUG FIX: materialise the filter() result -- on Python 3 a
        # filter object has no len() and can only be iterated once,
        # which broke the length check and the second loop below.
        url_list = list(filter(None, self.ol_mmr_urls.split(',')))
        for url in url_list:
            self.logger.info("Using LDAP-URL: " + url)
        if len(url_list) == 1:
            raise ProvisioningError("At least 2 LDAP-URLs needed for MMR!")

        mmr_on_config = "MirrorMode On"
        mmr_replicator_acl = " by dn=cn=replicator,cn=samba read"
        serverid = 0
        for url in url_list:
            serverid = serverid + 1
            mmr_serverids_config += read_and_sub_file(
                setup_path("mmr_serverids.conf"), {
                    "SERVERID": str(serverid),
                    "LDAPSERVER": url})
            # Each server gets a block of ten replication IDs; one per
            # replicated naming context.
            rid = serverid * 10
            rid = rid + 1
            mmr_syncrepl_schema_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": self.names.schemadn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_config_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": self.names.configdn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_domaindns_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": "dc=DomainDNSZones," + self.names.domaindn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_forestdns_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": "dc=ForestDNSZones," + self.names.domaindn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})
            rid = rid + 1
            mmr_syncrepl_user_config += read_and_sub_file(
                setup_path("mmr_syncrepl.conf"), {
                    "RID": str(rid),
                    "MMRDN": self.names.domaindn,
                    "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})

    # OpenLDAP cn=config initialisation
    olc_syncrepl_config = ""
    olc_mmr_config = ""
    # if mmr = yes, generate cn=config-replication directives
    # and olc_seed.lif for the other mmr-servers
    if self.ol_mmr_urls is not None:
        serverid = 0
        olc_serverids_config = ""
        olc_syncrepl_seed_config = ""
        olc_mmr_config += read_and_sub_file(
            setup_path("olc_mmr.conf"), {})
        rid = 500
        for url in url_list:
            serverid = serverid + 1
            olc_serverids_config += read_and_sub_file(
                setup_path("olc_serverid.conf"), {
                    "SERVERID": str(serverid), "LDAPSERVER": url})

            rid = rid + 1
            olc_syncrepl_config += read_and_sub_file(
                setup_path("olc_syncrepl.conf"), {
                    "RID": str(rid), "LDAPSERVER": url,
                    "MMR_PASSWORD": mmr_pass})

            olc_syncrepl_seed_config += read_and_sub_file(
                setup_path("olc_syncrepl_seed.conf"), {
                    "RID": str(rid), "LDAPSERVER": url})

        setup_file(setup_path("olc_seed.ldif"), self.olcseedldif,
                   {"OLC_SERVER_ID_CONF": olc_serverids_config,
                    "OLC_PW": self.ldapadminpass,
                    "OLC_SYNCREPL_CONF": olc_syncrepl_seed_config})
    # end olc

    setup_file(setup_path("slapd.conf"), self.slapdconf,
               {"DNSDOMAIN": self.names.dnsdomain,
                "LDAPDIR": self.ldapdir,
                "DOMAINDN": self.names.domaindn,
                "CONFIGDN": self.names.configdn,
                "SCHEMADN": self.names.schemadn,
                "MEMBEROF_CONFIG": memberof_config,
                "MIRRORMODE": mmr_on_config,
                "REPLICATOR_ACL": mmr_replicator_acl,
                "MMR_SERVERIDS_CONFIG": mmr_serverids_config,
                "MMR_SYNCREPL_SCHEMA_CONFIG": mmr_syncrepl_schema_config,
                "MMR_SYNCREPL_CONFIG_CONFIG": mmr_syncrepl_config_config,
                "MMR_SYNCREPL_DOMAINDNS_CONFIG": mmr_syncrepl_domaindns_config,
                "MMR_SYNCREPL_FORESTDNS_CONFIG": mmr_syncrepl_forestdns_config,
                "MMR_SYNCREPL_USER_CONFIG": mmr_syncrepl_user_config,
                "OLC_SYNCREPL_CONFIG": olc_syncrepl_config,
                "OLC_MMR_CONFIG": olc_mmr_config,
                "REFINT_CONFIG": refint_config,
                "INDEX_CONFIG": index_config,
                "ADMIN_UID": str(os.getuid()),
                "NOSYNC": nosync_config, })

    self.setup_db_dir(os.path.join(self.ldapdir, "db", "forestdns"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "domaindns"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "user"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "config"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "schema"))
    self.setup_db_dir(os.path.join(self.ldapdir, "db", "samba"))

    if self.ol_mmr_urls is not None:
        mmr = ""
    else:
        mmr = "#"
    cn_samba = read_and_sub_file(
        setup_path("cn=samba.ldif"), {
            "LDAPADMINPASS": self.ldapadminpass,
            "MMR_PASSWORD": mmr_pass,
            "MMR": mmr})

    mapping = "schema-map-openldap-2.3"
    backend_schema = "backend-schema.schema"

    # Build an OpenLDAP-format schema file from the AD schema.
    with open(setup_path(mapping), 'r') as f:
        backend_schema_data = self.schema.convert_to_openldap(
            "openldap", f.read())
    assert backend_schema_data is not None
    with open(os.path.join(self.ldapdir, backend_schema), 'w') as f:
        f.write(backend_schema_data)

    # now we generate the needed strings to start slapd automatically,
    if self.ldap_backend_extra_port is not None:
        # When we use MMR, we can't use 0.0.0.0 as it uses the name
        # specified there as part of it's clue as to it's own name,
        # and not to replicate to itself
        if self.ol_mmr_urls is None:
            server_port_string = ("ldap://0.0.0.0:%d" %
                                  self.ldap_backend_extra_port)
        else:
            # BUG FIX: the '%' formatting operator was missing here,
            # making this a call of a str object (TypeError at runtime).
            server_port_string = "ldap://%s.%s:%d" % (
                self.names.hostname, self.names.dnsdomain,
                self.ldap_backend_extra_port)
    else:
        server_port_string = ""

    # Prepare the 'result' information - the commands to return in
    # particular
    self.slapd_provision_command = [self.slapd_path, "-F" + self.olcdir,
                                    "-h"]

    # copy this command so we have two version, one with -d0 and only
    # ldapi (or the forced ldap_uri), and one with all the listen commands
    self.slapd_command = list(self.slapd_provision_command)

    self.slapd_provision_command.extend([self.ldap_uri, "-d0"])
    uris = self.ldap_uri
    # BUG FIX: compare by value -- 'is not ""' tests object identity,
    # which is implementation-dependent for string literals.
    if server_port_string != "":
        uris = uris + " " + server_port_string
    self.slapd_command.append(uris)

    # Wipe the old sam.ldb databases away
    shutil.rmtree(self.olcdir, True)
    os.makedirs(self.olcdir, 0o770)

    # If we were just looking for crashes up to this point, it's a
    # good time to exit before we realise we don't have OpenLDAP on
    # this system
    if self.ldap_dryrun_mode:
        sys.exit(0)

    slapd_cmd = [self.slapd_path, "-Ttest", "-n", "0", "-f",
                 self.slapdconf, "-F", self.olcdir]

    retcode = subprocess.call(slapd_cmd, close_fds=True, shell=False)

    if retcode != 0:
        self.logger.error("conversion from slapd.conf to cn=config failed"
                          " slapd started with: %s" %
                          ("\'" + "\' \'".join(slapd_cmd) + "\'"))
        raise ProvisioningError(
            "conversion from slapd.conf to cn=config failed")

    if not os.path.exists(os.path.join(self.olcdir, "cn=config.ldif")):
        raise ProvisioningError(
            "conversion from slapd.conf to cn=config failed")

    # Don't confuse the admin by leaving the slapd.conf around
    os.remove(self.slapdconf)

    cn_samba_cmd = [self.slapd_path, "-Tadd", "-b", "cn=samba",
                    "-F", self.olcdir]
    p = subprocess.Popen(cn_samba_cmd, stdin=subprocess.PIPE, shell=False)
    # BUG FIX: feed the LDIF via communicate() (encoded -- the pipe is
    # binary) instead of a raw stdin.write() followed by communicate(),
    # which passed str to a bytes pipe and could deadlock on a full pipe.
    p.communicate(cn_samba.encode('utf-8'))
def deprovision_schema(setup_path, names, lp, creds, reporter, ldif, msg,
                       modify_mode=False):
    """Deprovision/unmodify schema using LDIF specified file, by reverting
    the modifications contained therein.

    :param setup_path: Path to the setup directory.
    :param names: provision names object.
    :param lp: Loadparm context
    :param creds: Credentials Context
    :param reporter: A progress reporter instance (subclass of
        AbstractProgressReporter)
    :param ldif: path to the LDIF file
    :param msg: reporter message
    :param modify_mode: whether entries are added or modified
    """
    db = get_schema_master_samdb(names, lp, creds)
    db.transaction_start()
    try:
        reporter.reportNextStep(msg)
        ldif_content = read_and_sub_file(setup_path(ldif),
                                         {"FIRSTORG": names.firstorg,
                                          "FIRSTORGDN": names.firstorgdn,
                                          "FIRSTOU": names.firstou,
                                          "CONFIGDN": names.configdn,
                                          "SCHEMADN": names.schemadn,
                                          "DOMAINDN": names.domaindn,
                                          "DOMAIN": names.domain,
                                          "DNSDOMAIN": names.dnsdomain,
                                          "NETBIOSNAME": names.netbiosname,
                                          "HOSTNAME": names.hostname
                                          })
        if modify_mode:
            # Invert the LDIF: turn each "add:" stanza into a "delete:",
            # drop "replace:" stanzas (they cannot be reverted from here),
            # and apply the entries in reverse order.
            lines = ldif_content.splitlines()
            keep_line = False
            entries = []
            current_entry = []
            entries.append(current_entry)
            for line in lines:
                skip_this_line = False
                if line.startswith("dn:") or line == "":
                    # current_entry.append("")
                    current_entry = []
                    entries.append(current_entry)
                    keep_line = True
                elif line.startswith("add:"):
                    keep_line = True
                    line = "delete:" + line[4:]
                elif line.startswith("replace:"):
                    keep_line = False
                elif line.startswith("#") or line.strip() == "":
                    skip_this_line = True
                if keep_line and not skip_this_line:
                    current_entry.append(line)
            entries.reverse()
            for entry in entries:
                ldif_content = "\n".join(entry)
                try:
                    db.modify_ldif(ldif_content)
                except Exception as err:
                    # Best effort: report the failure and continue with
                    # the next entry.
                    print("[!] error: %s" % str(err))
        else:
            # Delete the added objects, children before parents
            # (reverse LDIF order).
            lines = ldif_content.splitlines()
            lines.reverse()
            for line in lines:
                if line.startswith("dn:"):
                    try:
                        dn = line[4:]
                        ret = db.search(dn, scope=ldb.SCOPE_BASE)
                        if len(ret) != 0:
                            db.delete(line[4:], ["tree_delete:0"])
                    # BUG FIX: replaced the Python 2-only
                    # "except E, (a, b):" syntax (a SyntaxError on
                    # Python 3) with the portable "as" form.
                    except ldb.LdbError as err:
                        (enum, estr) = err.args
                        if enum == ldb.ERR_NO_SUCH_OBJECT:
                            # Already gone -- nothing left to revert.
                            pass
                        else:
                            print("[!] error: %s" % estr)
    except:
        # Roll back on any failure, then re-raise for the caller.
        db.transaction_cancel()
        raise
    db.transaction_commit()
def modify_ldif(ldb, ldif_file, subst_vars, controls=None):
    """Apply an LDIF template from the setup directory to a database.

    :param ldb: LDB/SamDB connection to modify
    :param ldif_file: LDIF file name, relative to the setup directory
    :param subst_vars: dict of substitution variables for the template
    :param controls: LDB controls to pass (defaults to ["relax:0"])
    """
    # BUG FIX: avoid a mutable default argument; a list default is shared
    # across calls and could be mutated by a caller.
    if controls is None:
        controls = ["relax:0"]
    ldif_file_path = os.path.join(samba.param.setup_dir(), ldif_file)
    data = read_and_sub_file(ldif_file_path, subst_vars)
    ldb.modify_ldif(data, controls)
def __init__(self, domain_sid, invocationid=None, schemadn=None,
             files=None, override_prefixmap=None, additional_prefixmap=None,
             base_schema=None):
    """Load schema for the SamDB from the AD schema files and
    samba4_schema.ldif.

    :param domain_sid: domain SID used to build the schema partition
        security descriptor
    :param invocationid: optional invocation id to stamp on the SamDB
    :param schemadn: DN of the schema
    :param files: optional list of extra schema LDIF files to append
    :param override_prefixmap: optional prefixMap text replacing the
        default prefixMap.txt contents (assumed str here -- TODO confirm
        against callers)
    :param additional_prefixmap: optional list of extra prefixMap lines
    :param base_schema: name of the base schema set to load (defaults
        to Schema.default_base_schema())

    The schema data is loaded once here, to avoid double-parsing when
    it is later added to the database.
    """
    from samba.provision import setup_path

    if base_schema is None:
        base_schema = Schema.default_base_schema()
    self.base_schema = base_schema

    self.schemadn = schemadn
    # We need to have the am_rodc=False just to keep some warnings quiet -
    # this isn't a real SAM, so it's meaningless.
    self.ldb = SamDB(global_schema=False, am_rodc=False)
    if invocationid is not None:
        self.ldb.set_invocation_id(invocationid)

    self.schema_data = read_ms_schema(
        setup_path('ad-schema/%s' % Schema.base_schemas[base_schema][0]),
        setup_path('ad-schema/%s' % Schema.base_schemas[base_schema][1]))

    if files is not None:
        for file in files:
            # Close the handle deterministically (the original leaked it).
            with open(file, 'r') as f:
                self.schema_data += f.read()

    self.schema_data = substitute_var(self.schema_data,
                                      {"SCHEMADN": schemadn})
    check_all_substituted(self.schema_data)

    schema_version = str(Schema.get_version(base_schema))
    self.schema_dn_modify = read_and_sub_file(
        setup_path("provision_schema_basedn_modify.ldif"),
        {"SCHEMADN": schemadn, "OBJVERSION": schema_version})

    descr = b64encode(get_schema_descriptor(domain_sid)).decode('utf8')
    self.schema_dn_add = read_and_sub_file(
        setup_path("provision_schema_basedn.ldif"),
        {"SCHEMADN": schemadn, "DESCRIPTOR": descr})

    if override_prefixmap is not None:
        self.prefixmap_data = override_prefixmap
    else:
        with open(setup_path("prefixMap.txt"), 'r') as f:
            self.prefixmap_data = f.read()

    if additional_prefixmap is not None:
        for map in additional_prefixmap:
            self.prefixmap_data += "%s\n" % map

    # BUG FIX: prefixmap_data is text (opened in text mode), but
    # b64encode() requires bytes on Python 3 -- encode before encoding
    # to base64.
    self.prefixmap_data = b64encode(
        self.prefixmap_data.encode('utf-8')).decode('utf8')

    # We don't actually add this ldif, just parse it
    prefixmap_ldif = "dn: %s\nprefixMap:: %s\n\n" % (self.schemadn,
                                                     self.prefixmap_data)
    self.set_from_ldif(prefixmap_ldif, self.schema_data, self.schemadn)
def setup_ad_dns(samdb, names, hostip=None, hostip6=None):
    """Provision the AD DNS records for the new DC into the directory.

    Creates the MicrosoftDNS container (from provision_dns_add.ldif) and
    the SOA/NS/A/AAAA and DC-locator SRV dnsNode entries.

    :param samdb: SamDB connection to write to
    :param names: provision names object (domaindn, dnsdomain,
        netbiosname, sitename)
    :param hostip: optional IPv4 address for the host's A record
    :param hostip6: optional IPv6 address for the host's AAAA record
    """
    domaindn = names.domaindn
    dnsdomain = names.dnsdomain.lower()
    hostname = names.netbiosname.lower()
    dnsname = "%s.%s" % (hostname, dnsdomain)
    site = names.sitename

    dns_ldif = os.path.join(samba.param.setup_dir(), "provision_dns_add.ldif")
    dns_data = read_and_sub_file(dns_ldif, {"DOMAINDN": domaindn,
                                            "DNSDOMAIN": dnsdomain})
    samdb.add_ldif(dns_data, ["relax:0"])

    def _add_dns_node(rdn, records):
        # Create one dnsNode under the MicrosoftDNS container holding
        # the given packed dnsRecord blobs.
        msg = ldb.Message(ldb.Dn(
            samdb, "DC=%s,DC=%s,CN=MicrosoftDNS,CN=System,%s" %
            (rdn, dnsdomain, domaindn)))
        msg["objectClass"] = ["top", "dnsNode"]
        msg["dnsRecord"] = ldb.MessageElement(records, ldb.FLAG_MOD_ADD,
                                              "dnsRecord")
        samdb.add(msg)

    soa_subrecords = []
    dns_records = []

    # @ entry for the domain
    at_soa_record = SOARecord(dnsname, "hostmaster.%s" % dnsdomain)
    soa_subrecords.append(ndr_pack(at_soa_record))

    at_ns_record = NSRecord(dnsname)
    soa_subrecords.append(ndr_pack(at_ns_record))

    if hostip is not None:
        # A record
        at_a_record = ARecord(hostip)
        dns_records.append(ndr_pack(at_a_record))

    if hostip6 is not None:
        at_aaaa_record = AAAARecord(hostip6)
        dns_records.append(ndr_pack(at_aaaa_record))

    _add_dns_node("@", soa_subrecords + dns_records)

    # _gc._tcp record and its per-site variant
    gc_tcp_record = SRVRecord(dnsname, 3268)
    _add_dns_node("_gc._tcp", [ndr_pack(gc_tcp_record)])
    _add_dns_node("_gc._tcp.%s._sites" % names.sitename,
                  [ndr_pack(gc_tcp_record)])

    # _kerberos records (TCP, per-site TCP, and UDP)
    kerberos_record = SRVRecord(dnsname, 88)
    _add_dns_node("_kerberos._tcp", [ndr_pack(kerberos_record)])
    _add_dns_node("_kerberos._tcp.%s._sites" % site,
                  [ndr_pack(kerberos_record)])
    _add_dns_node("_kerberos._udp", [ndr_pack(kerberos_record)])

    # _kpasswd records
    kpasswd_record = SRVRecord(dnsname, 464)
    _add_dns_node("_kpasswd._tcp", [ndr_pack(kpasswd_record)])
    _add_dns_node("_kpasswd._udp", [ndr_pack(kpasswd_record)])

    # _ldap records.  BUG FIX: the per-site entry belongs under
    # "._sites" like every other per-site SRV record here -- the
    # original wrote "._site", which DC-locator clients never query.
    ldap_record = SRVRecord(dnsname, 389)
    _add_dns_node("_ldap._tcp", [ndr_pack(ldap_record)])
    _add_dns_node("_ldap._tcp.%s._sites" % site, [ndr_pack(ldap_record)])

    # _msdcs record
    msdcs_record = NSRecord(dnsname)
    _add_dns_node("_msdcs", [ndr_pack(msdcs_record)])

    # the host's own record
    # Only do this if there's IP addresses to set up.
    # This is a bit weird, but the samba4.blackbox.provision.py test
    # apparently doesn't set up any IPs
    if len(dns_records) > 0:
        _add_dns_node(hostname, dns_records)

    # DomainDnsZones record
    # NOTE(review): unlike the host record above, this is added even when
    # dns_records is empty -- preserved as-is; confirm that is intended.
    _add_dns_node("DomainDnsZones", dns_records)
def deprovision_schema(setup_path, names, lp, creds, reporter, ldif, msg,
                       modify_mode=False):
    """Deprovision/unmodify schema using LDIF specified file, by reverting
    the modifications contained therein.

    :param setup_path: Path to the setup directory.
    :param names: provision names object.
    :param lp: Loadparm context
    :param creds: Credentials Context
    :param reporter: A progress reporter instance (subclass of
        AbstractProgressReporter)
    :param ldif: path to the LDIF file
    :param msg: reporter message
    :param modify_mode: whether entries are added or modified
    """
    session_info = system_session()
    db = SamDB(url=get_ldb_url(lp, creds, names), session_info=session_info,
               credentials=creds, lp=lp)
    db.transaction_start()
    try:
        reporter.reportNextStep(msg)
        ldif_content = read_and_sub_file(setup_path(ldif),
                                         {"FIRSTORG": names.firstorg,
                                          "FIRSTORGDN": names.firstorgdn,
                                          "CONFIGDN": names.configdn,
                                          "SCHEMADN": names.schemadn,
                                          "DOMAINDN": names.domaindn,
                                          "DOMAIN": names.domain,
                                          "DNSDOMAIN": names.dnsdomain,
                                          "NETBIOSNAME": names.netbiosname,
                                          "HOSTNAME": names.hostname
                                          })
        if modify_mode:
            # Invert the LDIF: turn each "add:" stanza into a "delete:",
            # drop "replace:" stanzas, and apply the entries in reverse
            # order.
            lines = ldif_content.splitlines()
            keep_line = False
            entries = []
            current_entry = []
            entries.append(current_entry)
            for line in lines:
                skip_this_line = False
                if line.startswith("dn:") or line == "":
                    # current_entry.append("")
                    current_entry = []
                    entries.append(current_entry)
                    keep_line = True
                elif line.startswith("add:"):
                    keep_line = True
                    line = "delete:" + line[4:]
                elif line.startswith("replace:"):
                    keep_line = False
                elif line.startswith("#") or line.strip() == "":
                    skip_this_line = True
                if keep_line and not skip_this_line:
                    current_entry.append(line)
            entries.reverse()
            for entry in entries:
                ldif_content = "\n".join(entry)
                # BUG FIX: Python 2 print statement replaced with the
                # portable function-call form.
                print(ldif_content)
                try:
                    db.modify_ldif(ldif_content)
                except Exception as err:
                    # BUG FIX: was a silent "except: pass"; still
                    # best-effort, but report the failure.
                    print("[!] error: %s" % str(err))
        else:
            # Delete the added objects, children before parents
            # (reverse LDIF order).
            lines = ldif_content.splitlines()
            lines.reverse()
            for line in lines:
                if line.startswith("dn:"):
                    # NOTE(review): this delete is unguarded and will
                    # raise if the DN is already gone -- confirm whether
                    # missing objects should be tolerated here.
                    db.delete(line[4:])
    except:
        # Roll back on any failure, then re-raise for the caller.
        db.transaction_cancel()
        raise
    db.transaction_commit()