Example #1
def main():
    _mkdirs(SRCDIR, INSTALLDIR)
    setup_logging()
    fetch_and_build()
    for db in ('sqlite3', 'mysql'):
        shell('rm -rf {}/*'.format(INSTALLDIR))
        setup_and_test(db)
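Example #1 (and several of the seafile build/test examples further down) passes a single command string to shell(), sometimes with an env keyword. That usage suggests a thin helper that runs the command through the system shell and fails loudly on a non-zero exit status. The sketch below is only an assumption based on how these examples call it, not the actual helper from any of these projects:

import subprocess

def shell(cmd, env=None, shell=None, **kwargs):
    # Hypothetical helper: run a command, raise CalledProcessError on a
    # non-zero exit status, and return its output as text. A plain string is
    # run through the system shell; a list (as in Example #30) is executed
    # directly without shell interpretation.
    if shell is None:
        shell = not isinstance(cmd, (list, tuple))
    out = subprocess.check_output(cmd, shell=shell, env=env, **kwargs)
    return out.decode("utf8", "replace")

Called as shell('rm -rf {}/*'.format(INSTALLDIR)) it matches the usage in Example #1; Example #36 additionally relies on the returned output.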
Example #2
def list_apt_updates(apt_update=True):
	# See if we have this information cached recently.
	# Keep the information for 8 hours.
	global _apt_updates
	if _apt_updates is not None and _apt_updates[0] > datetime.datetime.now() - datetime.timedelta(hours=8):
		return _apt_updates[1]

	# Run apt-get update to refresh package list. This should be running daily
	# anyway, so on the status checks page don't do this because it is slow.
	if apt_update:
		shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])

	# Run apt-get upgrade in simulate mode to get a list of what
	# it would do.
	simulated_install = shell("check_output", ["/usr/bin/apt-get", "-qq", "-s", "upgrade"])
	pkgs = []
	for line in simulated_install.split('\n'):
		if line.strip() == "":
			continue
		if re.match(r'^Conf .*', line):
			 # remove these lines, not informative
			continue
		m = re.match(r'^Inst (.*) \[(.*)\] \((\S*)', line)
		if m:
			pkgs.append({ "package": m.group(1), "version": m.group(3), "current_version": m.group(2) })
		else:
			pkgs.append({ "package": "[" + line + "]", "version": "", "current_version": "" })

	# Cache for future requests.
	_apt_updates = (datetime.datetime.now(), pkgs)

	return pkgs
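Example #2 and most of the Mail-in-a-Box-style examples that follow call shell() with a subprocess method name ('check_call' or 'check_output') plus an argument list, and sometimes keywords such as env, capture_stderr, return_bytes, and trap (which makes the call return a (code, output) tuple instead of raising). A minimal sketch of such a dispatcher is shown below; it is an assumption inferred from these call sites, not the project's actual utils.shell:

import subprocess

def shell(method, cmd_args, env=None, capture_stderr=False,
          return_bytes=False, trap=False):
    # Dispatch to subprocess.check_call / subprocess.check_output by name.
    kwargs = {}
    if env is not None:
        kwargs['env'] = env
    if capture_stderr:
        kwargs['stderr'] = subprocess.STDOUT
    try:
        ret = getattr(subprocess, method)(cmd_args, **kwargs)
        code = 0
    except subprocess.CalledProcessError as e:
        if not trap:
            raise
        # With trap=True, swallow the failure and hand back the exit code.
        ret, code = e.output, e.returncode
    if not return_bytes and isinstance(ret, bytes):
        ret = ret.decode("utf8")
    return (code, ret) if trap else ret

With trap=True the callers unpack two values, e.g. code, ret = shell('check_call', ['/bin/nc', '-z', '-w5', 'aspmx.l.google.com', '25'], trap=True) as in Example #32.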
Example #3
def ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, env):
	# For domains besides PRIMARY_HOSTNAME, generate a self-signed certificate if
	# a certificate doesn't already exist. See setup/mail.sh for documentation.

	if domain == env['PRIMARY_HOSTNAME']:
		return

	# Sanity check. Shouldn't happen. A non-primary domain might use this
	# certificate (see above), but then the certificate should exist anyway.
	if ssl_certificate == os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem'):
		return

	if os.path.exists(ssl_certificate):
		return

	os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)

	# Generate a new self-signed certificate using the same private key that we already have.

	# Start with a CSR written to a temporary file.
	with tempfile.NamedTemporaryFile(mode="w") as csr_fp:
		csr_fp.write(create_csr(domain, ssl_key, env))
		csr_fp.flush() # flush now; the file can't be closed until after 'openssl x509' runs because closing triggers deletion.

		# And then make the certificate.
		shell("check_call", [
			"openssl", "x509", "-req",
			"-days", "365",
			"-in", csr_fp.name,
			"-signkey", ssl_key,
			"-out", ssl_certificate])
Example #4
def ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, csr_path, env):
	# For domains besides PRIMARY_HOSTNAME, generate a self-signed certificate if one doesn't
	# already exist. See setup/mail.sh for documentation.

	if domain == env['PRIMARY_HOSTNAME']:
		return

	if os.path.exists(ssl_certificate):
		return

	os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)

	# Generate a new self-signed certificate using the same private key that we already have.

	# Start with a CSR.
	shell("check_call", [
		"openssl", "req", "-new",
		"-key", ssl_key,
		"-out",  csr_path,
		"-subj", "/C=%s/ST=/L=/O=/CN=%s" % (env["CSR_COUNTRY"], domain)])

	# And then make the certificate.
	shell("check_call", [
		"openssl", "x509", "-req",
		"-days", "365",
		"-in", csr_path,
		"-signkey", ssl_key,
		"-out", ssl_certificate])
Example #5
	def check_imap_login(self, email, pw, env):
		# Validate a user's credentials.

		# Sanity check.
		if email == "" or pw == "":
			return "Enter an email address and password."

		# Authenticate.
		try:
			# Use doveadm to check credentials. doveadm will return
			# a non-zero exit status if the credentials are no good,
			# and check_call will raise an exception in that case.
			utils.shell('check_call', [
				"/usr/bin/doveadm",
				"auth", "test",
				email, pw
				])
		except:
			# Login failed.
			return "Invalid email address or password."

		# Authorize.
		# (This call should never fail on a valid user.)
		privs = get_mail_user_privileges(email, env)
		if isinstance(privs, tuple): raise Exception("Error getting privileges.")
		if "admin" not in privs:
			return "You are not an administrator for this system."

		return "OK"
Example #6
def install_cert(domain, ssl_cert, ssl_chain, env):
	# Write the combined cert+chain to a temporary path and validate that it is OK.
	# The certificate always goes above the chain.
	import tempfile
	fd, fn = tempfile.mkstemp('.pem')
	os.write(fd, (ssl_cert + '\n' + ssl_chain).encode("ascii"))
	os.close(fd)

	# Do validation on the certificate before installing it.
	ssl_private_key = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_private_key.pem'))
	cert_status, cert_status_details = check_certificate(domain, fn, ssl_private_key)
	if cert_status != "OK":
		if cert_status == "SELF-SIGNED":
			cert_status = "This is a self-signed certificate. I can't install that."
		os.unlink(fn)
		if cert_status_details is not None:
			cert_status += " " + cert_status_details
		return cert_status

	# Where to put it?
	# Make a unique path for the certificate.
	from cryptography.hazmat.primitives import hashes
	from binascii import hexlify
	cert = load_pem(load_cert_chain(fn)[0])
	all_domains, cn = get_certificate_domains(cert)
	path = "%s-%s-%s.pem" % (
		safe_domain_name(cn), # common name, which should be filename safe because it is IDNA-encoded, but in case of a malformed cert make sure it's ok to use as a filename
		cert.not_valid_after.date().isoformat().replace("-", ""), # expiration date
		hexlify(cert.fingerprint(hashes.SHA256())).decode("ascii")[0:8], # fingerprint prefix
		)
	ssl_certificate = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', path))

	# Install the certificate.
	os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)
	shutil.move(fn, ssl_certificate)

	ret = ["OK"]

	# When updating the cert for PRIMARY_HOSTNAME, symlink it from the system
	# certificate path, which is hard-coded for various purposes, and then
	# restart postfix and dovecot.
	if domain == env['PRIMARY_HOSTNAME']:
		# Update symlink.
		system_ssl_certificate = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_certificate.pem'))
		os.unlink(system_ssl_certificate)
		os.symlink(ssl_certificate, system_ssl_certificate)

		# Restart postfix and dovecot so they pick up the new file.
		shell('check_call', ["/usr/sbin/service", "postfix", "restart"])
		shell('check_call', ["/usr/sbin/service", "dovecot", "restart"])
		ret.append("mail services restarted")

		# The DANE TLSA record will remain valid so long as the private key
		# hasn't changed. We don't ever change the private key automatically.
		# If the user does it, they must manually update DNS.

	# Update the web configuration so nginx picks up the new certificate file.
	from web_update import do_web_update
	ret.append( do_web_update(env) )
	return "\n".join(ret)
Example #7
def do_web_update(env):
	# Build an nginx configuration file.
	nginx_conf = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-top.conf")).read()

	# Add configuration for each web domain.
	template1 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
	template2 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-primaryonly.conf")).read()
	for domain in get_web_domains(env):
		nginx_conf += make_domain_config(domain, template1, template2, env)

	# Did the file change? If not, don't bother writing & restarting nginx.
	nginx_conf_fn = "/etc/nginx/conf.d/local.conf"
	if os.path.exists(nginx_conf_fn):
		with open(nginx_conf_fn) as f:
			if f.read() == nginx_conf:
				return ""

	# Save the file.
	with open(nginx_conf_fn, "w") as f:
		f.write(nginx_conf)

	# Kick nginx. Since this might be called from the web admin
	# don't do a 'restart'. That would kill the connection before
	# the API returns its response. A 'reload' should be good
	# enough and doesn't break any open connections.
	shell('check_call', ["/usr/sbin/service", "nginx", "reload"])

	return "web updated\n"
Example #8
def ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, csr_path, env):
	# For domains besides PRIMARY_HOSTNAME, generate a self-signed certificate if
	# a certificate doesn't already exist. See setup/mail.sh for documentation.

	if domain == env['PRIMARY_HOSTNAME']:
		return

	# Sanity check. Shouldn't happen. A non-primary domain might use this
	# certificate (see above), but then the certificate should exist anyway.
	if ssl_certificate == os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem'):
		return

	if os.path.exists(ssl_certificate):
		return

	os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)

	# Generate a new self-signed certificate using the same private key that we already have.

	# Start with a CSR.
	shell("check_call", [
		"openssl", "req", "-new",
		"-key", ssl_key,
		"-out",  csr_path,
		"-sha256",
		"-subj", "/C=%s/ST=/L=/O=/CN=%s" % (env["CSR_COUNTRY"], domain)])

	# And then make the certificate.
	shell("check_call", [
		"openssl", "x509", "-req",
		"-days", "365",
		"-in", csr_path,
		"-signkey", ssl_key,
		"-out", ssl_certificate])
Example #9
 def clone(self):
     if exists(self.name):
         with cd(self.name):
             shell('git fetch origin --tags')
     else:
         shell('git clone --depth=1 --branch {} {}'.format(self.branch,
                                                           self.url))
Example #10
def transformBDTVariables(infn, outfn, varmap):
    #
    # apply a functional transformation f(x) to all 
    # nodes with cuts '< X' such that the node definition becomes '< f(X)'
    #
    # varmap is a map of variable to valid TFormula expression, eg. { 'x' : 'pow(x,2)/1.0e3' }
    #
    outxml = outfn.replace('.gz', '')
    rootnode = xmlparser.parse(xmlreader(infn).handle())
    varlist = []
    for v in rootnode.xpath('//Variable'):
        k = v.attrib['Title']
        if k in varmap:
            v.attrib['Min'] = str(eval(varmap[k].replace(k,v.attrib['Min'])))
            v.attrib['Max'] = str(eval(varmap[k].replace(k,v.attrib['Max'])))
        varlist += [k]
    for n in rootnode.xpath('//Node'):
        cutvar = varlist[int(n.attrib['IVar'])]
        if cutvar in varmap:
            n.attrib['Cut'] = str(eval(varmap[cutvar].replace(cutvar,n.attrib['Cut'])))
    hndl = open(outxml,'w')
    hndl.write('<?xml version="1.0"?>\n')
    rootnode.write(hndl)
    hndl.write('\n')
    hndl.close()
    if outfn.endswith('.gz'):
        shell('gzip %s'%(outxml))
Example #11
def run (args):
  """Removes all Mygrate data (cancels mygrate init)."""
  cmds.init.require_init()
  path = repo.repopath() + '/.mygrate'
  print "Removing everything under %s." % path
  utils.shell ('rm -rf %s' % path)
  print "Mygrate repo successfully removed. Use mygrate init to reinitialize it."
Example #12
def swapBDTVariables(infn, outfn, varmap, resetRange=False):
    #
    # rename variables using some new expression, for example
    # 'X' -> 'y', useful if BDT is applied to a tree
    # with different structure than training data
    #
    outxml = outfn.replace('.gz', '')
    rootnode = xmlparser.parse(xmlreader(infn).handle())
    for v in rootnode.xpath('//Variable'):
        k = v.attrib['Title']
        if k not in varmap:
            continue
        v.attrib['Expression'] = varmap[k]
        v.attrib['Label'] = varmap[k]
        v.attrib['Title'] = varmap[k]
        v.attrib['Internal'] = varmap[k]
        if resetRange:
            v.attrib['Min'] = '-1.0e100'
            v.attrib['Max'] = '+1.0e100'
    hndl = open(outxml,'w')
    hndl.write('<?xml version="1.0"?>\n')
    rootnode.write(hndl)
    hndl.write('\n')
    hndl.close()
    if outfn.endswith('.gz'):
        shell('gzip %s'%(outxml))
Example #13
def add_mail_user(email, pw, privs, env):
	# accept IDNA domain names but normalize to Unicode before going into database
	email = sanitize_idn_email_address(email)

	# validate email
	if email.strip() == "":
		return ("No email address provided.", 400)
	elif not validate_email(email):
		return ("Invalid email address.", 400)
	elif not validate_email(email, mode='user'):
		return ("User account email addresses may only use the ASCII letters A-Z, the digits 0-9, underscore (_), hyphen (-), and period (.).", 400)

	validate_password(pw)

	# validate privileges
	if privs is None or privs.strip() == "":
		privs = []
	else:
		privs = privs.split("\n")
		for p in privs:
			validation = validate_privilege(p)
			if validation: return validation

	# get the database
	conn, c = open_database(env, with_connection=True)

	# hash the password
	pw = hash_password(pw)

	# add the user to the database
	try:
		c.execute("INSERT INTO users (email, password, privileges) VALUES (?, ?, ?)",
			(email, pw, "\n".join(privs)))
	except sqlite3.IntegrityError:
		return ("User already exists.", 400)

	# write database before next step
	conn.commit()

	# Create & subscribe the user's INBOX, Trash, Spam, and Drafts folders.
	# * Our sieve rule for spam expects that the Spam folder exists.
	# * Roundcube will show an error if the user tries to delete a message before the Trash folder exists (#359).
	# * K-9 mail will poll every 90 seconds if a Drafts folder does not exist, so create it
	#   to avoid unnecessary polling.

	# Check if the mailboxes exist before creating them. When creating a user that had previously
	# been deleted, the mailboxes will still exist because they are still on disk.
	try:
		existing_mboxes = utils.shell('check_output', ["doveadm", "mailbox", "list", "-u", email, "-8"], capture_stderr=True).split("\n")
	except subprocess.CalledProcessError as e:
		c.execute("DELETE FROM users WHERE email=?", (email,))
		conn.commit()
		return ("Failed to initialize the user: " + e.output.decode("utf8"), 400)

	for folder in ("INBOX", "Trash", "Spam", "Drafts"):
		if folder not in existing_mboxes:
			utils.shell('check_call', ["doveadm", "mailbox", "create", "-u", email, "-s", folder])

	# Update things in case any new domains are added.
	return kick(env, "mail user added")
Example #14
def setup_server(cfg, db):
    '''Setup seafile server with the setup-seafile.sh script. We use pexpect to
    interactive with the setup process of the script.
    '''
    info('uncompressing server tarball')
    shell('tar xf seafile-server_{}_x86-64.tar.gz -C {}'
          .format(cfg.version, cfg.installdir))
    if db == 'mysql':
        autosetup_mysql(cfg)
    else:
        autosetup_sqlite3(cfg)

    with open(join(cfg.installdir, 'conf/seahub_settings.py'), 'a') as fp:
        fp.write('\n')
        fp.write('DEBUG = True')
        fp.write('\n')
        fp.write('''\
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_RATES': {
        'ping': '600/minute',
        'anon': '1000/minute',
        'user': '******',
    },
}''')
        fp.write('\n')
Example #15
def add_mail_user(email, pw, privs, env):
	# validate email
	if email.strip() == "":
		return ("No email address provided.", 400)
	if not validate_email(email, mode='user'):
		return ("Invalid email address.", 400)

	# validate password
	if pw.strip() == "":
		return ("No password provided.", 400)
	if re.search(r"[\s]", pw):
		return ("Passwords cannot contain spaces.", 400)
	if len(pw) < 4:
		return ("Passwords must be at least four characters.", 400)

	# validate privileges
	if privs is None or privs.strip() == "":
		privs = []
	else:
		privs = privs.split("\n")
		for p in privs:
			validation = validate_privilege(p)
			if validation: return validation

	# get the database
	conn, c = open_database(env, with_connection=True)

	# hash the password
	pw = utils.shell('check_output', ["/usr/bin/doveadm", "pw", "-s", "SHA512-CRYPT", "-p", pw]).strip()

	# add the user to the database
	try:
		c.execute("INSERT INTO users (email, password, privileges) VALUES (?, ?, ?)",
			(email, pw, "\n".join(privs)))
	except sqlite3.IntegrityError:
		return ("User already exists.", 400)

	# write database before next step
	conn.commit()

	# Create the user's INBOX, Spam, and Drafts folders, and subscribe them.
	# K-9 mail will poll every 90 seconds if a Drafts folder does not exist, so create it
	# to avoid unnecessary polling.

	# Check if the mailboxes exist before creating them. When creating a user that had previously
	# been deleted, the mailboxes will still exist because they are still on disk.
	try:
		existing_mboxes = utils.shell('check_output', ["doveadm", "mailbox", "list", "-u", email, "-8"], capture_stderr=True).split("\n")
	except subprocess.CalledProcessError as e:
		c.execute("DELETE FROM users WHERE email=?", (email,))
		conn.commit()
		return ("Failed to initialize the user: " + e.output.decode("utf8"), 400)

	for folder in ("INBOX", "Spam", "Drafts"):
		if folder not in existing_mboxes:
			utils.shell('check_call', ["doveadm", "mailbox", "create", "-u", email, "-s", folder])

	# Update things in case any new domains are added.
	return kick(env, "mail user added")
Example #16
 def copy_dist(self):
     self.make_dist()
     tarball = glob.glob('*.tar.gz')[0]
     info('copying %s to %s', tarball, SRCDIR)
     shell('cp {} {}'.format(tarball, SRCDIR))
     m = re.match('{}-(.*).tar.gz'.format(self.name), basename(tarball))
     if m:
         self.version = m.group(1)
Example #17
def autosetup_sqlite3(cfg):
    setup_script = get_script(cfg, 'setup-seafile.sh')
    shell('''sed -i -e '/^check_root;.*/d' "{}"'''.format(setup_script))

    if cfg.initmode == 'prompt':
        setup_sqlite3_prompt(setup_script)
    else:
        setup_sqlite3_auto(setup_script)
Example #18
def run_checks(env, output):
	# clear the DNS cache so our DNS checks are most up to date
	shell('check_call', ["/usr/sbin/service", "bind9", "restart"])
	
	# perform checks
	env["out"] = output
	run_system_checks(env)
	run_network_checks(env)
	run_domain_checks(env)
Example #19
 def make_dist(self):
     cmds = [
         # 'git add -f media/css/*.css',
         # 'git commit -a -m "%s"' % msg,
         './tools/gen-tarball.py --version={} --branch=HEAD >/dev/null'
         .format(seafile_version),
     ]
     for cmd in cmds:
         shell(cmd, env=make_build_env())
Example #20
def migration_9(env):
	# Add a column to the aliases table to store permitted_senders,
	# which is a list of user account email addresses that are
	# permitted to send mail using this alias instead of their own
	# address. This was motivated by the addition of #427 ("Reject
	# outgoing mail if FROM does not match Login") - which introduced
	# the notion of outbound permitted-senders.
	db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite')
	shell("check_call", ["sqlite3", db, "ALTER TABLE aliases ADD permitted_senders TEXT"])
Example #21
def run_duplicity_restore(args):
    env = load_environment()
    config = get_backup_config(env)
    backup_cache_dir = os.path.join(env["STORAGE_ROOT"], "backup", "cache")
    shell(
        "check_call",
        ["/usr/bin/duplicity", "restore", "--archive-dir", backup_cache_dir, config["target"]] + args,
        get_env(env),
    )
Example #22
def run_checks(env, output):
	# clear bind9's DNS cache so our DNS checks are up to date
	shell('check_call', ["/usr/sbin/rndc", "flush"])
	
	# perform checks
	env["out"] = output
	run_system_checks(env)
	run_network_checks(env)
	run_domain_checks(env)
Example #23
def show_updates():
	utils.shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])
	simulated_install = utils.shell("check_output", ["/usr/bin/apt-get", "-qq", "-s", "upgrade"])
	pkgs = []
	for line in simulated_install.split('\n'):
		if re.match(r'^Conf .*', line): continue # remove these lines, not informative
		line = re.sub(r'^Inst (.*) \[(.*)\] \((\S*).*', r'Updated Package Available: \1 (\3)', line) # make these lines prettier
		pkgs.append(line)
	return "\n".join(pkgs)
Example #24
def clean(options, module_name):
    """
    Clean up before and after building when specified by the
    user (-c|clean) command line option.

    :param options: The command line options.
    :type options: optparse.Options
    """
    path = os.path.join(options['working_dir'], module_name)
    utils.shell('rm -rf %s' % path)
Example #25
def main():
    _mkdirs(SRCDIR, INSTALLDIR)
    setup_logging()
    fetch_and_build()
    for db in ('sqlite3', 'mysql'):
        if db == 'mysql':
            shell('mysqladmin -u root password %s' % MYSQL_ROOT_PASSWD)
        for i in ('prompt', 'auto'):
            shell('rm -rf {}/*'.format(INSTALLDIR))
            setup_and_test(db, i)
Example #26
def setup_mysql_auto(setup_script):
    info('setting up seafile server in auto mode, script %s', setup_script)
    env = os.environ.copy()
    env['MYSQL_USER'] = '******'
    env['MYSQL_USER_PASSWD'] = 'seafile'
    env['MYSQL_ROOT_PASSWD']= MYSQL_ROOT_PASSWD
    env['CCNET_DB'] = 'ccnet-new'
    env['SEAFILE_DB'] = 'seafile-new'
    env['SEAHUB_DB'] = 'seahub-new'
    shell('%s auto -n my-seafile -e 0' % setup_script, env=env)
Example #27
File: deploy.py Project: T2BE/lfm
def deploy_dir(path, kwargs):
	with utils.directory(path):
		config = LambdaConfig().load_from_cwd().update_config(kwargs)
		config.verify()
		# Remove ignore paths
		for e in config.get('ignore', []) + ['.git/', '.gitignore']:
			utils.delete_resource(e)
		# Run install command
		if 'install' in config:
			utils.shell(config.get('install'))
		upload(config.get_config())
Example #28
def run_duplicity_restore(args):
	env = load_environment()
	config = get_backup_config(env)
	backup_cache_dir = os.path.join(env["STORAGE_ROOT"], 'backup', 'cache')
	shell('check_call', [
		"/usr/bin/duplicity",
		"restore",
		"--archive-dir", backup_cache_dir,
		config["target"],
		] + rsync_ssh_options + args,
	get_env(env))
Example #29
def do_web_update(env):
    # Pre-load what SSL certificates we will use for each domain.
    ssl_certificates = get_ssl_certificates(env)

    # Build an nginx configuration file.
    nginx_conf = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-top.conf")).read()

    # Load the templates.
    template0 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
    template1 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-alldomains.conf")).read()
    template2 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-primaryonly.conf")).read()
    template3 = "\trewrite ^(.*) https://$REDIRECT_DOMAIN$1 permanent;\n"

    # Add the PRIMARY_HOST configuration first so it becomes nginx's default server.
    nginx_conf += make_domain_config(env["PRIMARY_HOSTNAME"], [template0, template1, template2], ssl_certificates, env)

    # Add configuration all other web domains.
    has_root_proxy_or_redirect = get_web_domains_with_root_overrides(env)
    web_domains_not_redirect = get_web_domains(env, include_www_redirects=False)
    for domain in get_web_domains(env):
        if domain == env["PRIMARY_HOSTNAME"]:
            # PRIMARY_HOSTNAME is handled above.
            continue
        if domain in web_domains_not_redirect:
            # This is a regular domain.
            if domain not in has_root_proxy_or_redirect:
                nginx_conf += make_domain_config(domain, [template0, template1], ssl_certificates, env)
            else:
                nginx_conf += make_domain_config(domain, [template0], ssl_certificates, env)
        else:
            # Add default 'www.' redirect.
            nginx_conf += make_domain_config(domain, [template0, template3], ssl_certificates, env)

    # Did the file change? If not, don't bother writing & restarting nginx.
    nginx_conf_fn = "/etc/nginx/conf.d/local.conf"
    if os.path.exists(nginx_conf_fn):
        with open(nginx_conf_fn) as f:
            if f.read() == nginx_conf:
                return ""

    # Save the file.
    with open(nginx_conf_fn, "w") as f:
        f.write(nginx_conf)

    # Kick nginx. Since this might be called from the web admin
    # don't do a 'restart'. That would kill the connection before
    # the API returns its response. A 'reload' should be good
    # enough and doesn't break any open connections.
    shell("check_call", ["/usr/sbin/service", "nginx", "reload"])

    return "web updated\n"
Example #30
def build_server(libsearpc, ccnet, seafile):
    cmd = [
        'python',
        join(TOPDIR, 'seafile/scripts/build/build-server.py'),
        '--yes',
        '--version=%s' % seafile.version,
        '--libsearpc_version=%s' % libsearpc.version,
        '--ccnet_version=%s' % ccnet.version,
        '--seafile_version=%s' % seafile.version,
        '--thirdpartdir=%s' % THIRDPARTDIR,
        '--srcdir=%s' % SRCDIR,
        '--jobs=4',
    ]
    shell(cmd, shell=False, env=make_build_env())
Example #31
	def __init__(self):
		self.buf = sys.stdout

		# Do nice line-wrapping according to the size of the terminal.
		# The 'stty' program queries standard input for terminal information.
		if sys.stdin.isatty():
			try:
				self.width = int(shell('check_output', ['stty', 'size']).split()[1])
			except:
				self.width = 76

		else:
			# However if standard input is not a terminal, we would get
			# "stty: standard input: Inappropriate ioctl for device". So
			# we test with sys.stdin.isatty first, and if it is not a
			# terminal don't do any line wrapping. When this script is
			# run from cron, or if stdin has been redirected, this happens.
			self.width = None
Example #32
def run_network_checks(env, output):
	# Also see setup/network-checks.sh.

	output.add_heading("Network")

	check_ufw(env, output)

	# Stop if we cannot make an outbound connection on port 25. Many residential
	# networks block outbound port 25 to prevent their network from sending spam.
	# See if we can reach one of Google's MTAs with a 5-second timeout.
	code, ret = shell("check_call", ["/bin/nc", "-z", "-w5", "aspmx.l.google.com", "25"], trap=True)
	if ret == 0:
		output.print_ok("Outbound mail (SMTP port 25) is not blocked.")
	else:
		output.print_warning("""Outbound mail (SMTP port 25) seems to be blocked by your network. You
			will not be able to send any mail without an SMTP relay. Many residential networks block port 25 to prevent
			hijacked machines from being able to send spam. A quick connection test to Google's mail server on port 25
			failed.""")

	# Stop if the IPv4 address is listed in the ZEN Spamhaus Block List.
	# The user might have ended up on an IP address that was previously in use
	# by a spammer, or the user may be deploying on a residential network. We
	# will not be able to reliably send mail in these cases.
	rev_ip4 = ".".join(reversed(env['PUBLIC_IP'].split('.')))
	zen = query_dns(rev_ip4+'.zen.spamhaus.org', 'A', nxdomain=None)
	if zen is None:
		output.print_ok("IP address is not blacklisted by zen.spamhaus.org.")
	else:
		output.print_error("""The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
			which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
			% (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))

	# Check if a SMTP relay is set up. It's not strictly required, but on some providers
	# it might be needed.
	config = load_settings(env)
	if config.get("SMTP_RELAY_ENABLED"):
		if config.get("SMTP_RELAY_AUTH"):
			output.print_ok("An authenticated SMTP relay has been set up via port 587.")
		else:
			output.print_warning("An SMTP relay has been set up, but it is not authenticated.")
	elif ret == 0:
		output.print_ok("No SMTP relay has been set up (but that's ok since port 25 is not blocked).")
	else:
		output.print_error("No SMTP relay has been set up. Since port 25 is blocked, you will probably not be able to send any mail.")
Example #33
def check_service(i, service, env):
	import socket
	output = BufferedOutput()
	running = False
	fatal = False
	s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
	s.settimeout(1)
	try:
		try:
			s.connect((
				"127.0.0.1" if not service["public"] else env['PUBLIC_IP'],
				service["port"]))
			running = True
		except OSError as e1:
			if service["public"] and service["port"] != 53:
				# For public services (except DNS), try the private IP as a fallback.
				s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
				s1.settimeout(1)
				try:
					s1.connect(("127.0.0.1", service["port"]))
					output.print_error("%s is running but is not publicly accessible at %s:%d (%s)." % (service['name'], env['PUBLIC_IP'], service['port'], str(e1)))
				except:
					raise e1
				finally:
					s1.close()
			else:
				raise

	except OSError as e:
		output.print_error("%s is not running (%s; port %d)." % (service['name'], str(e), service['port']))

		# Why is nginx not running?
		if service["port"] in (80, 443):
			output.print_line(shell('check_output', ['nginx', '-t'], capture_stderr=True, trap=True)[1].strip())

		# Flag if local DNS is not running.
		if service["port"] == 53 and service["public"] == False:
			fatal = True
	finally:
		s.close()

	return (i, running, fatal, output)
Example #34
def check_ufw(env, output):
    ufw = shell('check_output', ['ufw', 'status']).splitlines()

    if ufw[0] == "Status: active":
        not_allowed_ports = 0
        for service in get_services():
            if service["public"] and not is_port_allowed(ufw, service["port"]):
                not_allowed_ports += 1
                output.print_error(
                    "Port %s (%s) should be allowed in the firewall, please re-run the setup."
                    % (service["port"], service["name"]))

        if not_allowed_ports == 0:
            output.print_ok("Firewall is active.")
    else:
        output.print_warning(
            """The firewall is disabled on this machine. This might be because the system
			is protected by an external firewall. We can't protect the system against bruteforce attacks
			without the local firewall active. Connect to the system via ssh and try to run: ufw enable."""
        )
Example #35
def index():
    # Render the control panel. This route does not require user authentication
    # so it must be safe!

    no_users_exist = (len(get_mail_users(env)) == 0)
    no_admins_exist = (len(get_admins(env)) == 0)

    utils.fix_boto()  # must call prior to importing boto
    import boto.s3
    backup_s3_hosts = [(r.name, r.endpoint) for r in boto.s3.regions()]
    lsb = utils.shell("check_output", ["/usr/bin/lsb_release", "-d"])

    return render_template(
        'index.html',
        hostname=env['PRIMARY_HOSTNAME'],
        storage_root=env['STORAGE_ROOT'],
        no_users_exist=no_users_exist,
        no_admins_exist=no_admins_exist,
        backup_s3_hosts=backup_s3_hosts,
        csr_country_codes=csr_country_codes,
    )
Example #36
def main():
    args = check_argv()

    print(datetime.datetime.now())

    print("Reading HTK features from directory:", args.htk_dir)
    npz_dict = {}
    n_feat_files = 0
    for feat_fn in glob.glob(path.join(args.htk_dir, "*." + args.extension)):
        hlist_output = shell("HList -r " + feat_fn)
        features = [[float(i) for i in line.split(" ") if i != ""]
                    for line in hlist_output.split("\n") if line != ""]
        key = path.splitext(path.split(feat_fn)[-1])[0]
        npz_dict[key] = np.array(features)
        n_feat_files += 1
    print("Read", n_feat_files, "feature files")

    print("Writing NumPy archive:", args.npz_fn)
    np.savez(args.npz_fn, **npz_dict)

    print(datetime.datetime.now())
Example #37
def build_tlsa_record(env):
    # A DANE TLSA record in DNS specifies that connections on a port
    # must use TLS and the certificate must match a particular certificate.
    #
    # Thanks to http://blog.huque.com/2012/10/dnssec-and-certificates.html
    # for explaining all of this!

    # Get the hex SHA256 of the DER-encoded server certificate:
    certder = shell("check_output", [
        "/usr/bin/openssl", "x509", "-in",
        os.path.join(env["STORAGE_ROOT"], "ssl", "ssl_certificate.pem"),
        "-outform", "DER"
    ],
                    return_bytes=True)
    certhash = hashlib.sha256(certder).hexdigest()

    # Specify the TLSA parameters:
    # 3: This is the certificate that the client should trust. No CA is needed.
    # 0: The whole certificate is matched.
    # 1: The certificate is SHA256'd here.
    return "3 0 1 " + certhash
Example #38
class ConsoleOutput:
    try:
        terminal_columns = int(
            shell('check_output', ['stty', 'size']).split()[1])
    except:
        terminal_columns = 76

    def add_heading(self, heading):
        print()
        print(heading)
        print("=" * len(heading))

    def print_ok(self, message):
        self.print_block(message, first_line="✓  ")

    def print_error(self, message):
        self.print_block(message, first_line="✖  ")

    def print_warning(self, message):
        self.print_block(message, first_line="?  ")

    def print_block(self, message, first_line="   "):
        print(first_line, end='')
        message = re.sub("\n\s*", " ", message)
        words = re.split("(\s+)", message)
        linelen = 0
        for w in words:
            if linelen + len(w) > self.terminal_columns - 1 - len(first_line):
                print()
                print("   ", end="")
                linelen = 0
            if linelen == 0 and w.strip() == "": continue
            print(w, end="")
            linelen += len(w)
        print()

    def print_line(self, message, monospace=False):
        for line in message.split("\n"):
            self.print_block(line)
Example #39
def mailgraph():
    if request.query_string:
        query = request.query_string.decode('utf-8', 'ignore')
        if '&' in query:
            query = query.split('&')[0]

        print("QUERY_STRING=%s" % query, file=sys.stderr)

        code, bin_out = utils.shell("check_output",
                                    ["/usr/share/mailgraph/mailgraph.cgi"],
                                    env={"QUERY_STRING": query},
                                    return_bytes=True,
                                    trap=True)

        if code != 0:
            return ('Error generating mailgraph image: %s' % query, 500)

        headers, image_bytes = bin_out.split(b'\n\n', 1)

        return base64.b64encode(image_bytes)

    return ('Mailgraph: no image requested', 500)
Example #40
def build_sshfp_records():
    # The SSHFP record is a way for us to embed this server's SSH public
    # key fingerprint into the DNS so that remote hosts have an out-of-band
    # method to confirm the fingerprint. See RFC 4255 and RFC 6594. This
    # depends on DNSSEC.
    #
    # On the client side, set SSH's VerifyHostKeyDNS option to 'ask' to
    # include this info in the key verification prompt or 'yes' to trust
    # the SSHFP record.
    #
    # See https://github.com/xelerance/sshfp for inspiration.

    algorithm_number = {
        "ssh-rsa": 1,
        "ssh-dss": 2,
        "ecdsa-sha2-nistp256": 3,
    }

    # Get our local fingerprints by running ssh-keyscan. The output looks
    # like the known_hosts file: hostname, keytype, fingerprint. The order
    # of the output is arbitrary, so sort it to prevent spurious updates
    # to the zone file (that trigger bumping the serial number).
    keys = shell("check_output", ["ssh-keyscan", "localhost"])
    for key in sorted(keys.split("\n")):
        if key.strip() == "" or key[0] == "#": continue
        try:
            host, keytype, pubkey = key.split(" ")
            yield "%d %d ( %s )" % (
                algorithm_number[keytype],
                2,  # specifies we are using SHA-256 on next line
                hashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(),
            )
        except:
            # Lots of things can go wrong. Don't let it disturb the DNS
            # zone.
            pass
Example #41
def run_network_checks(env):
    # Also see setup/network-checks.sh.

    output = BufferedOutput()
    output.add_heading("Network")

    # Stop if we cannot make an outbound connection on port 25. Many residential
    # networks block outbound port 25 to prevent their network from sending spam.
    # See if we can reach one of Google's MTAs with a 5-second timeout.
    code, ret = shell("check_call",
                      ["/bin/nc", "-z", "-w5", "aspmx.l.google.com", "25"],
                      trap=True)
    if ret == 0:
        output.print_ok("Outbound mail (SMTP port 25) is not blocked.")
    else:
        output.print_error(
            """Outbound mail (SMTP port 25) seems to be blocked by your network. You
			will not be able to send any mail. Many residential networks block port 25 to prevent hijacked
			machines from being able to send spam. A quick connection test to Google's mail server on port 25
			failed.""")

    # Stop if the IPv4 address is listed in the ZEN Spamhaus Block List.
    # The user might have ended up on an IP address that was previously in use
    # by a spammer, or the user may be deploying on a residential network. We
    # will not be able to reliably send mail in these cases.
    rev_ip4 = ".".join(reversed(env['PUBLIC_IP'].split('.')))
    zen = query_dns(rev_ip4 + '.zen.spamhaus.org', 'A', nxdomain=None)
    if zen is None:
        output.print_ok("IP address is not blacklisted by zen.spamhaus.org.")
    else:
        output.print_error(
            """The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
			which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
            % (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))

    return output
Example #42
def post_install_func(env):
    ret = []

    # Get the certificate to use for PRIMARY_HOSTNAME.
    ssl_certificates = get_ssl_certificates(env)
    cert = get_domain_ssl_files(env['PRIMARY_HOSTNAME'],
                                ssl_certificates,
                                env,
                                use_main_cert=False)
    if not cert:
        # Ruh-row, we don't have any certificate usable
        # for the primary hostname.
        ret.append("there is no valid certificate for " +
                   env['PRIMARY_HOSTNAME'])

    # Symlink the best cert for PRIMARY_HOSTNAME to the system
    # certificate path, which is hard-coded for various purposes, and then
    # restart postfix, dovecot and openldap.
    system_ssl_certificate = os.path.join(
        os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_certificate.pem'))
    if cert and os.readlink(system_ssl_certificate) != cert['certificate']:
        # Update symlink.
        ret.append("updating primary certificate")
        ssl_certificate = cert['certificate']
        os.unlink(system_ssl_certificate)
        os.symlink(ssl_certificate, system_ssl_certificate)

        # Restart postfix and dovecot so they pick up the new file.
        shell('check_call', ["/usr/sbin/service", "slapd", "restart"])
        shell('check_call', ["/usr/sbin/service", "postfix", "restart"])
        shell('check_call', ["/usr/sbin/service", "dovecot", "restart"])
        ret.append("mail services restarted")

        # The DANE TLSA record will remain valid so long as the private key
        # hasn't changed. We don't ever change the private key automatically.
        # If the user does it, they must manually update DNS.

    # Update the web configuration so nginx picks up the new certificate file.
    from web_update import do_web_update
    ret.append(do_web_update(env))

    return ret
Example #43
def setup_and_test(db):
    cfg = ServerConfig(
        installdir=INSTALLDIR,
        tarball=join(TOPDIR, 'seafile-server_{}_x86-64.tar.gz'.format(
            seafile_version)),
        version=seafile_version)
    info('Setting up seafile server with %s database', db)
    setup_server(cfg, db)
    # enable webdav; we're going to run seafdav tests later
    shell('''sed -i -e "s/enabled = false/enabled = true/g" {}'''
          .format(join(INSTALLDIR, 'conf/seafdav.conf')))
    try:
        start_server(cfg)
        info('Testing seafile server with %s database', db)
        create_test_user(cfg)
        run_tests(cfg)
    except:
        for logfile in glob.glob('{}/logs/*.log'.format(INSTALLDIR)):
            shell('echo {0}; cat {0}'.format(logfile))
        for logfile in glob.glob('{}/seafile-server-{}/runtime/*.log'.format(
                INSTALLDIR, seafile_version)):
            shell('echo {0}; cat {0}'.format(logfile))
        raise
Example #44
def list_target_files(config):
    import urllib.parse
    try:
        target = urllib.parse.urlparse(config["target"])
    except ValueError:
        return "invalid target"

    if target.scheme == "file":
        return [(fn, os.path.getsize(os.path.join(target.path, fn)))
                for fn in os.listdir(target.path)]

    elif target.scheme == "rsync":
        rsync_fn_size_re = re.compile(r'.*    ([^ ]*) [^ ]* [^ ]* (.*)')
        rsync_target = '{host}:{path}'

        target_path = target.path
        if not target_path.endswith('/'):
            target_path = target_path + '/'
        if target_path.startswith('/'):
            target_path = target_path[1:]

        rsync_command = [
            'rsync', '-e',
            '/usr/bin/ssh -i /root/.ssh/id_rsa_miab -oStrictHostKeyChecking=no -oBatchMode=yes',
            '--list-only', '-r',
            rsync_target.format(host=target.netloc, path=target_path)
        ]

        code, listing = shell('check_output',
                              rsync_command,
                              trap=True,
                              capture_stderr=True)
        if code == 0:
            ret = []
            for l in listing.split('\n'):
                match = rsync_fn_size_re.match(l)
                if match:
                    ret.append((match.groups()[1],
                                int(match.groups()[0].replace(',', ''))))
            return ret
        else:
            if 'Permission denied (publickey).' in listing:
                reason = "Invalid user, or check that you correctly copied the SSH key."
            elif 'No such file or directory' in listing:
                reason = "Provided path {} is invalid.".format(target_path)
            elif 'Network is unreachable' in listing:
                reason = "The IP address {} is unreachable.".format(
                    target.hostname)
            elif 'Could not resolve hostname' in listing:
                reason = "The hostname {} cannot be resolved.".format(
                    target.hostname)
            else:
                reason = "Unknown error. " \
                  "Please check running 'python management/backup.py --verify' " \
                  "from mailinabox sources to debug the issue."
            raise ValueError(
                "Connection to rsync host failed: {}".format(reason))

    elif target.scheme == "s3":
        # match to a Region
        fix_boto()  # must call prior to importing boto
        import boto.s3
        from boto.exception import BotoServerError
        custom_region = False
        for region in boto.s3.regions():
            if region.endpoint == target.hostname:
                break
        else:
            # If region is not found this is a custom region
            custom_region = True

        bucket = target.path[1:].split('/')[0]
        path = '/'.join(target.path[1:].split('/')[1:]) + '/'

        # Create a custom region with custom endpoint
        if custom_region:
            from boto.s3.connection import S3Connection
            region = boto.s3.S3RegionInfo(name=bucket,
                                          endpoint=target.hostname,
                                          connection_cls=S3Connection)

        # If no prefix is specified, set the path to '', otherwise boto won't list the files
        if path == '/':
            path = ''

        if bucket == "":
            raise ValueError("Enter an S3 bucket name.")

        # connect to the region & bucket
        try:
            conn = region.connect(aws_access_key_id=config["target_user"],
                                  aws_secret_access_key=config["target_pass"])
            bucket = conn.get_bucket(bucket)
        except BotoServerError as e:
            if e.status == 403:
                raise ValueError("Invalid S3 access key or secret access key.")
            elif e.status == 404:
                raise ValueError("Invalid S3 bucket name.")
            elif e.status == 301:
                raise ValueError("Incorrect region for this bucket.")
            raise ValueError(e.reason)

        return [(key.name[len(path):], key.size)
                for key in bucket.list(prefix=path)]

    else:
        raise ValueError(config["target"])
Example #45
def do_dns_update(env, force=False):
    # What domains (and their zone filenames) should we build?
    domains = get_dns_domains(env)
    zonefiles = get_dns_zones(env)

    # Custom records to add to zones.
    additional_records = get_custom_dns_config(env)

    # Write zone files.
    os.makedirs('/etc/nsd/zones', exist_ok=True)
    updated_domains = []
    for i, (domain, zonefile) in enumerate(zonefiles):
        # Build the records to put in the zone.
        records = build_zone(domain, domains, additional_records, env)

        # See if the zone has changed, and if so update the serial number
        # and write the zone file.
        if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records,
                              env, force):
            # Zone was not updated. There were no changes.
            continue

        # If this is a .justtesting.email domain, then post the update.
        try:
            justtestingdotemail(domain, records)
        except:
            # Hmm. Might be a network issue. If we stop now, will we end
            # up in an inconsistent state? Let's just continue.
            pass

        # Mark that we just updated this domain.
        updated_domains.append(domain)

        # Sign the zone.
        #
        # Every time we sign the zone we get a new result, which means
        # we can't sign a zone without bumping the zone's serial number.
        # Thus we only sign a zone if write_nsd_zone returned True
        # indicating the zone changed, and thus it got a new serial number.
        # write_nsd_zone is smart enough to check if a zone's signature
        # is nearing expiration and if so it'll bump the serial number
        # and return True so we get a chance to re-sign it.
        sign_zone(domain, zonefile, env)

    # Now that all zones are signed (some might not have changed and so didn't
    # just get signed now, but were before) update the zone filename so nsd.conf
    # uses the signed file.
    for i in range(len(zonefiles)):
        zonefiles[i][1] += ".signed"

    # Write the main nsd.conf file.
    if write_nsd_conf(zonefiles, env):
        # Make sure updated_domains contains *something* if we wrote an updated
        # nsd.conf so that we know to restart nsd.
        if len(updated_domains) == 0:
            updated_domains.append("DNS configuration")

    # Kick nsd if anything changed.
    if len(updated_domains) > 0:
        shell('check_call', ["/usr/sbin/service", "nsd", "restart"])

    # Write the OpenDKIM configuration tables.
    write_opendkim_tables(zonefiles, env)

    # Kick opendkim.
    shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])

    if len(updated_domains) == 0:
        # if nothing was updated (except maybe OpenDKIM's files), don't show any output
        return ""
    else:
        return "updated DNS: " + ",".join(updated_domains) + "\n"
Example #46
def backup_status(env):
    # If backups are disabled, return no status.
    config = get_backup_config(env)
    if config["target"] == "off":
        return {}

    # Query duplicity to get a list of all full and incremental
    # backups available.

    backups = {}
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    backup_cache_dir = os.path.join(backup_root, 'cache')

    def reldate(date, ref, clip):
        if ref < date: return clip
        rd = dateutil.relativedelta.relativedelta(ref, date)
        if rd.years > 1: return "%d years, %d months" % (rd.years, rd.months)
        if rd.years == 1: return "%d year, %d months" % (rd.years, rd.months)
        if rd.months > 1: return "%d months, %d days" % (rd.months, rd.days)
        if rd.months == 1: return "%d month, %d days" % (rd.months, rd.days)
        if rd.days >= 7: return "%d days" % rd.days
        if rd.days > 1: return "%d days, %d hours" % (rd.days, rd.hours)
        if rd.days == 1: return "%d day, %d hours" % (rd.days, rd.hours)
        return "%d hours, %d minutes" % (rd.hours, rd.minutes)

    # Get duplicity collection status and parse for a list of backups.
    def parse_line(line):
        keys = line.strip().split()
        date = dateutil.parser.parse(keys[1]).astimezone(dateutil.tz.tzlocal())
        return {
            "date": keys[1],
            "date_str": date.strftime("%Y-%m-%d %X") + " " + now.tzname(),
            "date_delta": reldate(date, now, "the future?"),
            "full": keys[0] == "full",
            "size": 0,  # collection-status doesn't give us the size
            "volumes": int(
                keys[2]
            ),  # number of archive volumes for this backup (not really helpful)
        }

    code, collection_status = shell('check_output',
                                    [
                                        "/usr/bin/duplicity",
                                        "collection-status",
                                        "--archive-dir",
                                        backup_cache_dir,
                                        "--gpg-options",
                                        "--cipher-algo=AES256",
                                        "--log-fd",
                                        "1",
                                        config["target"],
                                    ] + rsync_ssh_options,
                                    get_env(env),
                                    trap=True)
    if code != 0:
        # Command failed. This is likely due to an improperly configured remote
        # destination for the backups or the last backup job terminated unexpectedly.
        raise Exception("Something is wrong with the backup: " +
                        collection_status)
    for line in collection_status.split('\n'):
        if line.startswith(" full") or line.startswith(" inc"):
            backup = parse_line(line)
            backups[backup["date"]] = backup

    # Look at the target directly to get the sizes of each of the backups. There is more than one file per backup.
    # Starting with duplicity in Ubuntu 18.04, "signatures" files have dates in their
    # filenames that are a few seconds off the backup date and so don't line up
    # with the list of backups we have. Track unmatched files so we know how much other
    # space is used for those.
    unmatched_file_size = 0
    for fn, size in list_target_files(config):
        m = re.match(
            r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.",
            fn)
        if not m: continue  # not a part of a current backup chain
        key = m.group("date")
        if key in backups:
            backups[key]["size"] += size
        else:
            unmatched_file_size += size

    # Ensure the rows are sorted reverse chronologically.
    # This is relied on by should_force_full() and the next step.
    backups = sorted(backups.values(), key=lambda b: b["date"], reverse=True)

    # Get the average size of incremental backups, the size of the
    # most recent full backup, and the date of the most recent
    # backup and the most recent full backup.
    incremental_count = 0
    incremental_size = 0
    first_date = None
    first_full_size = None
    first_full_date = None
    for bak in backups:
        if first_date is None:
            first_date = dateutil.parser.parse(bak["date"])
        if bak["full"]:
            first_full_size = bak["size"]
            first_full_date = dateutil.parser.parse(bak["date"])
            break
        incremental_count += 1
        incremental_size += bak["size"]

    # When will the most recent backup be deleted? It won't be deleted if the next
    # backup is incremental, because the increments rely on all past increments.
    # So first guess how many more incremental backups will occur until the next
    # full backup. That full backup frees up this one to be deleted. But, the backup
    # must also be at least min_age_in_days old too.
    deleted_in = None
    if incremental_count > 0 and incremental_size > 0 and first_full_size is not None:
        # How many days until the next incremental backup? First, the part of
        # the algorithm based on increment sizes:
        est_days_to_next_full = (.5 * first_full_size - incremental_size) / (
            incremental_size / incremental_count)
        est_time_of_next_full = first_date + datetime.timedelta(
            days=est_days_to_next_full)

        # ...And then the part of the algorithm based on full backup age:
        est_time_of_next_full = min(
            est_time_of_next_full, first_full_date +
            datetime.timedelta(days=config["min_age_in_days"] * 10 + 1))

        # It still can't be deleted until it's old enough.
        est_deleted_on = max(
            est_time_of_next_full,
            first_date + datetime.timedelta(days=config["min_age_in_days"]))

        deleted_in = "approx. %d days" % round(
            (est_deleted_on - now).total_seconds() / 60 / 60 / 24 + .5)

    # When will a backup be deleted? Set the deleted_in field of each backup.
    saw_full = False
    for bak in backups:
        if deleted_in:
            # The most recent increment in a chain and all of the previous backups
            # it relies on are deleted at the same time.
            bak["deleted_in"] = deleted_in
        if bak["full"]:
            # Reset when we get to a full backup. A new chain starts *next*.
            saw_full = True
            deleted_in = None
        elif saw_full and not deleted_in:
            # We're now on backups prior to the most recent full backup. These are
            # free to be deleted as soon as they are min_age_in_days old.
            deleted_in = reldate(
                now,
                dateutil.parser.parse(bak["date"]) +
                datetime.timedelta(days=config["min_age_in_days"]),
                "on next daily backup")
            bak["deleted_in"] = deleted_in

    return {
        "backups": backups,
        "unmatched_file_size": unmatched_file_size,
    }
Example #47
def perform_backup(full_backup):
    env = load_environment()

    # Create a global exclusive lock so that the backup script
    # cannot be run more than once at a time.
    Lock(die=True).forever()

    config = get_backup_config(env)
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    backup_cache_dir = os.path.join(backup_root, 'cache')
    backup_dir = os.path.join(backup_root, 'encrypted')

    # Are backups disabled?
    if config["target"] == "off":
        return

    # On the first run, always do a full backup. Incremental
    # will fail. Otherwise do a full backup when the size of
    # the increments since the most recent full backup are
    # large.
    try:
        full_backup = full_backup or should_force_full(config, env)
    except Exception as e:
        # This was the first call to duplicity, and there might
        # be an error already.
        print(e)
        sys.exit(1)

    # Stop services.
    def service_command(service, command, quit=None):
        # Execute silently, but if there is an error then display the output & exit.
        code, ret = shell('check_output',
                          ["/usr/sbin/service", service, command],
                          capture_stderr=True,
                          trap=True)
        if code != 0:
            print(ret)
            if quit:
                sys.exit(code)

    service_command("php7.2-fpm", "stop", quit=True)
    service_command("postfix", "stop", quit=True)
    service_command("dovecot", "stop", quit=True)

    # Execute a pre-backup script that copies files outside the homedir.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    pre_script = os.path.join(backup_root, 'before-backup')
    if os.path.exists(pre_script):
        shell('check_call',
              ['su', env['STORAGE_USER'], '-c', pre_script, config["target"]],
              env=env)

    # Run a backup of STORAGE_ROOT (but excluding the backups themselves!).
    # --allow-source-mismatch is needed in case the box's hostname is changed
    # after the first backup. See #396.
    try:
        shell('check_call', [
            "/usr/bin/duplicity", "full" if full_backup else "incr",
            "--verbosity", "warning", "--no-print-statistics", "--archive-dir",
            backup_cache_dir, "--exclude", backup_root, "--volsize", "250",
            "--gpg-options", "--cipher-algo=AES256", env["STORAGE_ROOT"],
            config["target"], "--allow-source-mismatch"
        ] + rsync_ssh_options, get_env(env))
    finally:
        # Start services again.
        service_command("dovecot", "start", quit=False)
        service_command("postfix", "start", quit=False)
        service_command("php7.2-fpm", "start", quit=False)

    # Remove old backups. This deletes all backup data no longer needed
    # from more than min_age_in_days days ago.
    shell('check_call', [
        "/usr/bin/duplicity", "remove-older-than",
        "%dD" % config["min_age_in_days"], "--verbosity", "error",
        "--archive-dir", backup_cache_dir, "--force", config["target"]
    ] + rsync_ssh_options, get_env(env))

    # From duplicity's manual:
    # "This should only be necessary after a duplicity session fails or is
    # aborted prematurely."
    # That may be unlikely here but we may as well ensure we tidy up if
    # that does happen - it might just have been a poorly timed reboot.
    shell('check_call', [
        "/usr/bin/duplicity", "cleanup", "--verbosity", "error",
        "--archive-dir", backup_cache_dir, "--force", config["target"]
    ] + rsync_ssh_options, get_env(env))

    # Change ownership of backups to the user-data user, so that the after-backup
    # script can access them.
    if get_target_type(config) == 'file':
        shell('check_call',
              ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])

    # Execute a post-backup script that does the copying to a remote server.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    post_script = os.path.join(backup_root, 'after-backup')
    if os.path.exists(post_script):
        shell('check_call',
              ['su', env['STORAGE_USER'], '-c', post_script, config["target"]],
              env=env)

    # Our nightly cron job executes system status checks immediately after this
    # backup. Since it checks that dovecot and postfix are running, block for a
    # bit (maximum of 10 seconds each) to give each a chance to finish restarting
    # before the status checks might catch them down. See #381.
    wait_for_service(25, True, env, 10)
    wait_for_service(993, True, env, 10)
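The final two calls wait for postfix (SMTP, port 25) and dovecot (IMAPS, port 993) to come back up. wait_for_service isn't shown here; a minimal sketch, assuming it polls a TCP port until it accepts a connection or the timeout (in seconds) expires, and that 'public' selects the box's public IP over loopback, might look like:

import socket
import time

def wait_for_service(port, public, env, timeout):
    # Hedged sketch; signature inferred from the calls above.
    host = env.get("PUBLIC_IP", "127.0.0.1") if public else "127.0.0.1"
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=1):
                return True
        except OSError:
            time.sleep(0.5)
    return False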
Example #48
0
def migration_13(env):
	# Add the "mfa" table for configuring MFA for login to the control panel.
	db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite')
	shell("check_call", ["sqlite3", db, "CREATE TABLE mfa (id INTEGER PRIMARY KEY AUTOINCREMENT, user_id INTEGER NOT NULL, type TEXT NOT NULL, secret TEXT NOT NULL, mru_token TEXT, label TEXT, FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE);"])
Example #49
0
def migration_4(env):
	# Add a new column to the mail users table where we can store administrative privileges.
	db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite')
	shell("check_call", ["sqlite3", db, "ALTER TABLE users ADD privileges TEXT NOT NULL DEFAULT ''"])
Example #50
0
 def use_branch(self, branch):
     shell('git checkout {}'.format(branch))
Example #51
0
def hash_password(pw):
	# Turn the plain password into a Dovecot-format hashed password, meaning
	# something like "{SCHEME}hashedpassworddata".
	# http://wiki2.dovecot.org/Authentication/PasswordSchemes
	return utils.shell('check_output', ["/usr/bin/doveadm", "pw", "-s", "SHA512-CRYPT", "-p", pw]).strip()
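doveadm returns a string of the form "{SHA512-CRYPT}$6$salt$digest". A password can later be verified against such a hash with the Unix-only stdlib crypt module (deprecated in newer Pythons); a hedged sketch, assuming the stored value uses exactly that scheme:

import crypt
import hmac

def verify_password(pw, stored):
    # Hedged sketch: verify a plain password against a Dovecot-format
    # "{SHA512-CRYPT}..." hash. Unix-only (crypt module).
    prefix = "{SHA512-CRYPT}"
    if not stored.startswith(prefix):
        raise ValueError("unexpected password scheme")
    hashed = stored[len(prefix):]
    return hmac.compare_digest(crypt.crypt(pw, hashed), hashed)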
Example #52
0
            })
        else:
            pkgs.append({
                "package": "[" + line + "]",
                "version": "",
                "current_version": ""
            })

    # Cache for future requests.
    _apt_updates = (datetime.datetime.now(), pkgs)

    return pkgs


try:
    terminal_columns = int(shell('check_output', ['stty', 'size']).split()[1])
except Exception:
    terminal_columns = 76


class ConsoleOutput:
    def add_heading(self, heading):
        print()
        print(heading)
        print("=" * len(heading))

    def print_ok(self, message):
        self.print_block(message, first_line="✓  ")

    def print_error(self, message):
        self.print_block(message, first_line="✖  ")
Example #53
0
	def __init__(self):
		self.buf = sys.stdout
		try:
			self.width = int(shell('check_output', ['stty', 'size']).split()[1])
		except:
			self.width = 76
Example #54
0
def check_certificate(domain, ssl_certificate, ssl_private_key):
    # Use openssl verify to check the status of a certificate.

    # First check that the certificate is for the right domain. The domain
    # must be found in the Subject Common Name (CN) or be one of the
    # Subject Alternative Names. A wildcard might also appear as the CN
    # or in the SAN list, so check for that too.
    cert_dump = shell('check_output', [
        "openssl",
        "x509",
        "-in",
        ssl_certificate,
        "-noout",
        "-text",
        "-nameopt",
        "rfc2253",
    ])
    cert_dump = cert_dump.split("\n")
    certificate_names = set()
    cert_expiration_date = None
    while len(cert_dump) > 0:
        line = cert_dump.pop(0)

        # Grab from the Subject Common Name. We include the indentation
        # at the start of the line in case maybe the cert includes the
        # common name of some other referenced entity (which would be
        # indented, I hope).
        m = re.match("        Subject: CN=([^,]+)", line)
        if m:
            certificate_names.add(m.group(1))

        # Grab from the Subject Alternative Name, which is a comma-delimited
        # list of names, like DNS:mydomain.com, DNS:otherdomain.com.
        m = re.match("            X509v3 Subject Alternative Name:", line)
        if m:
            names = re.split(r",\s*", cert_dump.pop(0).strip())
            for n in names:
                m = re.match("DNS:(.*)", n)
                if m:
                    certificate_names.add(m.group(1))

        m = re.match("            Not After : (.*)", line)
        if m:
            cert_expiration_date = dateutil.parser.parse(m.group(1))

    wildcard_domain = re.sub(r"^[^\.]+", "*", domain)
    if domain is not None and domain not in certificate_names and wildcard_domain not in certificate_names:
        return "This certificate is for the wrong domain names. It is for %s." % \
         ", ".join(sorted(certificate_names))

    # Second, check that the certificate matches the private key. Get the modulus of the
    # private key and of the public key in the certificate. They should match. The output
    # of each command looks like "Modulus=XXXXX".
    if ssl_private_key is not None:
        private_key_modulus = shell('check_output', [
            "openssl", "rsa", "-inform", "PEM", "-noout", "-modulus", "-in",
            ssl_private_key
        ])
        cert_key_modulus = shell(
            'check_output',
            ["openssl", "x509", "-in", ssl_certificate, "-noout", "-modulus"])
        if private_key_modulus != cert_key_modulus:
            return "The certificate installed at %s does not correspond to the private key at %s." % (
                ssl_certificate, ssl_private_key)

    # Next, check that the certificate itself is valid. This checks whether the certificate
    # is self-signed, that the chain of trust makes sense, that it is signed by a CA
    # that Ubuntu has installed on this machine's list of CAs, and I think that it hasn't
    # expired.

    # In order to verify with openssl, we need to split out any
    # intermediary certificates in the chain (if any) from our
    # certificate (at the top). They need to be passed separately.

    cert = open(ssl_certificate).read()
    m = re.match(r'(-*BEGIN CERTIFICATE-*.*?-*END CERTIFICATE-*)(.*)', cert,
                 re.S)
    if m is None:
        return "The certificate file is an invalid PEM certificate."
    mycert, chaincerts = m.groups()

    # This command returns a non-zero exit status in most cases, so trap errors.

    retcode, verifyoutput = shell(
        'check_output',
        [
            "openssl",
            "verify",
            "-verbose",
            "-purpose",
            "sslserver",
            "-policy_check",
        ] +
        ([] if chaincerts.strip() == "" else ["-untrusted", "/dev/stdin"]) +
        [ssl_certificate],
        input=chaincerts.encode('ascii'),
        trap=True)

    if "self signed" in verifyoutput:
        # Certificate is self-signed.
        return "SELF-SIGNED"
    elif retcode != 0:
        # There is some unknown problem. Return the `openssl verify` raw output.
        return verifyoutput.strip()
    else:
        # `openssl verify` returned a zero exit status so the cert is currently
        # good.

        # But is it expiring soon?
        now = datetime.datetime.now(dateutil.tz.tzlocal())
        ndays = (cert_expiration_date - now).days
        if ndays <= 31:
            return "This certificate expires in %d days on %s." % (
                ndays, cert_expiration_date.strftime("%x"))

        # Return the special OK code.
        return "OK"
Example #55
0
    def create_database_tables(self):
        if self.db == 'mysql':
            ccnet_sql_path = join(self.sql_dir, 'mysql', 'ccnet.sql')
            seafile_sql_path = join(self.sql_dir, 'mysql', 'seafile.sql')
            sql = f'USE ccnet; source {ccnet_sql_path}; USE seafile; source {seafile_sql_path};'.encode(
            )
            shell('mysql -u root', inputdata=sql, wait=False)
        else:
            config_sql_path = join(self.sql_dir, 'sqlite', 'config.sql')
            groupmgr_sql_path = join(self.sql_dir, 'sqlite', 'groupmgr.sql')
            org_sql_path = join(self.sql_dir, 'sqlite', 'org.sql')
            user_sql_path = join(self.sql_dir, 'sqlite', 'user.sql')
            seafile_sql_path = join(self.sql_dir, 'sqlite', 'seafile.sql')

            misc_dir = join(self.ccnet_conf_dir, 'misc')
            os.mkdir(misc_dir, 0o755)
            groupmgr_dir = join(self.ccnet_conf_dir, 'GroupMgr')
            os.mkdir(groupmgr_dir, 0o755)
            orgmgr_dir = join(self.ccnet_conf_dir, 'OrgMgr')
            os.mkdir(orgmgr_dir, 0o755)
            usermgr_dir = join(self.ccnet_conf_dir, 'PeerMgr')
            os.mkdir(usermgr_dir, 0o755)

            config_db_path = join(misc_dir, 'config.db')
            groupmgr_db_path = join(groupmgr_dir, 'groupmgr.db')
            orgmgr_db_path = join(orgmgr_dir, 'orgmgr.db')
            usermgr_db_path = join(usermgr_dir, 'usermgr.db')
            seafile_db_path = join(self.seafile_conf_dir, 'seafile.db')

            sql = f'.read {config_sql_path}'.encode()
            shell('sqlite3 ' + config_db_path, inputdata=sql, wait=False)
            sql = f'.read {groupmgr_sql_path}'.encode()
            shell('sqlite3 ' + groupmgr_db_path, inputdata=sql, wait=False)
            sql = f'.read {org_sql_path}'.encode()
            shell('sqlite3 ' + orgmgr_db_path, inputdata=sql, wait=False)
            sql = f'.read {user_sql_path}'.encode()
            shell('sqlite3 ' + usermgr_db_path, inputdata=sql, wait=False)
            sql = f'.read {seafile_sql_path}'.encode()
            shell('sqlite3 ' + seafile_db_path, inputdata=sql, wait=False)
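In the sqlite branch, each schema file is loaded by piping a ".read" command into the sqlite3 CLI. For illustration, the stdlib sqlite3 module can run a SQL script directly (the helper name here is hypothetical):

import sqlite3

def load_sql_file(db_path, sql_path):
    # Sketch: execute a SQL script against a SQLite database without
    # shelling out to the sqlite3 CLI.
    with open(sql_path, "r") as f, sqlite3.connect(db_path) as conn:
        conn.executescript(f.read())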
Example #56
0
 def print_logs(self):
     for logfile in self.ccnet_log, self.seafile_log:
         if exists(logfile):
             shell(f'cat {logfile}')
Example #57
0
def sign_zone(domain, zonefile, env):
    algo = dnssec_choose_algo(domain, env)
    dnssec_keys = load_env_vars_from_file(
        os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo))

    # In order to use the same keys for all domains, we have to generate
    # a new .key file with a DNSSEC record for the specific domain. We
    # can reuse the same key, but it won't validate without a DNSSEC
    # record specifically for the domain.
    #
    # Copy the .key and .private files to /tmp to patch them up.
    #
    # Use os.umask and open().write() to securely create a copy that only
    # we (root) can read.
    files_to_kill = []
    for key in ("KSK", "ZSK"):
        if dnssec_keys.get(key, "").strip() == "":
            raise Exception("DNSSEC is not properly set up.")
        oldkeyfn = os.path.join(env['STORAGE_ROOT'],
                                'dns/dnssec/' + dnssec_keys[key])
        newkeyfn = '/tmp/' + dnssec_keys[key].replace("_domain_", domain)
        dnssec_keys[key] = newkeyfn
        for ext in (".private", ".key"):
            if not os.path.exists(oldkeyfn + ext):
                raise Exception("DNSSEC is not properly set up.")
            with open(oldkeyfn + ext, "r") as fr:
                keydata = fr.read()
            keydata = keydata.replace(
                "_domain_", domain
            )  # trick ldns-signzone into letting our generic key be used by this zone
            fn = newkeyfn + ext
            prev_umask = os.umask(
                0o77)  # ensure written file is not world-readable
            try:
                with open(fn, "w") as fw:
                    fw.write(keydata)
            finally:
                os.umask(prev_umask
                         )  # other files we write should be world-readable
            files_to_kill.append(fn)

    # Do the signing.
    expiry_date = (datetime.datetime.now() +
                   datetime.timedelta(days=30)).strftime("%Y%m%d")
    shell(
        'check_call',
        [
            "/usr/bin/ldns-signzone",
            # expire the zone after 30 days
            "-e",
            expiry_date,

            # use NSEC3
            "-n",

            # zonefile to sign
            "/etc/nsd/zones/" + zonefile,

            # keys to sign with (order doesn't matter -- it'll figure it out)
            dnssec_keys["KSK"],
            dnssec_keys["ZSK"],
        ])

    # Create a DS record based on the patched-up key files. The DS record is specific to the
    # zone being signed, so we can't use the .ds files generated when we created the keys.
    # The DS record points to the KSK only. Write this next to the zone file so we can
    # get it later to give to the user with instructions on what to do with it.
    #
    # We want to be able to validate DS records too, but multiple forms may be valid depending
    # on the digest type. So we'll write all (both) valid records. Only one DS record should
    # actually be deployed. Preferably the first.
    with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f:
        for digest_type in ('2', '1'):
            rr_ds = shell(
                'check_output',
                [
                    "/usr/bin/ldns-key2ds",
                    "-n",  # output to stdout
                    "-" + digest_type,  # 1=SHA1, 2=SHA256
                    dnssec_keys["KSK"] + ".key"
                ])
            f.write(rr_ds)

    # Remove our temporary files.
    for fn in files_to_kill:
        os.unlink(fn)
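The key names come from a per-algorithm .conf file read by load_env_vars_from_file, which isn't shown. A minimal hypothetical sketch, assuming the file holds simple shell-style KEY=VALUE lines:

def load_env_vars_from_file(fn):
    # Hypothetical sketch: parse KEY=VALUE lines, ignoring blanks and comments.
    env = {}
    with open(fn) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue
            name, value = line.split("=", 1)
            env[name.strip()] = value.strip().strip('"')
    return env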
Example #58
0
def do_updates():
    utils.shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])
    return utils.shell("check_output", ["/usr/bin/apt-get", "-y", "upgrade"],
                       env={"DEBIAN_FRONTEND": "noninteractive"})
Example #59
0
def perform_backup(full_backup):
	env = load_environment()

	exclusive_process("backup")

	# Ensure the backup directory exists.
	backup_dir = os.path.join(env["STORAGE_ROOT"], 'backup')
	backup_duplicity_dir = os.path.join(backup_dir, 'duplicity')
	os.makedirs(backup_duplicity_dir, exist_ok=True)

	# On the first run, always do a full backup. Incremental
	# will fail. Otherwise do a full backup when the size of
	# the increments since the most recent full backup is
	# large.
	full_backup = full_backup or should_force_full(env)

	# Stop services.
	shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
	shell('check_call', ["/usr/sbin/service", "postfix", "stop"])

	# Update the backup mirror directory which mirrors the current
	# STORAGE_ROOT (but excluding the backups themselves!).
	try:
		shell('check_call', [
			"/usr/bin/duplicity",
			"full" if full_backup else "incr",
			"--no-encryption",
			"--archive-dir", "/tmp/duplicity-archive-dir",
			"--name", "mailinabox",
			"--exclude", backup_dir,
			"--volsize", "100",
			"--verbosity", "warning",
			env["STORAGE_ROOT"],
			"file://" + backup_duplicity_dir
			])
	finally:
		# Start services again.
		shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
		shell('check_call', ["/usr/sbin/service", "postfix", "start"])

	# Remove old backups. This deletes all backup data no longer needed
	# from more than 31 days ago. Must do this before destroying the
	# cache directory or else this command will re-create it.
	shell('check_call', [
		"/usr/bin/duplicity",
		"remove-older-than",
		"%dD" % keep_backups_for_days,
		"--archive-dir", "/tmp/duplicity-archive-dir",
		"--name", "mailinabox",
		"--force",
		"--verbosity", "warning",
		"file://" + backup_duplicity_dir
		])

	# Remove duplicity's cache directory because it's redundant with our backup directory.
	shutil.rmtree("/tmp/duplicity-archive-dir")

	# Encrypt all of the new files.
	backup_encrypted_dir = os.path.join(backup_dir, 'encrypted')
	os.makedirs(backup_encrypted_dir, exist_ok=True)
	for fn in os.listdir(backup_duplicity_dir):
		fn2 = os.path.join(backup_encrypted_dir, fn) + ".enc"
		if os.path.exists(fn2): continue

		# Encrypt the backup using the backup private key.
		shell('check_call', [
			"/usr/bin/openssl",
			"enc",
			"-aes-256-cbc",
			"-a",
			"-salt",
			"-in", os.path.join(backup_duplicity_dir, fn),
			"-out", fn2,
			"-pass", "file:%s" % os.path.join(backup_dir, "secret_key.txt"),
			])

		# The backup can be decrypted with:
		# openssl enc -d -aes-256-cbc -a -in latest.tgz.enc -out /dev/stdout -pass file:secret_key.txt | tar -z

	# Remove encrypted backups that are no longer needed.
	for fn in os.listdir(backup_encrypted_dir):
		fn2 = os.path.join(backup_duplicity_dir, fn.replace(".enc", ""))
		if os.path.exists(fn2): continue
		os.unlink(os.path.join(backup_encrypted_dir, fn))

	# Execute a post-backup script that does the copying to a remote server.
	# Run as the STORAGE_USER user, not as root. Pass our settings in
	# environment variables so the script has access to STORAGE_ROOT.
	post_script = os.path.join(backup_dir, 'after-backup')
	if os.path.exists(post_script):
		shell('check_call',
			['su', env['STORAGE_USER'], '-c', post_script],
			env=env)
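should_force_full isn't shown in this version. A hypothetical sketch for this file-based layout might force a full backup when the incremental volumes written since the last full backup outweigh the full backup itself (the "duplicity-full*" file-name prefix is an assumption):

import os

def should_force_full(env):
    # Hypothetical sketch only: compare the on-disk size of incremental
    # volumes against the full-backup volumes.
    backup_duplicity_dir = os.path.join(env["STORAGE_ROOT"], 'backup', 'duplicity')
    full_size = inc_size = 0
    for fn in os.listdir(backup_duplicity_dir):
        size = os.path.getsize(os.path.join(backup_duplicity_dir, fn))
        if fn.startswith("duplicity-full"):
            full_size += size
        else:
            inc_size += size
    # With no full backup yet, one is required anyway.
    return full_size == 0 or inc_size > 0.5 * full_size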
Example #60
0
def perform_backup(full_backup):
    env = load_environment()

    exclusive_process("backup")
    config = get_backup_config(env)
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    backup_cache_dir = os.path.join(backup_root, 'cache')
    backup_dir = os.path.join(backup_root, 'encrypted')

    # Are backups disabled?
    if config["target"] == "off":
        return

    # In an older version of this script, duplicity was called
    # such that it did not encrypt the backups it created (in
    # backup/duplicity), and instead openssl was called separately
    # after each backup run, creating AES256 encrypted copies of
    # each file created by duplicity in backup/encrypted.
    #
    # We detect the transition by the presence of backup/duplicity
    # and handle it by 'dupliception': we move all the old *un*encrypted
    # duplicity files up out of the backup/duplicity directory (as
    # backup/ is excluded from duplicity runs) in order that it is
    # included in the next run, and we delete backup/encrypted (which
    # duplicity will output files directly to, post-transition).
    old_backup_dir = os.path.join(backup_root, 'duplicity')
    migrated_unencrypted_backup_dir = os.path.join(env["STORAGE_ROOT"], "migrated_unencrypted_backup")
    if os.path.isdir(old_backup_dir):
        # Move the old unencrypted files to a new location outside of
        # the backup root so they get included in the next (new) backup.
        # Then we'll delete them. Moving them also keeps them out of the
        # way of duplicity doing a full backup on the first run after
        # we take care of this.
        shutil.move(old_backup_dir, migrated_unencrypted_backup_dir)

        # The backup_dir (backup/encrypted) now has a new purpose.
        # Clear it out.
        shutil.rmtree(backup_dir)

    # On the first run, always do a full backup. Incremental
    # will fail. Otherwise do a full backup when the size of
    # the increments since the most recent full backup is
    # large.
    try:
        full_backup = full_backup or should_force_full(config, env)
    except Exception as e:
        # This was the first call to duplicity, and there might
        # be an error already.
        print(e)
        sys.exit(1)

    # Stop services.
    def service_command(service, command, quit=None):
        # Execute silently, but if there is an error then display the output & exit.
        code, ret = shell('check_output', ["/usr/sbin/service", service, command], capture_stderr=True, trap=True)
        if code != 0:
            print(ret)
            if quit:
                sys.exit(code)

    service_command("postfix", "stop", quit=True)
    service_command("dovecot", "stop", quit=True)

    # Execute a pre-backup script that copies files outside the homedir.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    pre_script = os.path.join(backup_root, 'before-backup')
    if os.path.exists(pre_script):
        shell('check_call',
              ['su', env['STORAGE_USER'], '-c', pre_script, config["target"]],
              env=env)

    # Run a backup of STORAGE_ROOT (but excluding the backups themselves!).
    # --allow-source-mismatch is needed in case the box's hostname is changed
    # after the first backup. See #396.
    try:
        shell('check_call', [
            "/usr/bin/duplicity",
            "full" if full_backup else "incr",
            "--verbosity", "warning", "--no-print-statistics",
            "--archive-dir", backup_cache_dir,
            "--exclude", backup_root,
            "--volsize", "250",
            "--gpg-options", "--cipher-algo=AES256",
            env["STORAGE_ROOT"],
            config["target"],
            "--allow-source-mismatch"
        ],
              get_env(env))
    finally:
        # Start services again.
        service_command("dovecot", "start", quit=False)
        service_command("postfix", "start", quit=False)

    # Once the migrated backup is included in a new backup, it can be deleted.
    if os.path.isdir(migrated_unencrypted_backup_dir):
        shutil.rmtree(migrated_unencrypted_backup_dir)

    # Remove old backups. This deletes all backup data no longer needed
    # from more than min_age_in_days days ago.
    shell('check_call', [
        "/usr/bin/duplicity",
        "remove-older-than",
        "%dD" % config["min_age_in_days"],
        "--verbosity", "error",
        "--archive-dir", backup_cache_dir,
        "--force",
        config["target"]
    ],
          get_env(env))

    # From duplicity's manual:
    # "This should only be necessary after a duplicity session fails or is
    # aborted prematurely."
    # That may be unlikely here but we may as well ensure we tidy up if
    # that does happen - it might just have been a poorly timed reboot.
    shell('check_call', [
        "/usr/bin/duplicity",
        "cleanup",
        "--verbosity", "error",
        "--archive-dir", backup_cache_dir,
        "--force",
        config["target"]
    ],
          get_env(env))

    # Change ownership of backups to the user-data user, so that the after-backup
    # script can access them.
    if get_target_type(config) == 'file':
        shell('check_call', ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])

    # Execute a post-backup script that does the copying to a remote server.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    post_script = os.path.join(backup_root, 'after-backup')
    if os.path.exists(post_script):
        shell('check_call',
              ['su', env['STORAGE_USER'], '-c', post_script, config["target"]],
              env=env)

    # Our nightly cron job executes system status checks immediately after this
    # backup. Since it checks that dovecot and postfix are running, block for a
    # bit (maximum of 10 seconds each) to give each a chance to finish restarting
    # before the status checks might catch them down. See #381.
    wait_for_service(25, True, env, 10)
    wait_for_service(993, True, env, 10)