Example #1
def provision_certificates_cmdline():
	import sys
	from exclusiveprocess import Lock

	from utils import load_environment

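	# Take a machine-wide lock so only one instance runs at a time; with
	# die=True the process exits immediately if the lock is already held.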
	Lock(die=True).forever()
	env = load_environment()

	quiet = False
	domains = []

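	# Minimal flag parsing: "-q" suppresses "skipped" results below; any
	# other argument is a domain name to limit provisioning to.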
	for arg in sys.argv[1:]:
		if arg == "-q":
			quiet = True
		else:
			domains.append(arg)

	# Go.
	status = provision_certificates(env, limit_domains=domains)

	# Show what happened.
	for request in status:
		if isinstance(request, str):
			print(request)
		else:
			if quiet and request['result'] == 'skipped':
				continue
			print(request['result'] + ":", ", ".join(request['domains']) + ":")
			for line in request["log"]:
				print(line)
			print()
Example #2
def run_migrations():
	if not os.access("/etc/mailinabox.conf", os.W_OK, effective_ids=True):
		print("This script must be run as root.", file=sys.stderr)
		sys.exit(1)

	env = load_environment()

	migration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version')
	migration_id = None
	if os.path.exists(migration_id_file):
		with open(migration_id_file) as f:
			migration_id = f.read().strip()

	if migration_id is None:
		# Load the legacy location of the migration ID. We'll drop support
		# for this eventually.
		migration_id = env.get("MIGRATIONID")

	if migration_id is None:
		print()
		print("%s file doesn't exists. Skipping migration..." % (migration_id_file,))
		return

	ourver = int(migration_id)

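	# Apply migrations in sequence: look up migration_1, migration_2, ...
	# and run each until the next numbered function doesn't exist.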
	while True:
		next_ver = (ourver + 1)
		migration_func = globals().get("migration_%d" % next_ver)

		if not migration_func:
			# No more migrations to run.
			break

		print()
		print("Running migration to Mail-in-a-Box #%d..." % next_ver)

		try:
			migration_func(env)
		except Exception as e:
			print()
			print("Error running the migration script:")
			print()
			print(e)
			print()
			print("Your system may be in an inconsistent state now. We're terribly sorry. A re-install from a backup might be the best way to continue.")
			sys.exit(1)

		ourver = next_ver

		# Write out our current version now. Do this sooner rather than later
		# in case of any problems.
		with open(migration_id_file, "w") as f:
			f.write(str(ourver) + "\n")

		# Delete the legacy location of this field.
		if "MIGRATIONID" in env:
			del env["MIGRATIONID"]
			save_environment(env)
Example #3
def run_duplicity_restore(args):
    env = load_environment()
    config = get_backup_config(env)
    backup_cache_dir = os.path.join(env["STORAGE_ROOT"], "backup", "cache")
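    # Any extra command-line arguments (e.g. the restore destination path)
    # are passed straight through to duplicity.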
    shell(
        "check_call",
        ["/usr/bin/duplicity", "restore", "--archive-dir", backup_cache_dir, config["target"]] + args,
        get_env(env),
    )
Example #4
def run_duplicity_restore(args):
	env = load_environment()
	config = get_backup_config(env)
	backup_cache_dir = os.path.join(env["STORAGE_ROOT"], 'backup', 'cache')
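	# Note: rsync_ssh_options is not defined in this snippet; in the full
	# script it is a module-level list of extra duplicity options used for
	# rsync/SSH backup targets.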
	shell('check_call', [
		"/usr/bin/duplicity",
		"restore",
		"--archive-dir", backup_cache_dir,
		config["target"],
		] + rsync_ssh_options + args,
	get_env(env))
Example #5
def run_duplicity_verification():
	env = load_environment()
	backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
	backup_cache_dir = os.path.join(backup_root, 'cache')
	backup_dir = os.path.join(backup_root, 'encrypted')
	env_with_passphrase = { "PASSPHRASE" : open(os.path.join(backup_root, 'secret_key.txt')).read() }
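	# duplicity reads the symmetric-encryption passphrase from the
	# PASSPHRASE environment variable, so it is passed via the process env.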
	shell('check_call', [
		"/usr/bin/duplicity",
		"--verbosity", "info",
		"verify",
		"--compare-data",
		"--archive-dir", backup_cache_dir,
		"--exclude", backup_root,
		"file://" + backup_dir,
		env["STORAGE_ROOT"],
	], env_with_passphrase)
Example #6
def run_duplicity_verification():
    env = load_environment()
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    config = get_backup_config(env)
    backup_cache_dir = os.path.join(backup_root, 'cache')

    shell('check_call', [
        "/usr/bin/duplicity",
        "--verbosity", "info",
        "verify",
        "--compare-data",
        "--archive-dir", backup_cache_dir,
        "--exclude", backup_root,
        config["target"],
        env["STORAGE_ROOT"],
    ], get_env(env))
Example #7
def mail_admin(subject, content):
    import smtplib
    from email.message import Message
    from utils import load_environment

    env = load_environment()
    admin_addr = "administrator@" + env['PRIMARY_HOSTNAME']

    # create MIME message
    msg = Message()
    msg['From'] = "\"%s\" <%s>" % (env['PRIMARY_HOSTNAME'], admin_addr)
    msg['To'] = admin_addr
    msg['Subject'] = "[%s] %s" % (env['PRIMARY_HOSTNAME'], subject)
    msg.set_payload(content, "UTF-8")

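    # Hand the message to the local MTA listening on 127.0.0.1:25.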
    smtpclient = smtplib.SMTP('127.0.0.1', 25)
    smtpclient.ehlo()
    smtpclient.sendmail(
        admin_addr,  # MAIL FROM
        admin_addr,  # RCPT TO
        msg.as_string())
    smtpclient.quit()
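    # Hypothetical usage:
    #   mail_admin("Backup failed", "duplicity exited with a non-zero status.")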
Example #8
def run_migrations():
	if not os.access("/etc/mailinabox.conf", os.W_OK, effective_ids=True):
		print("This script must be run as root.", file=sys.stderr)
		sys.exit(1)

	env = load_environment()

	ourver = int(env.get("MIGRATIONID", "0"))

	while True:
		next_ver = (ourver + 1)
		migration_func = globals().get("migration_%d" % next_ver)

		if not migration_func:
			# No more migrations to run.
			break

		print("Running migration to Mail-in-a-Box #%d..." % next_ver)

		try:
			migration_func(env)
		except Exception as e:
			print()
			print("Error running the migration script:")
			print()
			print(e)
			print()
			print("Your system may be in an inconsistent state now. We're terribly sorry. A re-install from a backup might be the best way to continue.")
			sys.exit(1)

		ourver = next_ver

		# Write out our current version now. Do this sooner rather than later
		# in case of any problems.
		env["MIGRATIONID"] = ourver
		save_environment(env)
Example #9
def perform_backup(full_backup):
	env = load_environment()

	exclusive_process("backup")

	# Ensure the backup directory exists.
	backup_dir = os.path.join(env["STORAGE_ROOT"], 'backup')
	backup_duplicity_dir = os.path.join(backup_dir, 'duplicity')
	os.makedirs(backup_duplicity_dir, exist_ok=True)

	# On the first run, always do a full backup. Incremental
	# will fail. Otherwise do a full backup when the size of
	# the increments since the most recent full backup are
	# large.
	full_backup = full_backup or should_force_full(env)

	# Stop services.
	shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
	shell('check_call', ["/usr/sbin/service", "postfix", "stop"])

	# Update the backup mirror directory which mirrors the current
	# STORAGE_ROOT (but excluding the backups themselves!).
	try:
		shell('check_call', [
			"/usr/bin/duplicity",
			"full" if full_backup else "incr",
			"--no-encryption",
			"--archive-dir", "/tmp/duplicity-archive-dir",
			"--name", "mailinabox",
			"--exclude", backup_dir,
			"--volsize", "100",
			"--verbosity", "warning",
			env["STORAGE_ROOT"],
			"file://" + backup_duplicity_dir
			])
	finally:
		# Start services again.
		shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
		shell('check_call', ["/usr/sbin/service", "postfix", "start"])

	# Remove old backups. This deletes all backup data no longer needed
	# from more than 31 days ago. Must do this before destroying the
	# cache directory or else this command will re-create it.
	shell('check_call', [
		"/usr/bin/duplicity",
		"remove-older-than",
		"%dD" % keep_backups_for_days,
		"--archive-dir", "/tmp/duplicity-archive-dir",
		"--name", "mailinabox",
		"--force",
		"--verbosity", "warning",
		"file://" + backup_duplicity_dir
		])

	# Remove duplicity's cache directory because it's redundant with our backup directory.
	shutil.rmtree("/tmp/duplicity-archive-dir")

	# Encrypt all of the new files.
	backup_encrypted_dir = os.path.join(backup_dir, 'encrypted')
	os.makedirs(backup_encrypted_dir, exist_ok=True)
	for fn in os.listdir(backup_duplicity_dir):
		fn2 = os.path.join(backup_encrypted_dir, fn) + ".enc"
		if os.path.exists(fn2): continue

		# Encrypt the backup using the backup private key.
		shell('check_call', [
			"/usr/bin/openssl",
			"enc",
			"-aes-256-cbc",
			"-a",
			"-salt",
			"-in", os.path.join(backup_duplicity_dir, fn),
			"-out", fn2,
			"-pass", "file:%s" % os.path.join(backup_dir, "secret_key.txt"),
			])

		# The backup can be decrypted with:
		# openssl enc -d -aes-256-cbc -a -in latest.tgz.enc -out /dev/stdout -pass file:secret_key.txt | tar -z

	# Remove encrypted backups that are no longer needed.
	for fn in os.listdir(backup_encrypted_dir):
		fn2 = os.path.join(backup_duplicity_dir, fn.replace(".enc", ""))
		if os.path.exists(fn2): continue
		os.unlink(os.path.join(backup_encrypted_dir, fn))

	# Execute a post-backup script that does the copying to a remote server.
	# Run as the STORAGE_USER user, not as root. Pass our settings in
	# environment variables so the script has access to STORAGE_ROOT.
	post_script = os.path.join(backup_dir, 'after-backup')
	if os.path.exists(post_script):
		shell('check_call',
			['su', env['STORAGE_USER'], '-c', post_script],
			env=env)
Example #10
                message = "domain blocked: " + m.group(2)

            collector["rejected-mail"].setdefault(recipient, []).append((date, sender, message))


def scan_postfix_cleanup_line(date, _, collector):
    """ Scan a postfix cleanup log line and extract interesting data

    It is assumed that every postfix/cleanup log line indicates an email that was successfully received by Postfix.

    """

    collector["activity-by-hour"]["smtp-receives"][date.hour] += 1


def scan_postfix_submission_line(date, log, collector):
    """ Scan a postfix submission log line and extract interesting data """

    m = re.match(r"([A-Z0-9]+): client=(\S+), sasl_method=PLAIN, sasl_username=(\S+)", log)

    if m:
        # procid, client, user = m.groups()
        collector["activity-by-hour"]["smtp-sends"][date.hour] += 1


if __name__ == "__main__":
    from status_checks import ConsoleOutput

    env_vars = utils.load_environment()
    scan_mail_log(ConsoleOutput(), env_vars)
Example #11
		# expand qnames
		for i in range(len(records)):
			if records[i][0] is None:
				qname = domain
			else:
				qname = records[i][0] + "." + domain

			records[i] = {
				"qname": qname,
				"rtype": records[i][1],
				"value": records[i][2],
				"explanation": records[i][3],
			}

		# return
		ret.append((domain, records))
	return ret

if __name__ == "__main__":
	from utils import load_environment
	env = load_environment()
	if sys.argv[-1] == "--lint":
		write_custom_dns_config(get_custom_dns_config(env), env)
	else:
		for zone, records in build_recommended_dns(env):
			for record in records:
				print("; " + record['explanation'])
				print(record['qname'], record['rtype'], record['value'], sep="\t")
				print()
Example #12
def perform_backup(full_backup):
    env = load_environment()

    exclusive_process("backup")
    config = get_backup_config(env)
    backup_root = os.path.join(env["STORAGE_ROOT"], "backup")
    backup_cache_dir = os.path.join(backup_root, "cache")
    backup_dir = os.path.join(backup_root, "encrypted")

    # Are backups disabled?
    if config["target"] == "off":
        return

    # In an older version of this script, duplicity was called
    # such that it did not encrypt the backups it created (in
    # backup/duplicity), and instead openssl was called separately
    # after each backup run, creating AES256 encrypted copies of
    # each file created by duplicity in backup/encrypted.
    #
    # We detect the transition by the presence of backup/duplicity
    # and handle it by 'dupliception': we move all the old *un*encrypted
    # duplicity files up out of the backup/duplicity directory (as
    # backup/ is excluded from duplicity runs) in order that it is
    # included in the next run, and we delete backup/encrypted (which
    # duplicity will output files directly to, post-transition).
    old_backup_dir = os.path.join(backup_root, "duplicity")
    migrated_unencrypted_backup_dir = os.path.join(env["STORAGE_ROOT"], "migrated_unencrypted_backup")
    if os.path.isdir(old_backup_dir):
        # Move the old unencrypted files to a new location outside of
        # the backup root so they get included in the next (new) backup.
        # Then we'll delete them. Also so that they do not get in the
        # way of duplicity doing a full backup on the first run after
        # we take care of this.
        shutil.move(old_backup_dir, migrated_unencrypted_backup_dir)

        # The backup_dir (backup/encrypted) now has a new purpose.
        # Clear it out.
        shutil.rmtree(backup_dir)

    # On the first run, always do a full backup. Incremental
    # will fail. Otherwise do a full backup when the size of
    # the increments since the most recent full backup are
    # large.
    try:
        full_backup = full_backup or should_force_full(config, env)
    except Exception as e:
        # This was the first call to duplicity, and there might
        # be an error already.
        print(e)
        sys.exit(1)

    # Stop services.
    def service_command(service, command, quit=None):
        # Execute silently, but if there is an error then display the output & exit.
        code, ret = shell("check_output", ["/usr/sbin/service", service, command], capture_stderr=True, trap=True)
        if code != 0:
            print(ret)
            if quit:
                sys.exit(code)

    service_command("php5-fpm", "stop", quit=True)
    service_command("postfix", "stop", quit=True)
    service_command("dovecot", "stop", quit=True)

    # Execute a pre-backup script that copies files outside the homedir.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    pre_script = os.path.join(backup_root, "before-backup")
    if os.path.exists(pre_script):
        shell("check_call", ["su", env["STORAGE_USER"], "-c", pre_script, config["target"]], env=env)

    # Run a backup of STORAGE_ROOT (but excluding the backups themselves!).
    # --allow-source-mismatch is needed in case the box's hostname is changed
    # after the first backup. See #396.
    try:
        shell(
            "check_call",
            [
                "/usr/bin/duplicity",
                "full" if full_backup else "incr",
                "--verbosity",
                "warning",
                "--no-print-statistics",
                "--archive-dir",
                backup_cache_dir,
                "--exclude",
                backup_root,
                "--volsize",
                "250",
                "--gpg-options",
                "--cipher-algo=AES256",
                env["STORAGE_ROOT"],
                config["target"],
                "--allow-source-mismatch",
            ],
            get_env(env),
        )
    finally:
        # Start services again.
        service_command("dovecot", "start", quit=False)
        service_command("postfix", "start", quit=False)
        service_command("php5-fpm", "start", quit=False)

    # Once the migrated backup is included in a new backup, it can be deleted.
    if os.path.isdir(migrated_unencrypted_backup_dir):
        shutil.rmtree(migrated_unencrypted_backup_dir)

    # Remove old backups. This deletes all backup data no longer needed
    # from more than 3 days ago.
    shell(
        "check_call",
        [
            "/usr/bin/duplicity",
            "remove-older-than",
            "%dD" % config["min_age_in_days"],
            "--verbosity",
            "error",
            "--archive-dir",
            backup_cache_dir,
            "--force",
            config["target"],
        ],
        get_env(env),
    )

    # From duplicity's manual:
    # "This should only be necessary after a duplicity session fails or is
    # aborted prematurely."
    # That may be unlikely here but we may as well ensure we tidy up if
    # that does happen - it might just have been a poorly timed reboot.
    shell(
        "check_call",
        [
            "/usr/bin/duplicity",
            "cleanup",
            "--verbosity",
            "error",
            "--archive-dir",
            backup_cache_dir,
            "--force",
            config["target"],
        ],
        get_env(env),
    )

    # Change ownership of backups to the user-data user, so that the after-backup
    # script can access them.
    if get_target_type(config) == "file":
        shell("check_call", ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])

    # Execute a post-backup script that does the copying to a remote server.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    post_script = os.path.join(backup_root, "after-backup")
    if os.path.exists(post_script):
        shell("check_call", ["su", env["STORAGE_USER"], "-c", post_script, config["target"]], env=env)

    # Our nightly cron job executes system status checks immediately after this
    # backup. Since it checks that dovecot and postfix are running, block for a
    # bit (maximum of 10 seconds each) to give each a chance to finish restarting
    # before the status checks might catch them down. See #381.
    wait_for_service(25, True, env, 10)
    wait_for_service(993, True, env, 10)
Example #13
def provision_certificates_cmdline():
    import sys
    from exclusiveprocess import Lock

    from utils import load_environment

    Lock(die=True).forever()
    env = load_environment()

    verbose = False
    headless = False
    force_domains = None
    show_extended_problems = True

    args = list(sys.argv)
    args.pop(0)  # program name
    if args and args[0] == "-v":
        verbose = True
        args.pop(0)
    if args and args[0] == "-q":
        show_extended_problems = False
        args.pop(0)
    if args and args[0] == "--headless":
        headless = True
        args.pop(0)
    if args and args[0] == "--force":
        force_domains = "ALL"
        args.pop(0)
    else:
        force_domains = args

    agree_to_tos_url = None
    while True:
        # Run the provisioning script. This installs certificates. If there are
        # a very large number of domains on this box, it issues separate
        # certificates for groups of domains. We have to check the result for
        # each group.
        def my_logger(message):
            if verbose:
                print(">", message)

        status = provision_certificates(
            env,
            agree_to_tos_url=agree_to_tos_url,
            logger=my_logger,
            force_domains=force_domains,
            show_extended_problems=show_extended_problems)
        agree_to_tos_url = None  # reset to prevent infinite looping

        if not status["requests"]:
            # No domains need certificates.
            if not headless or verbose:
                if len(status["problems"]) == 0:
                    print(
                        "No domains hosted on this box need a new TLS certificate at this time."
                    )
                elif len(status["problems"]) > 0:
                    print(
                        "No TLS certificates could be provisoned at this time:"
                    )
                    print()
                    for domain in sort_domains(status["problems"], env):
                        print("%s: %s" % (domain, status["problems"][domain]))

            sys.exit(0)

        # What happened?
        wait_until = None
        wait_domains = []
        for request in status["requests"]:
            if request["result"] == "agree-to-tos":
                # We may have asked already in a previous iteration.
                if agree_to_tos_url is not None:
                    continue

                # Can't ask the user a question in this mode. Warn the user that something
                # needs to be done.
                if headless:
                    print(", ".join(request["domains"]) +
                          " need a new or renewed TLS certificate.")
                    print()
                    print(
                        "This box can't do that automatically for you until you agree to Let's Encrypt's"
                    )
                    print(
                        "Terms of Service agreement. Use the Mail-in-a-Box control panel to provision"
                    )
                    print("certificates for these domains.")
                    sys.exit(1)

                print("""
I'm going to provision a TLS certificate (formerly called an SSL certificate)
for you from Let's Encrypt (letsencrypt.org).

TLS certificates are cryptographic keys that ensure communication between
you and this box is secure when getting and sending mail and visiting
websites hosted on this box. Let's Encrypt is a free provider of TLS
certificates.

Please open this document in your web browser:

%s

It is Let's Encrypt's terms of service agreement. If you agree, I can
provision that TLS certificate. If you don't agree, you will have an
opportunity to install your own TLS certificate from the Mail-in-a-Box
control panel.

Do you agree to the agreement? Type Y or N and press <ENTER>: """ %
                      request["url"],
                      end='',
                      flush=True)

                if sys.stdin.readline().strip().upper() != "Y":
                    print("\nYou didn't agree. Quitting.")
                    sys.exit(1)

                # Okay, indicate agreement on next iteration.
                agree_to_tos_url = request["url"]

            if request["result"] == "wait":
                # Must wait. We'll record until when. The wait occurs below.
                if wait_until is None:
                    wait_until = request["until"]
                else:
                    wait_until = max(wait_until, request["until"])
                wait_domains += request["domains"]

            if request["result"] == "error":
                print(", ".join(request["domains"]) + ":")
                print(request["message"])

            if request["result"] == "installed":
                print("A TLS certificate was successfully installed for " +
                      ", ".join(request["domains"]) + ".")

        if wait_until:
            # Wait, then loop.
            import time, datetime
            print()
            print("A TLS certificate was requested for: " +
                  ", ".join(wait_domains) + ".")
            first = True
            while wait_until > datetime.datetime.now():
                if not headless or first:
                    print(
                        "We have to wait",
                        int(
                            round((wait_until -
                                   datetime.datetime.now()).total_seconds())),
                        "seconds for the certificate to be issued...")
                time.sleep(10)
                first = False

            continue  # Loop!

        if agree_to_tos_url:
            # The user agrees to the TOS. Loop to try again by agreeing.
            continue  # Loop!

        # Unless we were instructed to wait, or we just agreed to the TOS,
        # we're done for now.
        break

    # And finally show the domains with problems.
    if len(status["problems"]) > 0:
        print("TLS certificates could not be provisoned for:")
        for domain in sort_domains(status["problems"], env):
            print("%s: %s" % (domain, status["problems"][domain]))
Example #14
def write_backup_config(env, newconfig):
	backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
	with open(os.path.join(backup_root, 'custom.yaml'), "w") as f:
		f.write(rtyaml.dump(newconfig))

if __name__ == "__main__":
	import sys
	if sys.argv[-1] == "--verify":
		# Run duplicity's verification command to check a) the backup files
		# are readable, and b) report if they are up to date.
		run_duplicity_verification()

	elif sys.argv[-1] == "--list":
		# List the saved backup files.
		for fn, size in list_target_files(get_backup_config(load_environment())):
			print("{}\t{}".format(fn, size))

	elif sys.argv[-1] == "--status":
		# Show backup status.
		ret = backup_status(load_environment())
		print(rtyaml.dump(ret["backups"]))
		print("Storage for unmatched files:", ret["unmatched_file_size"])

	elif len(sys.argv) >= 2 and sys.argv[1] == "--restore":
		# Run duplicity restore. Rest of command line passed as arguments
		# to duplicity. The restore path should be specified.
		run_duplicity_restore(sys.argv[2:])

	else:
		# Perform a backup. Add --full to force a full backup rather than
		# possibly performing an incremental backup.
Example #15
def perform_backup(full_backup):
    env = load_environment()

    # Create a global exclusive lock so that the backup script
    # cannot run more than once at a time.
    Lock(die=True).forever()

    config = get_backup_config(env)
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    backup_cache_dir = os.path.join(backup_root, 'cache')
    backup_dir = os.path.join(backup_root, 'encrypted')

    # Are backups disabled?
    if config["target"] == "off":
        return

    # In an older version of this script, duplicity was called
    # such that it did not encrypt the backups it created (in
    # backup/duplicity), and instead openssl was called separately
    # after each backup run, creating AES256 encrypted copies of
    # each file created by duplicity in backup/encrypted.
    #
    # We detect the transition by the presence of backup/duplicity
    # and handle it by 'dupliception': we move all the old *un*encrypted
    # duplicity files up out of the backup/duplicity directory (as
    # backup/ is excluded from duplicity runs) in order that it is
    # included in the next run, and we delete backup/encrypted (which
    # duplicity will output files directly to, post-transition).
    old_backup_dir = os.path.join(backup_root, 'duplicity')
    migrated_unencrypted_backup_dir = os.path.join(
        env["STORAGE_ROOT"], "migrated_unencrypted_backup")
    if os.path.isdir(old_backup_dir):
        # Move the old unencrypted files to a new location outside of
        # the backup root so they get included in the next (new) backup.
        # Then we'll delete them. Also so that they do not get in the
        # way of duplicity doing a full backup on the first run after
        # we take care of this.
        shutil.move(old_backup_dir, migrated_unencrypted_backup_dir)

        # The backup_dir (backup/encrypted) now has a new purpose.
        # Clear it out.
        shutil.rmtree(backup_dir)

    # On the first run, always do a full backup. Incremental
    # will fail. Otherwise do a full backup when the size of
    # the increments since the most recent full backup are
    # large.
    try:
        full_backup = full_backup or should_force_full(config, env)
    except Exception as e:
        # This was the first call to duplicity, and there might
        # be an error already.
        print(e)
        sys.exit(1)

    # Stop services.
    def service_command(service, command, quit=None):
        # Execute silently, but if there is an error then display the output & exit.
        code, ret = shell('check_output',
                          ["/usr/sbin/service", service, command],
                          capture_stderr=True,
                          trap=True)
        if code != 0:
            print(ret)
            if quit:
                sys.exit(code)

    service_command("php5-fpm", "stop", quit=True)
    service_command("postfix", "stop", quit=True)
    service_command("dovecot", "stop", quit=True)

    # Execute a pre-backup script that copies files outside the homedir.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    pre_script = os.path.join(backup_root, 'before-backup')
    if os.path.exists(pre_script):
        shell('check_call',
              ['su', env['STORAGE_USER'], '-c', pre_script, config["target"]],
              env=env)

    # Run a backup of STORAGE_ROOT (but excluding the backups themselves!).
    # --allow-source-mismatch is needed in case the box's hostname is changed
    # after the first backup. See #396.
    try:
        shell('check_call', [
            "/usr/bin/duplicity", "full" if full_backup else "incr",
            "--verbosity", "warning", "--no-print-statistics", "--archive-dir",
            backup_cache_dir, "--exclude", backup_root, "--volsize", "250",
            "--gpg-options", "--cipher-algo=AES256", env["STORAGE_ROOT"],
            config["target"], "--allow-source-mismatch"
        ] + rsync_ssh_options, get_env(env))
    finally:
        # Start services again.
        service_command("dovecot", "start", quit=False)
        service_command("postfix", "start", quit=False)
        service_command("php5-fpm", "start", quit=False)

    # Once the migrated backup is included in a new backup, it can be deleted.
    if os.path.isdir(migrated_unencrypted_backup_dir):
        shutil.rmtree(migrated_unencrypted_backup_dir)

    # Remove old backups. This deletes all backup data no longer needed
    # from more than 3 days ago.
    shell('check_call', [
        "/usr/bin/duplicity", "remove-older-than",
        "%dD" % config["min_age_in_days"], "--verbosity", "error",
        "--archive-dir", backup_cache_dir, "--force", config["target"]
    ] + rsync_ssh_options, get_env(env))

    # From duplicity's manual:
    # "This should only be necessary after a duplicity session fails or is
    # aborted prematurely."
    # That may be unlikely here but we may as well ensure we tidy up if
    # that does happen - it might just have been a poorly timed reboot.
    shell('check_call', [
        "/usr/bin/duplicity", "cleanup", "--verbosity", "error",
        "--archive-dir", backup_cache_dir, "--force", config["target"]
    ] + rsync_ssh_options, get_env(env))

    # Change ownership of backups to the user-data user, so that the after-backup
    # script can access them.
    if get_target_type(config) == 'file':
        shell('check_call',
              ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])

    # Execute a post-backup script that does the copying to a remote server.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    post_script = os.path.join(backup_root, 'after-backup')
    if os.path.exists(post_script):
        shell('check_call',
              ['su', env['STORAGE_USER'], '-c', post_script, config["target"]],
              env=env)

    # Our nightly cron job executes system status checks immediately after this
    # backup. Since it checks that dovecot and postfix are running, block for a
    # bit (maximum of 10 seconds each) to give each a chance to finish restarting
    # before the status checks might catch them down. See #381.
    wait_for_service(25, True, env, 10)
    wait_for_service(993, True, env, 10)
Example #16
def perform_backup(full_backup):
    env = load_environment()

    exclusive_process("backup")

    # Ensure the backup directory exists.
    backup_dir = os.path.join(env["STORAGE_ROOT"], 'backup')
    backup_duplicity_dir = os.path.join(backup_dir, 'duplicity')
    os.makedirs(backup_duplicity_dir, exist_ok=True)

    # On the first run, always do a full backup. Incremental
    # will fail. Otherwise do a full backup when the size of
    # the increments since the most recent full backup are
    # large.
    full_backup = full_backup or should_force_full(env)

    # Stop services.
    shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
    shell('check_call', ["/usr/sbin/service", "postfix", "stop"])

    # Update the backup mirror directory which mirrors the current
    # STORAGE_ROOT (but excluding the backups themselves!).
    try:
        shell('check_call', [
            "/usr/bin/duplicity", "full" if full_backup else "incr",
            "--no-encryption", "--archive-dir", "/tmp/duplicity-archive-dir",
            "--name", "mailinabox", "--exclude", backup_dir, "--volsize",
            "100", "--verbosity", "warning", env["STORAGE_ROOT"],
            "file://" + backup_duplicity_dir
        ])
    finally:
        # Start services again.
        shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
        shell('check_call', ["/usr/sbin/service", "postfix", "start"])

    # Remove old backups. This deletes all backup data no longer needed
    # from more than 31 days ago. Must do this before destroying the
    # cache directory or else this command will re-create it.
    shell('check_call', [
        "/usr/bin/duplicity", "remove-older-than",
        "%dD" % keep_backups_for_days, "--archive-dir",
        "/tmp/duplicity-archive-dir", "--name", "mailinabox", "--force",
        "--verbosity", "warning", "file://" + backup_duplicity_dir
    ])

    # Remove duplicity's cache directory because it's redundant with our backup directory.
    shutil.rmtree("/tmp/duplicity-archive-dir")

    # Encrypt all of the new files.
    backup_encrypted_dir = os.path.join(backup_dir, 'encrypted')
    os.makedirs(backup_encrypted_dir, exist_ok=True)
    for fn in os.listdir(backup_duplicity_dir):
        fn2 = os.path.join(backup_encrypted_dir, fn) + ".enc"
        if os.path.exists(fn2): continue

        # Encrypt the backup using the backup private key.
        shell('check_call', [
            "/usr/bin/openssl",
            "enc",
            "-aes-256-cbc",
            "-a",
            "-salt",
            "-in",
            os.path.join(backup_duplicity_dir, fn),
            "-out",
            fn2,
            "-pass",
            "file:%s" % os.path.join(backup_dir, "secret_key.txt"),
        ])

        # The backup can be decrypted with:
        # openssl enc -d -aes-256-cbc -a -in latest.tgz.enc -out /dev/stdout -pass file:secret_key.txt | tar -z

    # Remove encrypted backups that are no longer needed.
    for fn in os.listdir(backup_encrypted_dir):
        fn2 = os.path.join(backup_duplicity_dir, fn.replace(".enc", ""))
        if os.path.exists(fn2): continue
        os.unlink(os.path.join(backup_encrypted_dir, fn))

    # Execute a post-backup script that does the copying to a remote server.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    post_script = os.path.join(backup_dir, 'after-backup')
    if os.path.exists(post_script):
        shell('check_call', ['su', env['STORAGE_USER'], '-c', post_script],
              env=env)
Example #17
    with open(os.path.join(backup_root, 'custom.yaml'), "w") as f:
        f.write(rtyaml.dump(newconfig))


if __name__ == "__main__":
    import sys
    if sys.argv[-1] == "--verify":
        # Run duplicity's verification command to check a) the backup files
        # are readable, and b) report if they are up to date.
        run_duplicity_verification()

    elif sys.argv[-1] == "--list":
        # List the saved backup files.
        for fn, size in list_target_files(get_backup_config(
                load_environment())):
            print("{}\t{}".format(fn, size))

    elif sys.argv[-1] == "--status":
        # Show backup status.
        ret = backup_status(load_environment())
        print(rtyaml.dump(ret["backups"]))

    elif len(sys.argv) >= 2 and sys.argv[1] == "--restore":
        # Run duplicity restore. Rest of command line passed as arguments
        # to duplicity. The restore path should be specified.
        run_duplicity_restore(sys.argv[2:])

    else:
        # Perform a backup. Add --full to force a full backup rather than
        # possibly performing an incremental backup.
Example #18
        message, sender, recipient = m.groups()
        if recipient in collector["real_mail_addresses"]:
            # only log mail to real recipients

            # skip this; it is reported in the greylisting report
            if "Recipient address rejected: Greylisted" in message:
                return

            # simplify this one
            m = re.search(
                r"Client host \[(.*?)\] blocked using zen.spamhaus.org; (.*)",
                message)
            if m:
                message = "ip blocked: " + m.group(2)

            # simplify this one too
            m = re.search(
                r"Sender address \[.*@(.*)\] blocked using dbl.spamhaus.org; (.*)",
                message)
            if m:
                message = "domain blocked: " + m.group(2)

            collector["rejected-mail"].setdefault(recipient, []).append(
                (date, sender, message))


if __name__ == "__main__":
    from status_checks import ConsoleOutput
    env = utils.load_environment()
    scan_mail_log(ConsoleOutput(), env)
Example #19
    except KeyError:
        pass

    lines.append(footer)

    print("\n".join(lines))


def print_header(msg):
    print('\n' + msg)
    print("═" * len(msg), '\n')


if __name__ == "__main__":
    try:
        env_vars = utils.load_environment()
    except FileNotFoundError:
        env_vars = {}

    parser = argparse.ArgumentParser(
        description=
        "Scan the mail log files for interesting data. By default, this script "
        "shows today's incoming and outgoing mail statistics. This script was ("
        "re)written for the Mail-in-a-box email server."
        "https://github.com/mail-in-a-box/mailinabox",
        add_help=False)

    # Switches to determine what to parse and what to ignore

    parser.add_argument("-r",
                        "--received",
Example #20
def run_migrations():
    if not os.access("/etc/mailinabox.conf", os.W_OK, effective_ids=True):
        print("This script must be run as root.", file=sys.stderr)
        sys.exit(1)

    env = load_environment()

    migration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version')
    migration_id = None
    if os.path.exists(migration_id_file):
        with open(migration_id_file) as f:
            migration_id = f.read().strip()

    if migration_id is None:
        # Load the legacy location of the migration ID. We'll drop support
        # for this eventually.
        migration_id = env.get("MIGRATIONID")

    if migration_id is None:
        print()
        print("%s file doesn't exists. Skipping migration..." %
              (migration_id_file, ))
        return

    ourver = int(migration_id)

    while True:
        next_ver = (ourver + 1)
        migration_func = globals().get("migration_%d" % next_ver)

        if not migration_func:
            # No more migrations to run.
            break

        print()
        print("Running migration to Mail-in-a-Box #%d..." % next_ver)

        try:
            migration_func(env)
        except Exception as e:
            print()
            print("Error running the migration script:")
            print()
            print(e)
            print()
            print(
                "Your system may be in an inconsistent state now. We're terribly sorry. A re-install from a backup might be the best way to continue."
            )
            sys.exit(1)

        ourver = next_ver

        # Write out our current version now. Do this sooner rather than later
        # in case of any problems.
        with open(migration_id_file, "w") as f:
            f.write(str(ourver) + "\n")

        # Delete the legacy location of this field.
        if "MIGRATIONID" in env:
            del env["MIGRATIONID"]
            save_environment(env)
Example #21
def perform_backup(full_backup):
	env = load_environment()

	exclusive_process("backup")

	backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
	backup_cache_dir = os.path.join(backup_root, 'cache')
	backup_dir = os.path.join(backup_root, 'encrypted')

	# In an older version of this script, duplicity was called
	# such that it did not encrypt the backups it created (in
	# backup/duplicity), and instead openssl was called separately
	# after each backup run, creating AES256 encrypted copies of
	# each file created by duplicity in backup/encrypted.
	#
	# We detect the transition by the presence of backup/duplicity
	# and handle it by 'dupliception': we move all the old *un*encrypted
	# duplicity files up out of the backup/duplicity directory (as
	# backup/ is excluded from duplicity runs) in order that it is
	# included in the next run, and we delete backup/encrypted (which
	# duplicity will output files directly to, post-transition).
	old_backup_dir = os.path.join(backup_root, 'duplicity')
	migrated_unencrypted_backup_dir = os.path.join(env["STORAGE_ROOT"], "migrated_unencrypted_backup")
	if os.path.isdir(old_backup_dir):
		# Move the old unencrypted files to a new location outside of
		# the backup root so they get included in the next (new) backup.
		# Then we'll delete them. Also so that they do not get in the
		# way of duplicity doing a full backup on the first run after
		# we take care of this.
		shutil.move(old_backup_dir, migrated_unencrypted_backup_dir)

		# The backup_dir (backup/encrypted) now has a new purpose.
		# Clear it out.
		shutil.rmtree(backup_dir)

	# On the first run, always do a full backup. Incremental
	# will fail. Otherwise do a full backup when the size of
	# the increments since the most recent full backup are
	# large.
	full_backup = full_backup or should_force_full(env)

	# Stop services.
	shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
	shell('check_call', ["/usr/sbin/service", "postfix", "stop"])

	# Get the encryption passphrase. secret_key.txt is 2048 random
	# bits base64-encoded and with line breaks every 65 characters.
	# gpg will only take the first line of text, so sanity check that
	# that line is long enough to be a reasonable passphrase. It
	# only needs to be 43 base64-characters to match AES256's key
	# length of 32 bytes.
	with open(os.path.join(backup_root, 'secret_key.txt')) as f:
		passphrase = f.readline().strip()
	if len(passphrase) < 43: raise Exception("secret_key.txt's first line is too short!")
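	# duplicity reads this passphrase from the PASSPHRASE environment
	# variable passed to each invocation below.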
	env_with_passphrase = { "PASSPHRASE" : passphrase }

	# Update the backup mirror directory which mirrors the current
	# STORAGE_ROOT (but excluding the backups themselves!).
	try:
		shell('check_call', [
			"/usr/bin/duplicity",
			"full" if full_backup else "incr",
			"--archive-dir", backup_cache_dir,
			"--exclude", backup_root,
			"--volsize", "250",
			"--gpg-options", "--cipher-algo=AES256",
			env["STORAGE_ROOT"],
			"file://" + backup_dir
			],
			env_with_passphrase)
	finally:
		# Start services again.
		shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
		shell('check_call', ["/usr/sbin/service", "postfix", "start"])

	# Once the migrated backup is included in a new backup, it can be deleted.
	if os.path.isdir(migrated_unencrypted_backup_dir):
		shutil.rmtree(migrated_unencrypted_backup_dir)

	# Remove old backups. This deletes all backup data no longer needed
	# from more than 3 days ago.
	shell('check_call', [
		"/usr/bin/duplicity",
		"remove-older-than",
		"%dD" % keep_backups_for_days,
		"--archive-dir", backup_cache_dir,
		"--force",
		"file://" + backup_dir
		],
		env_with_passphrase)

	# From duplicity's manual:
	# "This should only be necessary after a duplicity session fails or is
	# aborted prematurely."
	# That may be unlikely here but we may as well ensure we tidy up if
	# that does happen - it might just have been a poorly timed reboot.
	shell('check_call', [
		"/usr/bin/duplicity",
		"cleanup",
		"--archive-dir", backup_cache_dir,
		"--force",
		"file://" + backup_dir
		],
		env_with_passphrase)

	# Change ownership of backups to the user-data user, so that the after-backup
	# script can access them.
	shell('check_call', ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])

	# Execute a post-backup script that does the copying to a remote server.
	# Run as the STORAGE_USER user, not as root. Pass our settings in
	# environment variables so the script has access to STORAGE_ROOT.
	post_script = os.path.join(backup_root, 'after-backup')
	if os.path.exists(post_script):
		shell('check_call',
			['su', env['STORAGE_USER'], '-c', post_script],
			env=env)
Example #22
    def __init__(self, start_date=None, end_date=None, filters=None, no_filter=False,
                 sent=True, received=True, imap=False, pop3=False, grey=False, rejected=False):

        super().__init__()

        # Try and get all the email addresses known to this box

        known_addresses = []

        if not no_filter:
            try:
                env_vars = utils.load_environment()
                import mailconfig
                known_addresses = sorted(
                    set(mailconfig.get_mail_users(env_vars)) |
                    set(alias[0] for alias in mailconfig.get_mail_aliases(env_vars)),
                    key=email_sort
                )
            except (FileNotFoundError, ImportError):
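                # Running outside the box (no environment file or mailconfig
                # module): scan without filtering to known addresses.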
                pass

        start_date = start_date or datetime.now()
        end_date = end_date or start_date - timedelta(weeks=52)

        self.update({
            'end_of_file': False,                   # Indicates whether the end of the log files was reached
            'start_date': start_date,
            'end_date': end_date,
            'line_count': 0,                        # Number of lines scanned
            'parse_count': 0,                       # Number of lines parsed (i.e. that had their contents examined)
            'scan_time': time.time(),               # The time in seconds the scan took
            'unknown services': set(),              # Services encountered that were not recognized
            'known_addresses': known_addresses,     # Addresses handled by MiaB
            'services': {},                         # What services to scan for
            'data': OrderedDict(),                  # Scan data, per service
        })

        # Caching is only useful with longer filter lists, but doesn't seem to hurt performance in shorter ones
        user_match = lru_cache(maxsize=None)(partial(filter_match, [f.lower() for f in filters] if filters else None))

        if sent:
            data = {}
            self['data']['sent mail'] = {
                'scan': partial(scan_postfix_submission, data, user_match),
                'data': data,
            }
            self['services']['postfix/submission/smtpd'] = self['data']['sent mail']

        if received:
            data = {}
            self['data']['received mail'] = {
                'scan': partial(scan_postfix_lmtp, data, user_match),
                'data': data,
            }
            self['services']['postfix/lmtp'] = self['data']['received mail']

        if imap:
            data = {}
            self['data']['imap login'] = {
                'scan': partial(scan_login, data, user_match),
                'data': data,
            }
            self['services']['imap-login'] = self['data']['imap login']

        if pop3:
            data = {}
            self['data']['pop3 login'] = {
                'scan': partial(scan_login, data, user_match),
                'data': data,
            }
            self['services']['pop3-login'] = self['data']['pop3 login']

        if grey:
            data = {}
            self['data']['grey-listed mail'] = {
                'scan': partial(scan_greylist, data, user_match),
                'data': data,
            }
            self['services']['postgrey'] = self['data']['grey-listed mail']

        if rejected:
            data = {}
            self['data']['blocked mail'] = {
                'scan': partial(scan_rejects, data, self['known_addresses'], user_match),
                'data': data,
            }
            self['services']['postfix/smtpd'] = self['data']['blocked mail']
Example #23
        print(
            "The certificate has been installed in %s. Restarting services..."
            % ssl_certificate)

        # Restart dovecot and postfix if this is for PRIMARY_HOSTNAME.

        if domain == env['PRIMARY_HOSTNAME']:
            shell('check_call', ["/usr/sbin/service", "dovecot", "restart"])
            shell('check_call', ["/usr/sbin/service", "postfix", "restart"])

        # Restart nginx in all cases.

        shell('check_call', ["/usr/sbin/service", "nginx", "restart"])

    else:
        print(
            "The certificate has an unknown status. Please check https://www.gandi.net/admin/ssl/%d/details for the status of this order."
            % cert['id'])


if __name__ == "__main__":
    if len(sys.argv) < 4:
        print(
            "Usage: python management/buy_certificate.py gandi_api_key domain_name {purchase, setup}"
        )
        sys.exit(1)
    api_key = sys.argv[1]
    domain_name = sys.argv[2]
    cmd = sys.argv[3]
    buy_ssl_certificate(api_key, domain_name, cmd, load_environment())
Example #24
def print_ok(message):
	print_block(message, first_line="✓  ")

def print_error(message):
	print_block(message, first_line="✖  ")

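# Detect the terminal width for word wrapping; fall back to 76 columns
# when stty can't report a size (e.g. output is not a TTY).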
try:
	terminal_columns = int(shell('check_output', ['stty', 'size']).split()[1])
except:
	terminal_columns = 76
def print_block(message, first_line="   "):
	print(first_line, end='')
	message = re.sub(r"\n\s*", " ", message)
	words = re.split(r"(\s+)", message)
	linelen = 0
	for w in words:
		if linelen + len(w) > terminal_columns-1-len(first_line):
			print()
			print("   ", end="")
			linelen = 0
		if linelen == 0 and w.strip() == "": continue
		print(w, end="")
		linelen += len(w)
	if linelen > 0:
		print()

if __name__ == "__main__":
	from utils import load_environment
	run_checks(load_environment())
Example #25
def write_backup_config(env, newconfig):
	backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
	with open(os.path.join(backup_root, 'custom.yaml'), "w") as f:
		f.write(rtyaml.dump(newconfig))

if __name__ == "__main__":
	import sys
	if sys.argv[-1] == "--verify":
		# Run duplicity's verification command to check a) the backup files
		# are readable, and b) report if they are up to date.
		run_duplicity_verification()

	elif sys.argv[-1] == "--list":
		# List the saved backup files.
		for fn, size in list_target_files(get_backup_config(load_environment())):
			print("{}\t{}".format(fn, size))

	elif sys.argv[-1] == "--status":
		# Show backup status.
		ret = backup_status(load_environment())
		print(rtyaml.dump(ret["backups"]))

	elif len(sys.argv) >= 2 and sys.argv[1] == "--restore":
		# Run duplicity restore. Rest of command line passed as arguments
		# to duplicity. The restore path should be specified.
		run_duplicity_restore(sys.argv[2:])

	else:
		# Perform a backup. Add --full to force a full backup rather than
		# possibly performing an incremental backup.
Example #26
	# longer have any other email addresses for.
	for source, target in existing_aliases:
		user, domain = source.split("@")
		if user in ("postmaster", "admin") and domain not in real_mail_domains \
			and target == administrator:
			remove_mail_alias(source, env, do_kick=False)
			results.append("removed alias %s (was to %s; domain no longer used for email)\n" % (source, target))

	# Update DNS and nginx in case any domains are added/removed.

	from dns_update import do_dns_update
	results.append( do_dns_update(env) )

	from web_update import do_web_update
	results.append( do_web_update(env) )

	return "".join(s for s in results if s != "")

if __name__ == "__main__":
	import sys
	if len(sys.argv) > 2 and sys.argv[1] == "validate-email":
		# Validate that we can create a Dovecot account for a given string.
		if validate_email(sys.argv[2], mode='user'):
			sys.exit(0)
		else:
			sys.exit(1)

	if len(sys.argv) > 1 and sys.argv[1] == "update":
		from utils import load_environment
		print(kick(load_environment()))
Example #27
def provision_certificates_cmdline():
	import sys
	from utils import load_environment, exclusive_process

	exclusive_process("update_tls_certificates")
	env = load_environment()

	verbose = False
	headless = False
	force_domains = None
	show_extended_problems = True
	
	args = list(sys.argv)
	args.pop(0) # program name
	if args and args[0] == "-v":
		verbose = True
		args.pop(0)
	if args and args[0] == "q":
		show_extended_problems = False
		args.pop(0)
	if args and args[0] == "--headless":
		headless = True
		args.pop(0)
	if args and args[0] == "--force":
		force_domains = "ALL"
		args.pop(0)
	else:
		force_domains = args

	agree_to_tos_url = None
	while True:
		# Run the provisioning script. This installs certificates. If there are
		# a very large number of domains on this box, it issues separate
		# certificates for groups of domains. We have to check the result for
		# each group.
		def my_logger(message):
			if verbose:
				print(">", message)
		status = provision_certificates(env, agree_to_tos_url=agree_to_tos_url, logger=my_logger, force_domains=force_domains, show_extended_problems=show_extended_problems)
		agree_to_tos_url = None # reset to prevent infinite looping

		if not status["requests"]:
			# No domains need certificates.
			if not headless or verbose:
				if len(status["problems"]) == 0:
					print("No domains hosted on this box need a new TLS certificate at this time.")
				elif len(status["problems"]) > 0:
					print("No TLS certificates could be provisoned at this time:")
					print()
					for domain in sort_domains(status["problems"], env):
						print("%s: %s" % (domain, status["problems"][domain]))

			sys.exit(0)

		# What happened?
		wait_until = None
		wait_domains = []
		for request in status["requests"]:
			if request["result"] == "agree-to-tos":
				# We may have asked already in a previous iteration.
				if agree_to_tos_url is not None:
					continue

				# Can't ask the user a question in this mode. Warn the user that something
				# needs to be done.
				if headless:
					print(", ".join(request["domains"]) + " need a new or renewed TLS certificate.")
					print()
					print("This box can't do that automatically for you until you agree to Let's Encrypt's")
					print("Terms of Service agreement. Use the Mail-in-a-Box control panel to provision")
					print("certificates for these domains.")
					sys.exit(1)

				print("""
I'm going to provision a TLS certificate (formerly called an SSL certificate)
for you from Let's Encrypt (letsencrypt.org).

TLS certificates are cryptographic keys that ensure communication between
you and this box is secure when getting and sending mail and visiting
websites hosted on this box. Let's Encrypt is a free provider of TLS
certificates.

Please open this document in your web browser:

%s

It is Let's Encrypt's terms of service agreement. If you agree, I can
provision that TLS certificate. If you don't agree, you will have an
opportunity to install your own TLS certificate from the Mail-in-a-Box
control panel.

Do you agree to the agreement? Type Y or N and press <ENTER>: """
				 % request["url"], end='', flush=True)
			
				if sys.stdin.readline().strip().upper() != "Y":
					print("\nYou didn't agree. Quitting.")
					sys.exit(1)

				# Okay, indicate agreement on next iteration.
				agree_to_tos_url = request["url"]

			if request["result"] == "wait":
				# Must wait. We'll record until when. The wait occurs below.
				if wait_until is None:
					wait_until = request["until"]
				else:
					wait_until = max(wait_until, request["until"])
				wait_domains += request["domains"]

			if request["result"] == "error":
				print(", ".join(request["domains"]) + ":")
				print(request["message"])

			if request["result"] == "installed":
				print("A TLS certificate was successfully installed for " + ", ".join(request["domains"]) + ".")

		if wait_until:
			# Wait, then loop.
			import time, datetime
			print()
			print("A TLS certificate was requested for: " + ", ".join(wait_domains) + ".")
			first = True
			while wait_until > datetime.datetime.now():
				if not headless or first:
					print ("We have to wait", int(round((wait_until - datetime.datetime.now()).total_seconds())), "seconds for the certificate to be issued...")
				time.sleep(10)
				first = False

			continue # Loop!

		if agree_to_tos_url:
			# The user agrees to the TOS. Loop to try again by agreeing.
			continue # Loop!

		# Unless we were instructed to wait, or we just agreed to the TOS,
		# we're done for now.
		break

	# And finally show the domains with problems.
	if len(status["problems"]) > 0:
		print("TLS certificates could not be provisoned for:")
		for domain in sort_domains(status["problems"], env):
			print("%s: %s" % (domain, status["problems"][domain]))
Example No. 28
0
def perform_backup(full_backup):
    env = load_environment()

    exclusive_process("backup")

    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    backup_cache_dir = os.path.join(backup_root, 'cache')
    backup_dir = os.path.join(backup_root, 'encrypted')

    # In an older version of this script, duplicity was called
    # such that it did not encrypt the backups it created (in
    # backup/duplicity), and instead openssl was called separately
    # after each backup run, creating AES256 encrypted copies of
    # each file created by duplicity in backup/encrypted.
    #
    # We detect the transition by the presence of backup/duplicity
    # and handle it by 'dupliception': we move all the old *un*encrypted
    # duplicity files up out of the backup/duplicity directory (as
    # backup/ is excluded from duplicity runs) in order that it is
    # included in the next run, and we delete backup/encrypted (which
    # duplicity will output files directly to, post-transition).
    old_backup_dir = os.path.join(backup_root, 'duplicity')
    migrated_unencrypted_backup_dir = os.path.join(
        env["STORAGE_ROOT"], "migrated_unencrypted_backup")
    if os.path.isdir(old_backup_dir):
        # Move the old unencrypted files to a new location outside of
        # the backup root so they get included in the next (new) backup.
        # Then we'll delete them. Also so that they do not get in the
        # way of duplicity doing a full backup on the first run after
        # we take care of this.
        shutil.move(old_backup_dir, migrated_unencrypted_backup_dir)

        # The backup_dir (backup/encrypted) now has a new purpose.
        # Clear it out.
        shutil.rmtree(backup_dir)

    # On the first run, always do a full backup; an incremental
    # backup would fail. Otherwise do a full backup when the total
    # size of the increments since the most recent full backup is
    # large.
    full_backup = full_backup or should_force_full(env)

    # Stop services.
    shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
    shell('check_call', ["/usr/sbin/service", "postfix", "stop"])

    # Get the encryption passphrase. secret_key.txt contains 2048 random
    # bits, base64-encoded with line breaks every 65 characters.
    # gpg will only take the first line of text, so sanity check that
    # that line is long enough to be a reasonable passphrase. It
    # only needs to be 43 base64 characters to match AES-256's key
    # length of 32 bytes.
    with open(os.path.join(backup_root, 'secret_key.txt')) as f:
        passphrase = f.readline().strip()
    if len(passphrase) < 43:
        raise Exception("secret_key.txt's first line is too short!")
    env_with_passphrase = {"PASSPHRASE": passphrase}

    # Update the backup mirror directory which mirrors the current
    # STORAGE_ROOT (but excluding the backups themselves!).
    try:
        shell('check_call', [
            "/usr/bin/duplicity", "full" if full_backup else "incr",
            "--archive-dir", backup_cache_dir, "--exclude", backup_root,
            "--volsize", "250", "--gpg-options", "--cipher-algo=AES256",
            env["STORAGE_ROOT"], "file://" + backup_dir
        ], env_with_passphrase)
    finally:
        # Start services again.
        shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
        shell('check_call', ["/usr/sbin/service", "postfix", "start"])

    # Once the migrated backup is included in a new backup, it can be deleted.
    if os.path.isdir(migrated_unencrypted_backup_dir):
        shutil.rmtree(migrated_unencrypted_backup_dir)

    # Remove old backups. This deletes all backup data that is no longer
    # needed, i.e. anything older than keep_backups_for_days.
    shell('check_call', [
        "/usr/bin/duplicity", "remove-older-than",
        "%dD" % keep_backups_for_days, "--archive-dir", backup_cache_dir,
        "--force", "file://" + backup_dir
    ], env_with_passphrase)

    # From duplicity's manual:
    # "This should only be necessary after a duplicity session fails or is
    # aborted prematurely."
    # That may be unlikely here but we may as well ensure we tidy up if
    # that does happen - it might just have been a poorly timed reboot.
    shell('check_call', [
        "/usr/bin/duplicity", "cleanup", "--archive-dir", backup_cache_dir,
        "--force", "file://" + backup_dir
    ], env_with_passphrase)

    # Change ownership of backups to the user-data user, so that the after-backup
    # script can access them.
    shell('check_call', ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])

    # Execute a post-backup script that does the copying to a remote server.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    post_script = os.path.join(backup_root, 'after-backup')
    if os.path.exists(post_script):
        shell('check_call', ['su', env['STORAGE_USER'], '-c', post_script],
              env=env)

    # Our nightly cron job executes system status checks immediately after this
    # backup. Since it checks that dovecot and postfix are running, block for a
    # bit (maximum of 10 seconds each) to give each a chance to finish restarting
    # before the status checks might catch them down. See #381.
    wait_for_service(25, True, env, 10)
    wait_for_service(993, True, env, 10)
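
The passphrase check above expects secret_key.txt to hold 2048 random bits,
base64-encoded. A hedged sketch of creating a compatible file (not the
installer's actual code): since each base64 character carries 6 bits, 43
characters already encode 258 bits, more than AES-256's 256-bit key.

import base64, os

# 2048 random bits = 256 bytes; b64encode yields a single 344-character line.
# gpg reads only the first line, which easily clears the 43-character check.
key = base64.b64encode(os.urandom(256)).decode("ascii")
with open("secret_key.txt", "w") as f:
    f.write(key + "\n")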
Example No. 29
0
def write_backup_config(env, newconfig):
    backup_root = os.path.join(env["STORAGE_ROOT"], "backup")
    with open(os.path.join(backup_root, "custom.yaml"), "w") as f:
        f.write(rtyaml.dump(newconfig))


if __name__ == "__main__":
    import sys

    if sys.argv[-1] == "--verify":
        # Run duplicity's verification command to check a) the backup files
        # are readable, and b) report if they are up to date.
        run_duplicity_verification()

    elif sys.argv[-1] == "--status":
        # Show backup status.
        ret = backup_status(load_environment())
        print(rtyaml.dump(ret["backups"]))

    elif len(sys.argv) >= 2 and sys.argv[1] == "--restore":
        # Run duplicity restore. Rest of command line passed as arguments
        # to duplicity. The restore path should be specified.
        run_duplicity_restore(sys.argv[2:])

    else:
        # Perform a backup. Add --full to force a full backup rather than
        # possibly performing an incremental backup.
        full_backup = "--full" in sys.argv
        perform_backup(full_backup)
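
The dispatcher above routes on the command line. An invocation sketch (script
path assumed):

#   python3 management/backup.py             # incremental backup (or full when should_force_full says so)
#   python3 management/backup.py --full      # force a full backup
#   python3 management/backup.py --status    # print backup status as YAML
#   python3 management/backup.py --verify    # check backup files with duplicity
#   python3 management/backup.py --restore <restore path and other duplicity args>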
Example No. 30
0
	from dns_update import do_dns_update
	results.append( do_dns_update(env) )

	from web_update import do_web_update
	results.append( do_web_update(env) )

	return "".join(s for s in results if s != "")

def validate_password(pw):
	# validate password
	if pw.strip() == "":
		raise ValueError("No password provided.")
	if re.search(r"[\s]", pw):
		raise ValueError("Passwords cannot contain spaces.")
	if len(pw) < 4:
		raise ValueError("Passwords must be at least four characters.")


if __name__ == "__main__":
	import sys
	if len(sys.argv) > 2 and sys.argv[1] == "validate-email":
		# Validate that we can create a Dovecot account for a given string.
		if validate_email(sys.argv[2], mode='user'):
			sys.exit(0)
		else:
			sys.exit(1)

	if len(sys.argv) > 1 and sys.argv[1] == "update":
		from utils import load_environment
		print(kick(load_environment()))
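
A quick usage sketch for validate_password(): it returns None on success and
raises ValueError with a human-readable message otherwise.

for candidate in ("", "has space", "abc", "correct-horse"):
	try:
		validate_password(candidate)
		print(repr(candidate), "accepted")
	except ValueError as e:
		print(repr(candidate), "rejected:", e)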
Example No. 31
0
		# Write out.

		with open(ssl_certificate, "w") as f:
			f.write(pem)

		print("The certificate has been installed in %s. Restarting services..." % ssl_certificate)

		# Restart dovecot and postfix if this is for PRIMARY_HOSTNAME.

		if domain == env['PRIMARY_HOSTNAME']:
			shell('check_call', ["/usr/sbin/service", "dovecot", "restart"])
			shell('check_call', ["/usr/sbin/service", "postfix", "restart"])

		# Restart nginx in all cases.

		shell('check_call', ["/usr/sbin/service", "nginx", "restart"])

	else:
		print("The certificate has an unknown status. Please check https://www.gandi.net/admin/ssl/%d/details for the status of this order." % cert['id'])

if __name__ == "__main__":
	if len(sys.argv) < 4:
		print("Usage: python management/buy_certificate.py gandi_api_key domain_name {purchase, setup}")
		sys.exit(1)
	api_key = sys.argv[1]
	domain_name = sys.argv[2]
	cmd = sys.argv[3]
	buy_ssl_certificate(api_key, domain_name, cmd, load_environment())

Example No. 32
0
def perform_backup(full_backup, user_initiated=False):
    env = load_environment()
    php_fpm = f"php{get_php_version()}-fpm"

    # Create a global exclusive lock so that the backup script
    # cannot be run more than once at a time.
    lock = Lock(name="mailinabox_backup_daemon", die=(not user_initiated))
    if user_initiated:
        # God forgive me for what I'm about to do: reach into Lock's
        # private _acquire() so a failure can be reported to the user
        # instead of killing the process.
        try:
            lock._acquire()
        except CannotAcquireLock:
            return "Another backup is already being done!"
    else:
        lock.forever()

    config = get_backup_config(env)
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    backup_cache_dir = os.path.join(backup_root, 'cache')
    backup_dir = os.path.join(backup_root, 'encrypted')

    # Are backups disabled?
    if config["target"] == "off":
        return

    # On the first run, always do a full backup; an incremental
    # backup would fail. Otherwise do a full backup when the total
    # size of the increments since the most recent full backup is
    # large.
    try:
        full_backup = full_backup or should_force_full(config, env)
    except Exception as e:
        # This was the first call to duplicity, and there might
        # be an error already.
        print(e)
        sys.exit(1)

    # Stop services.
    def service_command(service, command, quit=None):
        # Execute silently, but if there is an error then display the output and optionally exit.
        code, ret = shell('check_output',
                          ["/usr/sbin/service", service, command],
                          capture_stderr=True,
                          trap=True)
        if code != 0:
            print(ret)
            if quit:
                sys.exit(code)

    service_command(php_fpm, "stop", quit=True)
    service_command("postfix", "stop", quit=True)
    service_command("dovecot", "stop", quit=True)

    # Execute a pre-backup script that copies files outside the homedir.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    pre_script = os.path.join(backup_root, 'before-backup')
    if os.path.exists(pre_script):
        shell('check_call',
              ['su', env['STORAGE_USER'], '-c', pre_script, config["target"]],
              env=env)

    # Run a backup of STORAGE_ROOT (but excluding the backups themselves!).
    # --allow-source-mismatch is needed in case the box's hostname is changed
    # after the first backup. See #396.
    try:
        shell('check_call', [
            "/usr/bin/duplicity", "full" if full_backup else "incr",
            "--verbosity", "warning", "--no-print-statistics", "--archive-dir",
            backup_cache_dir, "--exclude", backup_root, "--volsize", "250",
            "--gpg-options", "--cipher-algo=AES256", env["STORAGE_ROOT"],
            config["target"], "--allow-source-mismatch"
        ] + rsync_ssh_options, get_env(env))
    finally:
        # Start services again.
        service_command("dovecot", "start", quit=False)
        service_command("postfix", "start", quit=False)
        service_command(php_fpm, "start", quit=False)

    # Remove old backups. This deletes all backup data that is no longer
    # needed, i.e. anything older than config["min_age_in_days"].
    shell('check_call', [
        "/usr/bin/duplicity", "remove-older-than",
        "%dD" % config["min_age_in_days"], "--verbosity", "error",
        "--archive-dir", backup_cache_dir, "--force", config["target"]
    ] + rsync_ssh_options, get_env(env))

    # From duplicity's manual:
    # "This should only be necessary after a duplicity session fails or is
    # aborted prematurely."
    # That may be unlikely here but we may as well ensure we tidy up if
    # that does happen - it might just have been a poorly timed reboot.
    shell('check_call', [
        "/usr/bin/duplicity", "cleanup", "--verbosity", "error",
        "--archive-dir", backup_cache_dir, "--force", config["target"]
    ] + rsync_ssh_options, get_env(env))

    # Change ownership of backups to the user-data user, so that the after-backup
    # script can access them.
    if get_target_type(config) == 'file':
        shell('check_call',
              ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])

    # Execute a post-backup script that does the copying to a remote server.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    post_script = os.path.join(backup_root, 'after-backup')
    if os.path.exists(post_script):
        shell('check_call',
              ['su', env['STORAGE_USER'], '-c', post_script, config["target"]],
              env=env)

    # Our nightly cron job executes system status checks immediately after this
    # backup. Since it checks that dovecot and postfix are running, block for a
    # bit (maximum of 10 seconds each) to give each a chance to finish restarting
    # before the status checks might catch them down. See #381.
    if user_initiated:
        # God forgive me for what I'm about to do: release via the
        # private _release(), since we never called forever().
        lock._release()
        # We don't need to wait for the services to come back up in this case.
    else:
        wait_for_service(25, True, env, 10)
        wait_for_service(993, True, env, 10)
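
perform_backup() above passes a module-level rsync_ssh_options list through to
duplicity; its definition is elided from these excerpts. A hedged sketch of
its shape for rsync-over-SSH targets (illustrative values only -- the real
list is built elsewhere in management/backup.py):

rsync_ssh_options = [
    "--rsync-options=-e \"/usr/bin/ssh -oStrictHostKeyChecking=no -i /root/.ssh/id_rsa_miab\"",
]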
Example No. 33
0
#!/usr/bin/python3

import os, os.path, re, json
import subprocess
from functools import wraps

from flask import Flask, request, render_template, abort, Response, send_from_directory, make_response

import auth, utils, multiprocessing.pool
from mailconfig import get_mail_users, get_mail_users_ex, get_admins, add_mail_user, set_mail_password, remove_mail_user
from mailconfig import get_mail_user_privileges, add_remove_mail_user_privilege
from mailconfig import get_mail_aliases, get_mail_aliases_ex, get_mail_domains, add_mail_alias, remove_mail_alias

env = utils.load_environment()

auth_service = auth.KeyAuthService()

# We may deploy via a symbolic link, which confuses flask's template finding.
me = __file__
try:
	me = os.readlink(__file__)
except OSError:
	pass

# for generating CSRs we need a list of country codes
csr_country_codes = []
with open(os.path.join(os.path.dirname(me), "csr_country_codes.tsv")) as f:
	for line in f:
		if line.strip() == "" or line.startswith("#"): continue
		code, name = line.strip().split("\t")[0:2]
		csr_country_codes.append((code, name))
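
The loader above skips blank lines and comment lines, then takes the first two
tab-separated fields. A small self-contained demo of the same parse against an
inline sample (the real csr_country_codes.tsv ships with Mail-in-a-Box):

import io

sample = "# code\tname\nUS\tUnited States\nDE\tGermany\n"
codes = []
for line in io.StringIO(sample):
	if line.strip() == "" or line.startswith("#"): continue
	code, name = line.strip().split("\t")[0:2]
	codes.append((code, name))
print(codes)  # [('US', 'United States'), ('DE', 'Germany')]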
Example No. 34
0
def perform_backup(full_backup):
	env = load_environment()

	# Create a global exclusive lock so that the backup script
	# cannot be run more than once at a time.
	Lock(die=True).forever()

	config = get_backup_config(env)
	backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
	backup_cache_dir = os.path.join(backup_root, 'cache')
	backup_dir = os.path.join(backup_root, 'encrypted')

	# Are backups disabled?
	if config["target"] == "off":
		return

	# On the first run, always do a full backup; an incremental
	# backup would fail. Otherwise do a full backup when the total
	# size of the increments since the most recent full backup is
	# large.
	try:
		full_backup = full_backup or should_force_full(config, env)
	except Exception as e:
		# This was the first call to duplicity, and there might
		# be an error already.
		print(e)
		sys.exit(1)

	# Stop services.
	def service_command(service, command, quit=None):
		# Execute silently, but if there is an error then display the output and optionally exit.
		code, ret = shell('check_output', ["/usr/sbin/service", service, command], capture_stderr=True, trap=True)
		if code != 0:
			print(ret)
			if quit:
				sys.exit(code)

	service_command("php7.2-fpm", "stop", quit=True)
	service_command("postfix", "stop", quit=True)
	service_command("dovecot", "stop", quit=True)

	# Execute a pre-backup script that copies files outside the homedir.
	# Run as the STORAGE_USER user, not as root. Pass our settings in
	# environment variables so the script has access to STORAGE_ROOT.
	pre_script = os.path.join(backup_root, 'before-backup')
	if os.path.exists(pre_script):
		shell('check_call',
			['su', env['STORAGE_USER'], '-c', pre_script, config["target"]],
			env=env)

	# Run a backup of STORAGE_ROOT (but excluding the backups themselves!).
	# --allow-source-mismatch is needed in case the box's hostname is changed
	# after the first backup. See #396.
	try:
		shell('check_call', [
			"/usr/bin/duplicity",
			"full" if full_backup else "incr",
			"--verbosity", "warning", "--no-print-statistics",
			"--archive-dir", backup_cache_dir,
			"--exclude", backup_root,
			"--volsize", "250",
			"--gpg-options", "--cipher-algo=AES256",
			env["STORAGE_ROOT"],
			config["target"],
			"--allow-source-mismatch"
			] + rsync_ssh_options,
			get_env(env))
	finally:
		# Start services again.
		service_command("dovecot", "start", quit=False)
		service_command("postfix", "start", quit=False)
		service_command("php7.2-fpm", "start", quit=False)

	# Remove old backups. This deletes all backup data that is no longer
	# needed, i.e. anything older than config["min_age_in_days"].
	shell('check_call', [
		"/usr/bin/duplicity",
		"remove-older-than",
		"%dD" % config["min_age_in_days"],
		"--verbosity", "error",
		"--archive-dir", backup_cache_dir,
		"--force",
		config["target"]
		] + rsync_ssh_options,
		get_env(env))

	# From duplicity's manual:
	# "This should only be necessary after a duplicity session fails or is
	# aborted prematurely."
	# That may be unlikely here but we may as well ensure we tidy up if
	# that does happen - it might just have been a poorly timed reboot.
	shell('check_call', [
		"/usr/bin/duplicity",
		"cleanup",
		"--verbosity", "error",
		"--archive-dir", backup_cache_dir,
		"--force",
		config["target"]
		] + rsync_ssh_options,
		get_env(env))

	# Change ownership of backups to the user-data user, so that the after-backup
	# script can access them.
	if get_target_type(config) == 'file':
		shell('check_call', ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])

	# Execute a post-backup script that does the copying to a remote server.
	# Run as the STORAGE_USER user, not as root. Pass our settings in
	# environment variables so the script has access to STORAGE_ROOT.
	post_script = os.path.join(backup_root, 'after-backup')
	if os.path.exists(post_script):
		shell('check_call',
			['su', env['STORAGE_USER'], '-c', post_script, config["target"]],
			env=env)

	# Our nightly cron job executes system status checks immediately after this
	# backup. Since it checks that dovecot and postfix are running, block for a
	# bit (maximum of 10 seconds each) to give each a chance to finish restarting
	# before the status checks might catch them down. See #381.
	wait_for_service(25, True, env, 10)
	wait_for_service(993, True, env, 10)
Example No. 35
0
	def __getattr__(self, attr):
		if attr not in ("add_heading", "print_ok", "print_error", "print_warning", "print_block", "print_line"):
			raise AttributeError
		# Return a function that just records the call & arguments to our buffer.
		def w(*args, **kwargs):
			self.buf.append((attr, args, kwargs))
		return w
	def playback(self, output):
		for attr, args, kwargs in self.buf:
			getattr(output, attr)(*args, **kwargs)


if __name__ == "__main__":
	from utils import load_environment

	env = load_environment()
	pool = multiprocessing.pool.Pool(processes=10)

	if len(sys.argv) == 1:
		run_checks(False, env, ConsoleOutput(), pool)

	elif sys.argv[1] == "--show-changes":
		run_and_output_changes(env, pool)

	elif sys.argv[1] == "--check-primary-hostname":
		# See if the primary hostname appears resolvable and has a signed certificate.
		domain = env['PRIMARY_HOSTNAME']
		if query_dns(domain, "A") != env['PUBLIC_IP']:
			sys.exit(1)
		ssl_certificates = get_ssl_certificates(env)
		ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, ssl_certificates, env)
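
A usage sketch for the record-and-playback buffer above, assuming the
enclosing class (elided from this excerpt) is named BufferedOutput and its
constructor sets self.buf = []: calls are captured as (method, args, kwargs)
tuples and later replayed in order against a real output object.

class ConsoleLikeOutput:
	# A stand-in for the real ConsoleOutput used elsewhere in status_checks.py.
	def print_ok(self, message):
		print("ok:", message)
	def print_error(self, message):
		print("error:", message)

buffered = BufferedOutput()
buffered.print_ok("dovecot is running")   # recorded, not printed yet
buffered.print_error("postfix is down")   # recorded, not printed yet
buffered.playback(ConsoleLikeOutput())    # replays both calls in order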
Example No. 36
0
def create_bot_account_post(user):  # noqa: E501
    """Create a bot email account to send/receive messages from.

     # noqa: E501

    :param user: The bot user to send/receive messages from.
    :type user: dict | bytes

    :rtype: bool
    """
    bots_r = redis.StrictRedis(host='localhost', port=6379, db=2)
    deactivated_bots_r = redis.StrictRedis(host='localhost', port=6379, db=3)
    reactivating = False
    if connexion.request.is_json:
        user = User.from_dict(connexion.request.get_json())  # noqa: E501

    # Load mailinabox env variables
    env = utils.load_environment()

    # If we're reactivating a deactivated bot account, delete it from the deactivated list
    if user.email_address.encode('utf-8') in deactivated_bots_r.scan_iter():
        deactivated_bots_r.delete(user.email_address)
        reactivating = True

    # Load bot credentials file
    with open('{}/bcr.json'.format(seemail_path), 'r') as f:
        creds = json.load(f)

    # Generate a password and create the actual email account
    if user.email_address in creds:
        pwd = creds[user.email_address]
    else:
        pwd = generate_password()
        creds[user.email_address] = pwd
        with open('{}/bcr.json'.format(seemail_path), 'w') as f:
            json.dump(creds, f)

    # Add mailbox for bot
    res = mailconfig.add_mail_user(user.email_address, pwd, "", env)

    # Add to our Redis bot account db
    res = bots_r.set(user.email_address, 1)

    # Add first and last names to names sqlite db (separate from the mailinabox
    # db file to avoid messing up any of their management services)
    conn1 = sql.connect('{}/users.sqlite'.format(mail_home))
    conn2 = sql.connect('{}/user_names.sqlite'.format(mail_home))
    cur1 = conn1.cursor()
    cur2 = conn2.cursor()
    # Use parameterized queries so addresses and names can't break the SQL.
    user_id = cur1.execute('select id from users where email=?',
                           (user.email_address,)).fetchone()[0]
    try:
        cur2.execute('insert into names values (?, ?, ?, ?)',
                     (user_id, user.first_name, user.last_name, user.email_address))
    except sql.IntegrityError:
        pass # User already in the names DB, might hit this when reactivating an existing bot
    conn2.commit()
    cur1.close()
    cur2.close()
    conn1.close()
    conn2.close()

    if not reactivating:
        logging.info("Added bot account {}".format(user.email_address))
    else:
        logging.info("Reactivated bot account {}".format(user.email_address))

    return res
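
One hedged refinement to the reactivation check above: membership via
scan_iter() walks every key in the deactivated-bots database client-side,
whereas an exact-key EXISTS lookup answers the same question in a single
round trip.

    # Sketch: exact-key lookup instead of scanning all keys.
    if deactivated_bots_r.exists(user.email_address):
        deactivated_bots_r.delete(user.email_address)
        reactivating = True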