示例#1
0
def default_quota_set():
    """Set the system-wide default mailbox quota from the request.

    Reads ``default_quota`` from the request values, validates it with
    validate_quota(), persists it under the ``default-quota`` settings key,
    and returns "OK" — or an ("ERROR: ...", 400) tuple when the submitted
    value is invalid.
    """
    config = utils.load_settings(env)
    # Keep the try body minimal: only validate_quota() is expected to raise
    # ValueError for bad user input. Previously utils.write_settings() was
    # inside the try as well, so an unrelated ValueError from persisting
    # settings would have been misreported as a 400 client error.
    try:
        quota = validate_quota(request.values.get('default_quota'))
    except ValueError as e:
        return ("ERROR: %s" % str(e), 400)
    config["default-quota"] = quota
    utils.write_settings(config, env)
    return "OK"
示例#2
0
def smtp_relay_set():
    """Configure an outbound SMTP relay from the submitted form.

    Persists the relay settings into the daemon settings store, rewrites the
    relevant Postfix main.cf keys, (when auth is enabled) rewrites the SASL
    password map, and restarts Postfix.  Returns the shell output of the
    Postfix restart on success, or (error-message, 500) on any failure.
    """
    from editconf import edit_conf
    config = utils.load_settings(env)
    # newconf is the Flask form data; all values arrive as strings.
    newconf = request.form
    try:
        # Write on daemon settings
        config["SMTP_RELAY_ENABLED"] = (newconf.get("enabled") == "true")
        config["SMTP_RELAY_HOST"] = newconf.get("host")
        config["SMTP_RELAY_AUTH"] = (newconf.get("auth_enabled") == "true")
        config["SMTP_RELAY_USER"] = newconf.get("user")
        utils.write_settings(config, env)
        # Write on Postfix configs.  The relay port is hard-coded to 587
        # (submission); clearing "relayhost" disables relaying.
        edit_conf("/etc/postfix/main.cf", [
            "relayhost=" +
            (f"[{config['SMTP_RELAY_HOST']}]:587" if
             config["SMTP_RELAY_ENABLED"] else ""), "smtp_sasl_auth_enable=" +
            ("yes" if config["SMTP_RELAY_AUTH"] else "no"),
            "smtp_sasl_security_options=" +
            ("noanonymous" if config["SMTP_RELAY_AUTH"] else "anonymous"),
            "smtp_sasl_tls_security_options=" +
            ("noanonymous" if config["SMTP_RELAY_AUTH"] else "anonymous")
        ],
                  delimiter_re=r"\s*=\s*",
                  delimiter="=",
                  comment_char="#")
        if config["SMTP_RELAY_AUTH"]:
            # Edit the sasl password map used by Postfix for relay auth.
            # NOTE(review): if the form omitted "key", newconf.get('key') is
            # None and the literal string "None" is written as the password —
            # verify the UI always supplies a key when auth is enabled.
            with open("/etc/postfix/sasl_passwd", "w") as f:
                f.write(
                    f"[{config['SMTP_RELAY_HOST']}]:587 {config['SMTP_RELAY_USER']}:{newconf.get('key')}\n"
                )
            # Restrict the password file, then compile it into a Postfix map.
            utils.shell("check_output",
                        ["/usr/bin/chmod", "600", "/etc/postfix/sasl_passwd"],
                        capture_stderr=True)
            utils.shell("check_output",
                        ["/usr/sbin/postmap", "/etc/postfix/sasl_passwd"],
                        capture_stderr=True)
        # Restart Postfix
        return utils.shell("check_output",
                           ["/usr/bin/systemctl", "restart", "postfix"],
                           capture_stderr=True)
    except Exception as e:
        # Broad by design: any failure in the file/shell pipeline is surfaced
        # to the caller as a 500 with the exception text.
        return (str(e), 500)
示例#3
0
def privacy_status_set():
	"""Update the stored "privacy" flag from the posted form value.

	The flag becomes True exactly when the form field 'value' equals
	"private"; the change is persisted via utils.write_settings.
	"""
	wants_private = request.form.get('value') == "private"
	settings = utils.load_settings(env)
	settings["privacy"] = wants_private
	utils.write_settings(settings, env)
	return "OK"
示例#4
0
文件: daemon.py 项目: fdns/mailinabox
def privacy_status_set():
	"""Persist whether the box is in "private" mode.

	Sets settings["privacy"] to the boolean result of comparing the
	submitted form field 'value' against "private", then saves.
	"""
	current = utils.load_settings(env)
	current["privacy"] = (request.form.get('value') == "private")
	utils.write_settings(current, env)
	return "OK"
def main_worker(local_rank, args):
    """Per-process entry point for DistributedDataParallel + AMP training.

    Args:
        local_rank: index of the GPU this process drives (also the device id).
        args: parsed CLI namespace; mutated in place (``outpath``, ``nprocs``,
            ``batch_size`` are rewritten below).

    Side effects: initializes the NCCL process group, and on rank 0 creates
    the output directory, logger, TensorBoard writer, and checkpoints.

    Raises:
        ValueError: if ``args.lr_scheduler`` is not a supported value.
    """
    best_acc1 = 0
    best_acc1_index = 0
    # Only rank 0 owns a logger/writer; other ranks keep None and ddp_print()
    # is expected to handle that.
    logger = None
    writer = None

    args.outpath = args.outpath + '_' + args.arch

    if args.local_rank == 0:
        output_process(args.outpath)
        logger = get_logger(args.outpath, 'DistributedDataParallel_amp')
        writer = SummaryWriter(args.outpath)

    # distributed init
    args.nprocs = torch.cuda.device_count()
    dist.init_process_group(backend='nccl')
    torch.cuda.set_device(device=local_rank)

    if args.local_rank == 0:
        write_settings(args)
        logger.info(args)

    # create model
    if args.pretrained:
        ddp_print("=> using pre-trained model: {}".format(args.arch), logger,
                  local_rank)
        model = models.__dict__[args.arch](pretrained=True)
    else:
        ddp_print('=> creating model: {}'.format(args.arch), logger,
                  local_rank)
        model = models.__dict__[args.arch]()

    # sync BN
    if dist.is_available() and dist.is_initialized() and args.sync_batchnorm:
        ddp_print("=> using sync BN", logger, local_rank)
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    else:
        ddp_print("=> not use sync BN", logger, local_rank)

    model = model.cuda(device=local_rank)
    # When using a single GPU per process and per
    # DistributedDataParallel, we need to divide the batch size
    # ourselves based on the total number of GPUs we have
    args.batch_size = int(args.batch_size / args.nprocs)
    model = DDP(model, device_ids=[local_rank])

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(device=local_rank)
    optimizer = optim.SGD(model.parameters(),
                          args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    if args.lr_scheduler == 'steplr':
        lr_scheduler = MultiStepLR(optimizer,
                                   milestones=args.step,
                                   gamma=args.gamma)
        ddp_print('lr_scheduler: SGD MultiStepLR !!!', logger, local_rank)
    else:
        # FIX: the original `assert False, ddp_print(...)` is unsafe —
        # asserts are stripped under `python -O`, and ddp_print() returns
        # None so the assertion message was always None.  Log, then raise.
        ddp_print("invalid lr_scheduler={}".format(args.lr_scheduler), logger,
                  local_rank)
        raise ValueError(
            "invalid lr_scheduler={}".format(args.lr_scheduler))

    # dataloader
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    # DistributedSampler shards the data across ranks; shuffle is handled by
    # the sampler itself (plus set_epoch below), not by the DataLoader.
    train_sampler = DistributedSampler(train_dataset)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              num_workers=args.workers,
                              pin_memory=True,
                              sampler=train_sampler)

    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    val_sampler = DistributedSampler(val_dataset)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            num_workers=args.workers,
                            pin_memory=True,
                            sampler=val_sampler)

    if args.evaluate:
        validate(val_loader,
                 model,
                 criterion,
                 args,
                 logger,
                 writer,
                 epoch=-1,
                 local_rank=local_rank)
        return 0

    # amp: GradScaler is a no-op when use_amp is False.
    scaler = GradScaler(enabled=args.use_amp)

    total_start = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        # DDP: re-seed the samplers so each epoch gets a different shard order.
        train_sampler.set_epoch(epoch)
        val_sampler.set_epoch(epoch)

        epoch_start = time.time()
        # NOTE(review): stepping the scheduler before the optimizer (and
        # passing `epoch`) is the deprecated pre-1.1 PyTorch ordering; kept
        # as-is because changing it would shift the effective LR schedule.
        lr_scheduler.step(epoch)

        # train for every epoch
        train(train_loader, model, criterion, optimizer, epoch, args, logger,
              writer, local_rank, scaler)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args, logger, writer,
                        epoch, local_rank)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        if is_best:
            best_acc1_index = epoch
            best_acc1 = acc1

        epoch_end = time.time()
        ddp_print(
            '||==> Epoch=[{:d}/{:d}]\tbest_acc1={:.4f}\tbest_acc1_index={}\ttime_cost={:.4f}s'
            .format(epoch, args.epochs, best_acc1, best_acc1_index,
                    epoch_end - epoch_start), logger, local_rank)

        if args.local_rank == 0:
            # save model (unwrap the DDP container via model.module)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.module.state_dict(),
                    'best_acc1': best_acc1,
                }, is_best, args.outpath)

    total_end = time.time()
    ddp_print('||==> total_time_cost={:.4f}s'.format(total_end - total_start),
              logger, local_rank)

    if args.local_rank == 0:
        writer.close()
示例#6
0
def main_worker(args):
    """Single-process nn.DataParallel training loop (ImageNet-style).

    Args:
        args: parsed CLI namespace; ``args.outpath`` is mutated in place.

    Side effects: creates the output directory, logger, TensorBoard writer,
    and checkpoints; updates the module-level ``best_acc1`` /
    ``best_acc1_index`` globals.

    Raises:
        ValueError: if ``args.lr_scheduler`` is not a supported value.
    """
    global best_acc1
    global best_acc1_index

    args.outpath = args.outpath + '_' + args.arch
    output_process(args.outpath)
    write_settings(args)
    logger = get_logger(args.outpath, 'DataParallel')
    writer = SummaryWriter(args.outpath)
    logger.info(args)

    # create model
    if args.pretrained:
        logger.info("=> using pre-trained model: {}".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        logger.info('=> creating model: {}'.format(args.arch))
        model = models.__dict__[args.arch]()

    model = nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    if args.lr_scheduler == 'steplr':
        lr_scheduler = MultiStepLR(optimizer, milestones=args.step, gamma=args.gamma)
        logger.info('lr_scheduler: SGD MultiStepLR !!!')
    else:
        # FIX: the original `assert False, logger.info(...)` is unsafe —
        # asserts are stripped under `python -O`, and logger.info() returns
        # None so the assertion message was always None.  Log, then raise.
        logger.info("invalid lr_scheduler={}".format(args.lr_scheduler))
        raise ValueError("invalid lr_scheduler={}".format(args.lr_scheduler))

    # dataloader
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                              num_workers=args.workers, pin_memory=True)

    val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]))
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                              num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion, args, logger, writer, epoch=-1)
        return 0

    total_start = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        epoch_start = time.time()
        # NOTE(review): stepping the scheduler before the optimizer (and
        # passing `epoch`) is the deprecated pre-1.1 PyTorch ordering; kept
        # as-is because changing it would shift the effective LR schedule.
        lr_scheduler.step(epoch)

        # train for every epoch
        train(train_loader, model, criterion, optimizer, epoch, args, logger, writer)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args, logger, writer, epoch)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        if is_best:
            best_acc1_index = epoch
            best_acc1 = acc1

        epoch_end = time.time()
        logger.info('||==> Epoch=[{:d}/{:d}]\tbest_acc1={:.4f}\tbest_acc1_index={}\ttime_cost={:.4f}s'
                    .format(epoch, args.epochs, best_acc1, best_acc1_index, epoch_end - epoch_start))

        # save model (unwrap the DataParallel container via model.module)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.module.state_dict(),
                'best_acc1': best_acc1,
            }, is_best, args.outpath)

    total_end = time.time()
    logger.info('||==> total_time_cost={:.4f}s'.format(total_end - total_start))
    writer.close()
示例#7
0
 def sync(self):
     """Synchronize: pull remote changes, push local ones, then record
     the completion time under ``last_update`` in the settings."""
     for step in (self.do_pull, self.do_push):
         step()
     utils.write_settings(self._conn, last_update=time.time())
示例#8
0
def smtp_relay_set():
    """Configure an outbound SMTP relay from the submitted form.

    Validates an optional relay DKIM selector/record, probes the relay host
    for implicit TLS, persists the daemon settings, rewrites the Postfix
    configuration and SASL password map, regenerates DNS, and reloads
    Postfix.  Returns the reload output on success, or an
    (error-message, HTTP-status) tuple on failure.
    """
    from editconf import edit_conf
    from os import chmod
    import re, socket, ssl

    config = utils.load_settings(env)
    # newconf is the Flask form data; all values arrive as strings.
    newconf = request.form

    # Is DKIM configured?
    sel = newconf.get("dkim_selector")
    if sel is None or sel.strip() == "":
        # No selector submitted: clear any previously stored relay DKIM data.
        config["SMTP_RELAY_DKIM_SELECTOR"] = None
        config["SMTP_RELAY_DKIM_RR"] = None
    elif re.fullmatch(r"[a-z\d\._]+", sel.strip()) is None:
        return ("The DKIM selector is invalid!", 400)
    elif sel.strip() == config.get("local_dkim_selector", "mail"):
        # The box publishes its own DKIM record under this selector already.
        return (
            f"The DKIM selector {sel.strip()} is already in use by the box!",
            400)
    else:
        # DKIM selector looks good, try processing the RR
        rr = newconf.get("dkim_rr", "")
        if rr.strip() == "":
            return ("Cannot publish a selector with an empty key!", 400)

        # Parse the TXT-style record ("tag=value; tag=value ...") into a dict;
        # any token that is not exactly tag=value is rejected.
        components = {}
        for r in re.split(r"[;\s]+", rr):
            sp = re.split(r"\=", r)
            if len(sp) != 2:
                return ("DKIM public key RR is malformed!", 400)
            components[sp[0]] = sp[1]

        # "p" holds the public key in a DKIM record; without it the record
        # is useless.
        if not components.get("p"):
            return ("The DKIM public key doesn't exist!", 400)

        config["SMTP_RELAY_DKIM_SELECTOR"] = sel
        config["SMTP_RELAY_DKIM_RR"] = components

    relay_on = False
    implicit_tls = False

    if newconf.get("enabled") == "true":
        relay_on = True

        # Try negotiating TLS directly. We need to know this because we need to configure Postfix
        # to be aware of this detail.
        try:
            ctx = ssl.create_default_context()
            # 5-second connect timeout; wrapping succeeding means the relay
            # speaks implicit TLS (wrappermode) rather than STARTTLS.
            with socket.create_connection(
                (newconf.get("host"), int(newconf.get("port"))), 5) as sock:
                with ctx.wrap_socket(sock,
                                     server_hostname=newconf.get("host")):
                    implicit_tls = True
        except ssl.SSLError as sle:
            # Couldn't connect via TLS, configure Postfix to send via STARTTLS
            print(sle.reason)
        except (socket.herror, socket.gaierror) as he:
            return (
                f"Unable to resolve hostname (it probably is incorrect): {he.strerror}",
                400)
        except socket.timeout:
            return (
                "We couldn't connect to the server. Is it down or did you write the wrong port number?",
                400)

    pw_file = "/etc/postfix/sasl_passwd"
    modify_password = True
    # Check that if the provided password is empty, that there was a password saved before
    if (newconf.get("key", "") == ""):
        if os.path.isfile(pw_file):
            modify_password = False
        else:
            return (
                "Please provide a password/key (there is no existing password to retain).",
                400)

    try:
        # Write on daemon settings
        config["SMTP_RELAY_ENABLED"] = relay_on
        config["SMTP_RELAY_HOST"] = newconf.get("host")
        config["SMTP_RELAY_PORT"] = int(newconf.get("port"))
        config["SMTP_RELAY_USER"] = newconf.get("user")
        # NOTE(review): the default [] for a form .get() is odd — form values
        # are strings; the trailing `or ""` is what actually guards None.
        config["SMTP_RELAY_AUTHORIZED_SERVERS"] = [
            s.strip()
            for s in re.split(r"[, ]+",
                              newconf.get("authorized_servers", []) or "")
            if s.strip() != ""
        ]
        utils.write_settings(config, env)

        # Write on Postfix configs
        edit_conf("/etc/postfix/main.cf", [
            "relayhost=" +
            (f"[{config['SMTP_RELAY_HOST']}]:{config['SMTP_RELAY_PORT']}"
             if config["SMTP_RELAY_ENABLED"] else ""),
            f"smtp_tls_wrappermode={'yes' if implicit_tls else 'no'}"
        ],
                  delimiter_re=r"\s*=\s*",
                  delimiter="=",
                  comment_char="#")

        # Edit the sasl password (still will edit the file, but keep the pw)

        # "a+" creates the file if missing without clobbering an existing
        # password; seek(0) lets us read the current first line.
        with open(pw_file, "a+") as f:
            f.seek(0)
            # Capture the stored password (the text after the last colon) so
            # it can be retained when the form omitted a new key.
            pwm = re.match(r"\[.+\]\:[0-9]+\s.+\:(.*)", f.readline())
            if (pwm is None or len(pwm.groups()) != 1) and not modify_password:
                # No parsable stored password and no new one submitted.
                return (
                    "Please provide a password/key (there is no existing password to retain).",
                    400)

            f.truncate(0)
            # NOTE(review): truncate(0) does not reset the file offset; after
            # the readline() above, this write lands past offset 0 and the OS
            # pads the gap with NUL bytes — an f.seek(0) before writing looks
            # necessary.  Verify against a box with an existing sasl_passwd.
            f.write(
                f"[{config['SMTP_RELAY_HOST']}]:{config['SMTP_RELAY_PORT']} {config['SMTP_RELAY_USER']}:{newconf.get('key') if modify_password else pwm[1]}\n"
            )
        # Restrict the password file, then compile it into a Postfix map.
        chmod(pw_file, 0o600)
        utils.shell("check_output", ["/usr/sbin/postmap", pw_file],
                    capture_stderr=True)

        # Regenerate DNS (to apply whatever changes need to be made)
        from dns_update import do_dns_update
        do_dns_update(env)

        # Restart Postfix
        return utils.shell("check_output", ["/usr/sbin/postfix", "reload"],
                           capture_stderr=True)
    except Exception as e:
        # Broad by design: any failure in the settings/file/shell pipeline is
        # surfaced to the caller with the exception text.
        return (str(e), 400)