Example #1
def install(vpn_opts: ClientOpts, svc_opts: UnixServiceOpts,
            auto_startup: bool, auto_dnsmasq: bool, dnsmasq: bool,
            auto_connman_dhcp: bool, force: bool):
    executor = VPNClientExecutor(vpn_opts).probe(log_lvl=logger.INFO)
    dns_resolver = executor.device.dns_resolver
    if not dnsmasq and not dns_resolver.is_connman():
        logger.error('Only dnsmasq is supported as the DNS resolver in this first version')
        sys.exit(ErrorCode.NOT_YET_SUPPORTED)
    if executor.is_installed(silent=True):
        if force:
            logger.warn('VPN service is already installed. Removing it, then reinstalling...')
            executor.do_uninstall(keep_vpn=False, keep_dnsmasq=True)
        else:
            logger.error('VPN service is already installed')
            sys.exit(ErrorCode.VPN_ALREADY_INSTALLED)
    if (dnsmasq and not dns_resolver.is_dnsmasq_available()
            and not dns_resolver.is_connman()):
        executor.device.install_dnsmasq(auto_dnsmasq)
    logger.info(f'Installing VPN client into [{vpn_opts.vpn_dir}] '
                f'and registering service [{svc_opts.service_name}]...')
    executor.do_install(svc_opts, auto_startup, auto_connman_dhcp)
    logger.done()
Example #2
def wrapper(*args, **kwargs):
    opts = kwargs[opt_name]
    dev = kwargs.pop('dev')
    # In dev mode (when not running as a packaged binary), reload the
    # options against the dev directory before invoking the wrapped function
    if dev and not EnvHelper.is_binary_mode():
        dev_dir = EnvHelper.get_dev_dir()
        logger.warn(f'[DEV MODE] Reload vpn_dir to {dev_dir}')
        kwargs[opt_name] = opts.reload(dev_dir)
    return func(*args, **kwargs)
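This `wrapper` closes over `func` and `opt_name`, which the snippet does not show. A minimal sketch of the enclosing decorator factory it likely belongs to (the factory name `dev_mode_option` and the `functools.wraps` usage are assumptions, not taken from the original):

import functools

def dev_mode_option(opt_name: str):
    # Hypothetical factory that supplies `func` and `opt_name` to the closure
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            ...  # body as in Example #2 above
        return wrapper
    return decorator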
Example #3
def __validate(file):
    # Map each line's content to the list of line numbers where it appears
    keys = {}
    for n, row in enumerate(file, start=1):
        keys.setdefault(row, []).append(n)
    duplicated = 0
    for k, v in keys.items():
        if len(v) > 1:
            logger.warn(f'Duplicated key: {k.strip()} on lines {v}')
            duplicated += 1
    if duplicated == 0:
        logger.success('No duplication')
        sys.exit(0)
    logger.error(f'Duplicated {duplicated} keys')
    sys.exit(20)
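A minimal usage sketch, assuming the validator is fed an open text file of key lines; note it exits the process with code 0 or 20 (the file name is hypothetical):

with open('keys.properties') as f:  # hypothetical input file
    __validate(f)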
Example #4
def append_linear_layer_transform(model, num_of_classes):
    if isinstance(model, torchvision.models.resnet.ResNet):
        if num_of_classes == model.fc.out_features:
            # Nothing to replace: the head already outputs the requested number of classes.
            return model
        logger.warn(
            f'Appending an extra FC layer to map the model output to {num_of_classes} classes'
        )
        model = torch.nn.Sequential(
            model,
            torch.nn.Linear(in_features=model.fc.out_features,
                            out_features=num_of_classes,
                            bias=True))

    return model
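A short usage sketch, assuming torchvision >= 0.13 for the `weights` argument (the 10-class target is arbitrary):

import torch
import torchvision

model = torchvision.models.resnet18(weights=None)  # stock 1000-class head
model = append_linear_layer_transform(model, num_of_classes=10)
print(model(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 10])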
Example #5
def probe(self) -> 'DNSResolver':
    # Prefer the first enabled resolver service; otherwise keep the current kind
    self.kind = next(
        (t for t in DNSResolverType.as_services()
         if self.service.status(t.config.identity).is_enabled()),
        self.kind)
    if self.kind.might_be_command():
        # Fall back to the current kind if no command matches, avoiding StopIteration
        self.kind = next(
            (t for t in DNSResolverType.as_command()
             if SystemHelper.verify_command(t.config.identity)),
            self.kind)
    if self.kind.is_unknown():
        logger.warn('Unknown DNS resolver. The VPN DNS IP might not be resolved correctly')
    if self.kind not in [DNSResolverType.DNSMASQ, DNSResolverType.UNKNOWN]:
        dnsmasq_name = DNSResolverType.DNSMASQ.config.identity
        self._is_dnsmasq = (self.service.status(dnsmasq_name).is_enabled()
                            or shutil.which(dnsmasq_name) is not None)
    logger.debug(
        f'Current DNS resolver [{self.kind.name}], is dnsmasq available [{self._is_dnsmasq}]')
    return self
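The `next((x for x in xs if pred(x)), fallback)` idiom used twice above returns the first matching element or a fallback instead of raising StopIteration. A standalone illustration with made-up candidate names:

candidates = ['resolved', 'dnsmasq', 'unknown']
first = next((c for c in candidates if c.startswith('dns')), 'unknown')
print(first)  # dnsmasq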
Example #6
def __dns(vpn_opts: ClientOpts, nic: str, reason: str, new_nameservers: str,
          old_nameservers: str, debug: bool):
    logger.info(f'Discover DNS with {reason}::{nic}...')
    _reason = DHCPReason[reason]
    if not vpn_opts.is_vpn_nic(nic):
        logger.warn(f'NIC[{nic}] does not belong to the VPN service')
        sys.exit(0)
    executor = VPNClientExecutor(
        vpn_opts, adhoc_task=True).require_install().probe(silent=True,
                                                           log_lvl=logger.INFO)
    current = executor.storage.get_current(info=True)
    if not current:
        current = executor.storage.find(executor.opts.nic_to_account(nic))
        if not current:
            logger.warn('No VPN account found')
            sys.exit(ErrorCode.VPN_ACCOUNT_NOT_FOUND)
    if executor.opts.nic_to_account(nic) != current.account:
        logger.warn(f'NIC[{nic}] does not match the current VPN account')
        sys.exit(ErrorCode.VPN_ACCOUNT_NOT_MATCH)
    if debug:
        now = datetime.now().isoformat()
        FileHelper.write_file(
            FileHelper.tmp_dir().joinpath('vpn_dns'), append=True,
            content=f"{now}::{reason}::{nic}::{new_nameservers}::{old_nameservers}\n")
    executor.device.dns_resolver.resolve(executor.vpn_service, _reason,
                                         current.hub, new_nameservers,
                                         old_nameservers)
Example #7
def train_and_evaluate_model(arguments):
    """
    Main Pipeline for training and cross-validation.
    """
    """ Setup result directory and enable logging to file in it """
    logger.init(arguments['outdir'],
                filename_prefix='train_cls',
                log_level=logging.INFO)  # keep logs at root dir.
    outdir = os.path.join(arguments['outdir'], 'train_cls')
    os.makedirs(outdir, exist_ok=True)
    logger.info('Arguments:\n{}'.format(pformat(arguments)))
    """ Set random seed throughout python"""
    utils.set_random_seed(random_seed=arguments['random_seed'])
    """ Create tensorboard writer """
    tb_writer = initialize_tensorboard(outdir)
    """ Set device - cpu or gpu """
    device = torch.device(
        arguments['cuda_device'] if torch.cuda.is_available() else "cpu")
    logger.info(f'Using device - {device}')
    """ Load parameters for the Dataset """
    dataset = create_dataset(arguments['dataset_args'],
                             arguments['train_data_args'],
                             arguments['val_data_args'])
    """ Load Model with weights(if available) """
    model: torch.nn.Module = models_utils.get_model(
        arguments.get('model_args'), device,
        arguments['dataset_args']).to(device)
    """ Create optimizer and scheduler """
    optimizer = optimizer_utils.create_optimizer(model.parameters(),
                                                 arguments['optimizer_args'])
    lr_scheduler: _LRScheduler = optimizer_utils.create_scheduler(
        optimizer, arguments['scheduler_args'])
    """ Create loss function """
    logger.info(f"Loss weights {dataset.pos_neg_balance_weights()}")
    criterion = loss_utils.create_loss(arguments['loss_args'])
    """ Sample and View the inputs to model """
    dataset.debug()
    """ Pipeline - loop over the dataset multiple times """
    max_validation_acc, best_validation_model_path = 0, None
    batch_index = 0
    nb_epochs = 1 if is_debug_mode() else arguments['nb_epochs']
    for epoch in range(nb_epochs):
        """ Train the model """
        logger.info(f"Training, Epoch {epoch + 1}/{nb_epochs}")
        train_dataloader = dataset.train_dataloader
        model.train()
        start = time.time()
        total, correct = 0, 0
        epoch_loss = 0
        for i, data in enumerate(tqdm(train_dataloader)):
            # get the inputs
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # Forward Pass
            outputs = model(inputs)

            loss = criterion(outputs, labels)
            loss.backward()

            tb_writer.save_scalar('batch_training_loss', loss.item(),
                                  batch_index)
            batch_index += 1
            epoch_loss += loss.item() * labels.size(0)
            total += labels.size(0)

            _, predicted = torch.max(outputs.data, 1)
            correct += (predicted == labels).sum().item()

            optimizer.step()

        epoch_loss = epoch_loss / total
        logger.info(f"Epoch = {epoch}, Train_loss = {epoch_loss}, "
                    f"Time taken = {time.time() - start} seconds.")

        logger.info(f"Train_accuracy = {100 * correct / total}")
        tb_writer.save_scalar('training_loss', epoch_loss, epoch)
        tb_writer.save_scalar('training_acc', 100 * correct / total, epoch)
        """ Validate the model """
        val_data_args = arguments['val_data_args']
        if val_data_args['validate_step_size'] > 0 and \
                epoch % val_data_args['validate_step_size'] == 0:

            model.eval()
            validation_dataloader = dataset.validation_dataloader
            logger.info(f"Validation, Epoch {epoch + 1}/{nb_epochs}")

            val_loss, val_accuracy = evaluate_single_class(
                device, model, validation_dataloader, criterion)
            logger.info(f'validation images: {dataset.val_dataset_size}, '
                        f'val_acc: {val_accuracy} % '
                        f'val_loss: {val_loss}')
            tb_writer.save_scalar('validation_acc', val_accuracy, epoch)
            tb_writer.save_scalar('validation_loss', val_loss, epoch)
            """ Save Model """
            if val_accuracy > max_validation_acc:
                max_validation_acc = val_accuracy
                if best_validation_model_path is not None:
                    os.remove(best_validation_model_path)
                best_validation_model_path = os.path.join(
                    outdir,
                    f'epoch_{epoch:04}-model-val_acc_{val_accuracy}.pth')
                torch.save(model.state_dict(), best_validation_model_path)
                logger.info(f'Model saved at: {best_validation_model_path}')

        if lr_scheduler:
            prev_lr = lr_scheduler.get_last_lr()
            lr_scheduler.step()
            if lr_scheduler.get_last_lr() != prev_lr:
                logger.warn(
                    f'Updated LR from {prev_lr} to {lr_scheduler.get_last_lr()}'
                )

    logger.info('Finished Training')
    logger.info(f'Max Validation accuracy is {max_validation_acc}')
    """ Create a symbolic link to the best model at a static path 'best_model.pth' """
    symlink_path = os.path.join(outdir, 'best_model.pth')
    if os.path.islink(symlink_path):
        os.unlink(symlink_path)
    os.symlink(os.path.basename(best_validation_model_path), symlink_path)
    logger.info(f'Best model saved at {best_validation_model_path} '
                f'and symlinked to {symlink_path}')
    """ Evaluate model on test set """
    model.load_state_dict(torch.load(best_validation_model_path), strict=False)
    test_dataloader = dataset.test_dataloader
    test_loss, test_accuracy = evaluate_single_class(device, model,
                                                     test_dataloader,
                                                     criterion)
    logger.info(f'Accuracy of the network on the {dataset.test_dataset_size} '
                f'test images: {test_accuracy} %')
    return test_loss, test_accuracy
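The loop above calls `evaluate_single_class(device, model, dataloader, criterion)` and expects a `(loss, accuracy)` pair back, but the helper itself is not shown. A minimal sketch consistent with those call sites, mirroring the training loop's own loss and accuracy accounting (the exact metric and reductions are assumptions):

import torch

def evaluate_single_class(device, model, dataloader, criterion):
    """Return (average loss, accuracy in percent) over a dataloader."""
    model.eval()
    total_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            total_loss += criterion(outputs, labels).item() * labels.size(0)
            _, predicted = torch.max(outputs, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)
    return total_loss / total, 100 * correct / total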