Example #1
def load_env(args):
    env = 'dev'
    if len(args) > 1:
        arg = args[1].split('=')
        if len(arg) > 1 and arg[0] == '--active' and arg[1] == 'prod':
            env = arg[1]
    logger.info('Current environment: ' + env)
    config.load_config(env)
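A minimal usage sketch (not from the original project): `load_env` expects an
argv-style list, so the entry point below is an assumption about how it is wired up.

import sys

if __name__ == '__main__':
    # `python app.py --active=prod` selects prod; anything else keeps 'dev'
    load_env(sys.argv)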
Example #2
def get_repository(config, name):
    if config is None:
        config = load_config()
    section_name = repo_section_name(name)
    if not config.has_section(section_name):
        raise ValueError("No repository found: " + name)
    return config[section_name]
Example #3
def main(config_filepath):

    config = load_config(config_filepath)

    # Confirm before overwriting any existing output file
    for path in (
        config.x_train_output_path,
        config.y_train_output_path,
        config.x_test_output_path,
        config.y_test_output_path,
        config.covariance_output_path,
        config.means_output_path,
        config.clip_values_output_path,
    ):
        if os.path.isfile(path):
            click.confirm(f"Overwrite {path}?", abort=True)


    np.random.seed(config.seed)
    torch.manual_seed(config.seed)

    # Load data
    (
        (x_train, y_train),
        (x_test, y_test),
        min_pixel_value,
        max_pixel_value,
    ) = load_mnist()
    clip_values = {
        "min_pixel_value": min_pixel_value,
        "max_pixel_value": max_pixel_value,
    }

    # Swap axes to PyTorch's NCHW format

    x_train = np.swapaxes(x_train, 1, 3).astype(np.float32)
    x_test = np.swapaxes(x_test, 1, 3).astype(np.float32)

    x_obvs = torch.Tensor(x_train)

    means = x_obvs.mean(dim=0)
    means = means.unsqueeze(dim=0)

    x_obvs = x_obvs.reshape(
        (-1, 1, 28 * 28)
    )  # flatten so we can get a real covariance matrix
    covariance = torch.Tensor(np.cov(x_obvs[:, 0, :], rowvar=False))

    # Save data
    torch.save(x_train, config.x_train_output_path)
    torch.save(y_train, config.y_train_output_path)
    torch.save(x_test, config.x_test_output_path)
    torch.save(y_test, config.y_test_output_path)
    torch.save(covariance, config.covariance_output_path)
    torch.save(means, config.means_output_path)

    with open(config.clip_values_output_path, mode="w") as f:
        json.dump(clip_values, f)
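As a quick shape check for the statistics above (a standalone sketch, assuming
MNIST's 28x28 inputs): flattening gives 784-dimensional rows, and np.cov with
rowvar=False treats those 784 pixels as variables and the images as
observations, so the covariance matrix comes out 784x784.

import numpy as np

demo = np.random.rand(100, 784).astype(np.float32)  # 100 fake flattened images
assert np.cov(demo, rowvar=False).shape == (784, 784)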
Example #4
def load_srv_config():
    """ read configs from config file, and simulate a static config class """
    
    log = logging.getLogger('server')
    config_filepath = "srv_config.conf"
    dic = load_config(log, os.path.abspath(config_filepath))
    for k,v in dic.items():
        _dict[k] = v
Example #5
def load_client_config():
    """ read configs from config file, and simulate a static config class """
    
    log = logging.getLogger('client')
    config_filepath = os.path.abspath("../client/client_config.conf")
    dic = load_config(log, config_filepath)
    for k, v in dic.items():
        _dict[k] = v
Example #6
    def install_update(self):
        if not self._update_ready or self._status == UPDATER_STATUS_INSTALLING:
            return False
        self._status = UPDATER_STATUS_INSTALLING
        self.emit_status()
        logger.info('Installing update')
        try:
            assert self._update_file_path and isfile(self._update_file_path)
            logger.debug("self._update_file_path %s", self._update_file_path)
            path, name = split(self._update_file_path)
            old_cwd = os.getcwd()
            os.chdir(path)
            system = get_platform()
            if system == 'Windows':
                from common.config import load_config

                config = load_config()
                root = config.sync_directory
                log_basename = time.strftime('%Y%m%d_%H%M%S.log')
                log_filename = get_bases_filename(root, log_basename)
                if not self._is_ascii(log_filename):
                    log_filename = log_basename
                args = [name, '/verysilent', '/Log={}'.format(log_filename)]
                if is_portable():
                    args.append('/PATH={}'.format(get_application_path()))
                subprocess.Popen(
                    args,
                    creationflags=0x00000200  # CREATE_NEW_PROCESS_GROUP
                    | 0x00000008,  # DETACHED_PROCESS
                    close_fds=True)
            elif system == 'Darwin':
                bundle_path = normpath(
                    join(get_application_path(), '..', '..', '..', '..'))
                logger.debug("bundle_path: %s", bundle_path)
                subprocess.call(
                    ['ditto', '-xk', self._update_file_path, bundle_path])
                subprocess.call(
                    ['xattr', '-d', '-r', 'com.apple.quarantine', bundle_path])
                logger.debug("Update completed, restart")
                remove_file(get_cfg_filename('lock'))
                if is_portable():
                    launcher_path = normpath(
                        join(bundle_path, "..", "Pvtbox-Mac.command"))
                else:
                    launcher_path = bundle_path
                subprocess.call(['open', launcher_path])
            os.chdir(old_cwd)
            Application.exit()
        except Exception as e:
            logger.warning("Can't install update. Reason: %s", e)
            self._status = UPDATER_STATUS_INSTALL_ERROR
            self.emit_status()
            return False

        self._status = UPDATER_STATUS_INSTALLED
        self.emit_status()
        return True
Example #7
def delete_repository(name):
    """
    Deletes a configured Maven repository.
    """
    config = load_config()
    section_name = repo_section_name(name)
    if config.has_section(section_name):
        config.remove_section(section_name)
        save_config(config)
Example #8
def view_stagedir():
    """
    Views the output directory where mvnfeed will be staged.
    """
    config = load_config()
    if "general" not in config:
        return ""

    return config.get("general", STAGE_DIR_CONFIGNAME)
Example #9
def main(config_filepath):

    config = load_config(config_filepath)

    if os.path.isfile(config.metrics_output_path):
        click.confirm(f"Overwrite {config.metrics_output_path}?", abort=True)

    np.random.seed(config.seed)
    torch.manual_seed(config.seed)

    # Load data
    x = torch.load(config.x_filepath)
    y = torch.load(config.y_filepath)

    # Flatten
    x = x.reshape(x.shape[0], -1)

    model = torch.load(config.trained_model_filepath)

    with open(config.clip_values_filepath, "r") as f:
        clip_values = json.load(f)
    clip_values = (
        clip_values.get("min_pixel_value"),
        clip_values.get("max_pixel_value"),
    )

    classifier = PyTorchClassifier(
        model=model,
        clip_values=clip_values,
        loss=model.criterion,
        optimizer=model.optimizer,
        input_shape=(1, 28, 28),
        nb_classes=10,
    )  # TODO: move these parameters to config

    # Evaluate the classifier on benign data
    predictions = classifier.predict(x)

    # Convert one-hots to numbers for metrics
    y = utils.one_hot_to_num(y)
    predictions = utils.one_hot_to_num(predictions)
    accuracy = {
        "Accuracy": metrics.accuracy_score(y, predictions),
        "Confusion Matrix": metrics.confusion_matrix(y, predictions).tolist(),
    }

    # Save data
    with open(config.metrics_output_path, "w") as f:
        json.dump(
            accuracy,
            f,
            ensure_ascii=False,
            sort_keys=True,
            indent=4,
            separators=(",", ": "),
        )
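For reference, converting one-hot rows to class indices is just an argmax over
the class axis; `utils.one_hot_to_num` is the project's own helper, so this toy
check only illustrates the behavior it presumably has:

import numpy as np

one_hot = np.array([[0, 0, 1], [1, 0, 0]])
assert (np.argmax(one_hot, axis=1) == np.array([2, 0])).all()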
Example #10
def get_stagedir(config):
    """
    Returns the value of the stage directory.
    """
    if config is None:
        config = load_config()
    if "general" not in config:
        return None

    return config.get("general", STAGE_DIR_CONFIGNAME)
Example #11
def list_repositories():
    """
    Lists the configured Maven repositories.
    """
    config = load_config()
    for section in config.sections():
        if section.startswith(REPOSITORY):
            repo = config[section]
            print(section[11:])  # drop the 11-char section prefix, presumably "repository."
            if URL in repo:
                print("  url : " + repo[URL])
Example #12
def main(config_filepath):

    config = load_config(config_filepath)

    if os.path.isfile(config.output_path):
        click.confirm(f"Overwrite {config.output_path}?", abort=True)

    metrics = {}
    with open(config.metrics_filepath, "r") as f:
        metrics = json.load(f)

    accuracy = metrics.get("Accuracy")
    misclass = 1 - accuracy
    cm = np.array(metrics.get("Confusion Matrix"))

    cmap = plt.get_cmap("Blues")

    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation="nearest", cmap=cmap)
    plt.title(config.title)
    plt.colorbar()

    target_names = [str(t) for t in range(10)]
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)

    if config.normalize:
        cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]

    thresh = cm.max() / 1.5 if config.normalize else cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if config.normalize:
            plt.text(
                j,
                i,
                f"{cm[i, j]:0.4f}",
                horizontalalignment="center",
                color="white" if cm[i, j] > thresh else "black",
            )
        else:
            plt.text(
                j,
                i,
                f"{cm[i, j]:,}",
                horizontalalignment="center",
                color="white" if cm[i, j] > thresh else "black",
            )

    # plt.tight_layout()
    plt.ylabel("True label")
    plt.xlabel(
        f"Predicted label\naccuracy={accuracy:0.4f}; misclass={misclass:0.4f}")
    plt.savefig(config.output_path)
Example #13
def main(config_filepath):

    config = load_config(config_filepath)

    if os.path.isfile(config.x_adv_output_path):
        click.confirm(f"Overwrite {config.x_adv_output_path}?", abort=True)

    np.random.seed(config.seed)
    torch.manual_seed(config.seed)

    # Load data
    x = torch.load(config.x_filepath)
    x_shape = x.shape
    x = x.reshape(x.shape[0], -1)
    y = torch.load(config.y_filepath)
    ace = torch.load(config.ace_filepath).cpu()
    interventions = torch.load(config.interventions_filepath).cpu()

    model = torch.load(config.model_filepath)

    with open(config.clip_values_filepath, "r") as f:
        clip_values = json.load(f)
    clip_values = (
        clip_values.get("min_pixel_value"),
        clip_values.get("max_pixel_value"),
    )

    classifier = PyTorchClassifier(
        model=model,
        clip_values=clip_values,
        loss=model.criterion,
        optimizer=model.optimizer,
        input_shape=(1, 28, 28),
        nb_classes=10,
    )  # TODO: move these parameters to config

    # Target the attacks to a particular class
    y_adv = torch.zeros_like(torch.from_numpy(y))
    y_adv[:, 0] = 1.0

    # Generate attacks
    x_adv = ace_attack(
        ace,
        interventions,
        torch.from_numpy(x),
        target_classes=y_adv,
        norm=2,
        budget=5.0,
    )  # TODO: move these parameters to config

    # Unflatten for saving
    x_adv = x_adv.reshape(x_shape)
    torch.save(x_adv, config.x_adv_output_path)
Example #14
def set_stagedir(path):
    """
    Sets the output directory where dependencies will be staged.

    :param path: path where the downloaded mvnfeed will be staged
    """
    config = load_config()
    if "general" not in config:
        config.add_section("general")

    config.set("general", STAGE_DIR_CONFIGNAME, path)
    save_config(config)
Example #15
def main(config_filepath):

    config = load_config(config_filepath)

    if os.path.isfile(config.interventional_expectations_output_path):
        click.confirm(
            f"Overwrite {config.interventional_expectations_output_path}?",
            abort=True)
    if os.path.isfile(config.average_causal_effects_output_path):
        click.confirm(
            f"Overwrite {config.average_causal_effects_output_path}?",
            abort=True)
    if os.path.isfile(config.interventions_output_path):
        click.confirm(f"Overwrite {config.interventions_output_path}?",
                      abort=True)

    model = torch.load(config.model_filepath)
    cov = torch.load(config.covariance_filepath)
    mean = torch.load(config.means_filepath)

    # Flatten the mean
    mean = mean.reshape(1, -1)

    with open(config.clip_values_filepath, "r") as f:
        clip_values = json.load(f)
    clip_values = (
        clip_values.get("min_pixel_value"),
        clip_values.get("max_pixel_value"),
    )

    num_alphas = 10
    interventions = torch.Tensor(np.linspace(*clip_values, num_alphas))
    ie = ace.interventional_expectation(
        model,
        mean,
        cov,
        interventions,
        epsilon=0.000001,
        method="hessian_diag",
        progress=True,
    )
    avg_ce = ace.average_causal_effect(ie)

    # Remove names for now since named tensors aren't serializable
    # Also detach grads so they are not saved.
    ie = ie.rename(None).detach()
    avg_ce = avg_ce.rename(None).detach()

    torch.save(ie, config.interventional_expectations_output_path)
    torch.save(avg_ce, config.average_causal_effects_output_path)
    torch.save(interventions, config.interventions_output_path)  # already a Tensor
Example #16
def check_sync_folder_removed():
    from application.app_config import load_config as load_main_config
    from common.config import load_config
    from common.file_path import FilePath
    from common.utils import get_bases_dir

    main_cfg = load_main_config(check=False)
    if not main_cfg.get_setting('user_email'):
        return False

    config = load_config()
    root = FilePath(config.sync_directory).longpath
    return not isdir(root) or not isdir(get_bases_dir(root))
Example #17
def app(ctx):
    """
        Use the EC2 Instance Connect tools, mssh and msftp, with instance names instead of
        instance ids via the commands ec2 ssh and ec2 sftp.
        Boto3 is used to retrieve instance ids and the correct OS user for named instances.
        This only works if your instances have unique names that contain no whitespace.
    """
    # Entry point for CLI.
    # We just want some object for the context, to carry variables.
    ctx.obj = EmptyObj()
    public_key = Path.home() / ".ssh/id_rsa.pub"

    if not public_key.exists():
        public_key = None
        click.echo(
            "Rsa key not found. Please generate one with: ssh-keygen -t rsa -f ~/.ssh/id_rsa. Some commands will be unavailable."
        )
    else:
        public_key = public_key.read_text()

    ctx.obj.public_key = public_key

    # Load custom config.
    try:
        ctx.obj.cfg, ctx.obj.config_file = load_config(
            Path(click.get_app_dir(APP_NAME)))
    except json.decoder.JSONDecodeError:
        raise click.ClickException(
            f"Your configuration file is invalid JSON. Please fix: {Path(click.get_app_dir(APP_NAME))}"
        )

    # Confirm AWS credentials.
    ctx.obj.ec2 = boto3.client("ec2")
    ctx.obj.ec2ic = boto3.client("ec2-instance-connect")

    try:
        _ = ctx.obj.ec2.describe_instances(DryRun=True)
    except ClientError as e:
        if "DryRunOperation" not in str(e):
            raise click.ClickException(
                "Failure. Your AWS credentials do not authorize: describe_instances"
            )
    try:
        _ = ctx.obj.ec2.describe_images(DryRun=True)
    except ClientError as e:
        if "DryRunOperation" not in str(e):
            raise click.ClickException(
                "Failure. Your AWS credentials do not authorize: describe_images"
            )
Example #18
def download_artifact(name, repo_name, download_deps=False):
    """
    Downloads a single artifact.

    :param name: name of the artifact to download, following the group_id:artifact_id:version format
    :param repo_name: name of the repository to look for artifacts
    :param download_deps: True if the dependencies must be downloaded
    """
    logging.info("downloading %s", name)

    config = load_config()
    repository = get_repository(config, repo_name)
    stage_dir = get_stagedir(config)

    _download_single_artifact(name, repository, stage_dir, download_deps)
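A hypothetical call, following the group_id:artifact_id:version format named in
the docstring (the repository name and coordinates here are illustrative, not
from the project):

download_artifact("org.apache.commons:commons-lang3:3.12.0", "central",
                  download_deps=True)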
Example #19
    def start(self, app_start_ts, args):
        '''
        Performs application initialization and launch

        @raise SystemExit
        '''
        logger.debug("Launching service with args (%s)...", args)

        clear_old_logs(logger)
        # Load configuration file
        self._cfg = config.load_config()
        set_max_log_size_mb(logger, max(self._cfg.max_log_size, 0.02))
        if self._cfg.copies_logging:
            copies_logger = logging.getLogger('copies_logger')
            set_max_log_size_mb(copies_logger, max(self._cfg.max_log_size,
                                                   0.02))

        if 'sync_directory' in args and args['sync_directory']:
            self._cfg.set_settings(
                {'sync_directory': args['sync_directory'].decode('utf-8')})

        if 'wipe_internal' in args and args['wipe_internal']:
            try:
                wipe_internal(self._cfg.sync_directory)
            except Exception as e:
                logger.warning("Can't wipe internal info. Reason: %s", e)

            raise SystemExit(0)

        if self._cfg.tracking_address:
            self._tracker = Tracker('service_stats.db',
                                    self._cfg.sync_directory,
                                    self._cfg.tracking_address)
            init_crash_handler(self._tracker)
            self._tracker_thread = QThread()
            self._tracker.moveToThread(self._tracker_thread)
            self._tracker_thread.started.connect(self._tracker.start.emit)
            self._tracker_thread.start(QThread.IdlePriority)
        else:
            init_crash_handler(logger=logger)

        self._worker = ApplicationWorker(self._cfg, self._tracker,
                                         app_start_ts, args)
        self._worker.exited.connect(self._on_exit, Qt.QueuedConnection)

        self._worker.start_work()
        self.exec_()
        logger.debug("Service exiting...")
Example #20
def main(config_filepath):

    config = load_config(config_filepath)

    if os.path.isfile(config.x_adv_output_path):
        click.confirm(f"Overwrite {config.x_adv_output_path}?", abort=True)

    seed = 45616451
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Load data
    x = torch.load(config.x_filepath)
    x_shape = x.shape
    y = torch.load(config.y_filepath)

    # Flatten test set
    x = x.reshape(x.shape[0], -1)

    model = torch.load(config.model_filepath)

    with open(config.clip_values_filepath, "r") as f:
        clip_values = json.load(f)
    clip_values = (
        clip_values.get("min_pixel_value"),
        clip_values.get("max_pixel_value"),
    )

    classifier = PyTorchClassifier(
        model=model,
        clip_values=clip_values,
        loss=model.criterion,
        optimizer=model.optimizer,
        input_shape=(1, 28, 28),
        nb_classes=10,
    )  # TODO: move these parameters to config

    # Generate attacks
    attack = FastGradientMethod(
        classifier=classifier,
        eps=0.2)  # TODO: move these parameters to config
    x_adv = attack.generate(x=x)

    # Reshape adversarial examples back to original test data shape
    x_adv = torch.from_numpy(x_adv.reshape(x_shape))
    torch.save(x_adv, config.x_adv_output_path)
Example #21
def main(config_filepath):

    config = load_config(config_filepath)

    if (os.path.isfile(config.x_train_output_path)
            or os.path.isfile(config.y_train_output_path)
            or os.path.isfile(config.covariance_output_path)
            or os.path.isfile(config.means_output_path)):
        click.confirm(f"Overwrite files?", abort=True)

    np.random.seed(config.seed)
    torch.manual_seed(config.seed)

    dataset = pd.read_csv(config.input_filepath)
    dataset = pd.get_dummies(dataset, columns=["species"])  # One Hot Encoding
    values = list(dataset.columns.values)

    y = dataset[values[-3:]]
    y = np.array(y, dtype="float32")
    x = dataset[values[:-3]]
    x = np.array(x, dtype="float32")

    # Shuffle Data
    indices = np.random.choice(len(x), len(x), replace=False)
    x_values = x[indices]

    scaler = MinMaxScaler()

    # Fit the scaler on the training slice only, then apply it to the whole
    # shuffled set so the held-out rows don't influence the scaling.
    test_size = 30
    x_train = x_values[:-test_size]
    scaler.fit(x_train)

    x_values = scaler.transform(x_values)
    y_values = y[indices]

    x_tensor = torch.Tensor(x_values)
    y_tensor = torch.Tensor(y_values)

    # Calculate observational statistics
    covariance = torch.Tensor(np.cov(x_values, rowvar=False))
    means = torch.Tensor(np.mean(x_values, axis=0))

    # Save data
    torch.save(x_tensor, config.x_train_output_path)
    torch.save(y_tensor, config.y_train_output_path)
    torch.save(covariance, config.covariance_output_path)
    torch.save(means, config.means_output_path)
Example #22
def main(config_filepath):

    config = load_config(config_filepath)

    if os.path.isfile(config.model_output_path):
        click.confirm(f"Overwrite {config.model_output_path}?", abort=True)

    np.random.seed(config.seed)
    torch.manual_seed(config.seed)

    # Load data
    x = torch.load(config.x_filepath)
    y = torch.load(config.y_filepath)

    # Flatten training set
    x = x.reshape(x.shape[0], -1)

    with open(config.clip_values_filepath, "r") as f:
        clip_values = json.load(f)
    clip_values = (
        clip_values.get("min_pixel_value"),
        clip_values.get("max_pixel_value"),
    )

    model = get_model_from_module(mnist.models, config.model_class_name)

    if not model:
        sys.exit(f"Could not load provided model {config.model_class_name}")

    classifier = PyTorchClassifier(
        model=model,
        clip_values=clip_values,
        loss=model.criterion,
        optimizer=model.optimizer,
        input_shape=(784,),  # note the comma: (784) without it is just an int, not a tuple
        nb_classes=10,
    )  # TODO: move these parameters to config

    # Train classifier
    classifier.fit(x, y, batch_size=config.batch_size, nb_epochs=config.num_epochs)

    # Save data
    torch.save(model, config.model_output_path)
Example #23
def main(config_filepath):

    config = load_config(config_filepath)

    if os.path.isfile(config.metrics_output_path):
        click.confirm(f"Overwrite {config.metrics_output_path}?", abort=True)

    ace = torch.load(config.ace_filepath).cpu().numpy()

    num_monotonic = 0
    num_nonmonotonic = 0
    extrema_dict = {}

    for x in range(ace.shape[0]):
        for y in range(ace.shape[1]):
            alphas = ace[x, y, :]
            diff = np.diff(alphas)
            if np.all(diff >= 0) or np.all(diff <= 0):
                num_monotonic += 1
            else:
                num_nonmonotonic += 1
                maxima = argrelextrema(alphas, np.greater)[0]
                minima = argrelextrema(alphas, np.less)[0]
                extrema_count = len(maxima) + len(minima)
                extrema_dict[extrema_count] = extrema_dict.get(extrema_count, 0) + 1

    metrics = {
        "Monotonic": num_monotonic,
        "Non-monotonic": num_nonmonotonic,
        "Extrema": extrema_dict,
    }

    with open(config.metrics_output_path, "w") as f:
        json.dump(
            metrics,
            f,
            ensure_ascii=False,
            sort_keys=True,
            indent=4,
            separators=(",", ": "),
        )
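A toy check of the monotonicity logic above, separate from the pipeline: the
curve below rises, dips, then rises again, so np.diff has mixed signs and
argrelextrema finds one maximum and one minimum.

import numpy as np
from scipy.signal import argrelextrema

alphas = np.array([0.0, 0.5, 0.3, 0.8, 0.8])
diff = np.diff(alphas)  # [0.5, -0.2, 0.5, 0.0] -> mixed signs, non-monotonic
assert not (np.all(diff >= 0) or np.all(diff <= 0))
maxima = argrelextrema(alphas, np.greater)[0]  # [1]: 0.5 beats both neighbors
minima = argrelextrema(alphas, np.less)[0]     # [2]: 0.3 is below both neighbors
assert len(maxima) + len(minima) == 2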
Example #24
def download_bulk(filename, repo_name, download_deps=False):
    """
    Downloads artifacts from a file, one artifact per line.

    :param filename: name of the file containing the artifacts to download
    :param repo_name: name of the repository to look for artifacts
    :param download_deps: True if the dependencies must be downloaded
    """
    logging.info("downloading from file %s", filename)

    config = load_config()
    repository = get_repository(config, repo_name)
    stage_dir = get_stagedir(config)

    with open(filename, "r") as file:
        lines = file.readlines()
        for line in lines:
            line = line.strip().rstrip()
            if line:
                _download_single_artifact(line, repository, stage_dir,
                                          download_deps)
Example #25
def add_repository(name, username, url=None):
    """
    Adds an external Maven repository.

    :param name: internal name of the repository
    :param username: name of the user for the basic authentication to the repository
    :param url: url of the repository
    """
    if username is None:
        raise ValueError("Username must be defined")
    if url is None:
        raise ValueError("Url must be defined")

    password = getpass.getpass()
    encoded = base64.b64encode((username + ":" + password).encode("utf-8"))
    authorization = "Basic " + encoded.decode("utf-8")

    config = load_config()
    config[repo_section_name(name)] = {
        URL: _default_value(url),
        AUTHORIZATION: _default_value(authorization),
    }
    save_config(config)
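A hypothetical call; add_repository prompts for the password interactively via
getpass, so only the name, user, and url are passed (all values here are
illustrative):

add_repository("internal", "deploy-bot", url="https://repo.example.com/maven2")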
Example #26
def test_find_equivalent_variant_whole_seq(fetch_seq_mock_data):
    with patch.object(bioutils.seqfetcher,
                      'fetch_seq',
                      side_effect=lambda ac, s, e: fetch_seq_mock_data[
                          (str(ac), str(s), str(e))]):
        gene_config_path = os.path.join(pwd, 'test_files',
                                        'gene_config_test.txt')

        cfg = load_config(gene_config_path)
        regions = extract_gene_regions_dict(cfg, 'start_hg38_legacy_variants',
                                            'end_hg38_legacy_variants').keys()
        seq_wrapper = seq_utils.SeqRepoWrapper(regions_preload=regions)

        # empty case
        assert [] == find_equivalent_variants_whole_seq({}, seq_wrapper)

        # a bunch of variants. If they appear in the same set, they are considered equivalent
        example_variants = [
            frozenset({'chr13:g.32355030:A>AA'}),
            frozenset({'chr13:g.32339774:GAT>G', 'chr13:g.32339776:TAT>T'}),
            frozenset({'chr17:g.43090921:G>GCA', 'chr17:g.43090921:GCA>GCACA'})
        ]

        # construct variant dict (flattening example_variants!)
        variant_dict = {
            v: VCFVariant(int(v.split(':')[0].lstrip('chr')),
                          int(v.split(':')[1].lstrip('g.')),
                          v.split(':')[2].split('>')[0],
                          v.split(':')[2].split('>')[1])
            for eq_variants in example_variants for v in eq_variants
        }

        whole_seq_provider = seq_utils.WholeSeqSeqProvider(seq_wrapper)

        assert frozenset(example_variants) == frozenset(
            find_equivalent_variants_whole_seq(variant_dict,
                                               whole_seq_provider))
Example #27
def main(config_filepath):

    config = load_config(config_filepath)

    if (
        os.path.isfile(config.interventional_expectations_output_path)
        or os.path.isfile(config.average_causal_effects_output_path)
        or os.path.isfile(config.interventions_output_path)
    ):
        click.confirm(f"Overwrite files?", abort=True)

    model = torch.load(config.model_filepath)
    wrapped_model = wrap_model_activation(
        model
    )  # Iris uses a softmax activation, but I didn't want to build that assumption into the ace calculation.
    cov = torch.load(config.covariance_filepath)
    mean = torch.load(config.means_filepath)

    interventions = torch.Tensor(np.linspace(0, 1, 1000))
    ie = ace.interventional_expectation(
        wrapped_model,
        mean,
        cov,
        interventions,
        epsilon=config.epsilon,
        method=config.method,
        progress=config.show_progress,
    )
    avg_ce = ace.average_causal_effect(ie)

    # Also detach grads so they are not saved.
    ie = ie.detach()
    avg_ce = avg_ce.detach()

    torch.save(ie, config.interventional_expectations_output_path)
    torch.save(avg_ce, config.average_causal_effects_output_path)
    torch.save(interventions, config.interventions_output_path)  # already a Tensor
Example #28
"""
Database seeding: load initial data into the database.
"""
import psycopg2

from common.config import load_config

CONF = load_config()

CONN = psycopg2.connect(host=CONF["postgres"]["host"], port=CONF["postgres"]["port"],
                        dbname=CONF["postgres"]["db"], user=CONF["postgres"]["user"],
                        password=CONF["postgres"]["password"])
CURSOR = CONN.cursor()

with open("../bin/db_seed/addresses.csv", "r", encoding="utf-8") as f:
    CURSOR.copy_from(f, "addresses", sep=",")
    CONN.commit()

with open("../bin/db_seed/users.csv", "r", encoding="utf-8") as f:
    CURSOR.copy_from(f, "users", sep=",")
    CONN.commit()

with open("../bin/db_seed/schools.csv", "r", encoding="utf-8") as f:
    CURSOR.copy_from(f, "schools", sep=",")
    CONN.commit()


with open("../bin/db_seed/students.csv", "r", encoding="utf-8") as f:
    CURSOR.copy_from(f, "students", sep=",")
    CONN.commit()
Example #29
# Imports reconstructed for this excerpt: the Flask-extension imports are
# standard; the project-local paths for Config, api, and load_logging_conf are
# assumptions and may differ in the real project.
from flask import Flask
from flask_cors import CORS
from flask_migrate import Migrate
from flask_jwt_extended import JWTManager

import api                              # assumed project-local API blueprint module
from common.config import load_config   # assumed, matching the sibling examples
from config import Config               # hypothetical home of the Config object
from log_conf import load_logging_conf  # hypothetical logging helper

from database import db, SQLALCHEMY_DATABASE_URI


def create_app():
    app = Flask(__name__)
    app.config.update(SQLALCHEMY_DATABASE_URI=SQLALCHEMY_DATABASE_URI)
    CORS(app)
    app.config.from_object(Config)
    db.init_app(app)
    Migrate(app, db)
    # Imported for side effects: registering the models with SQLAlchemy/Migrate
    from models import user, student, school, address, transfer, attendance, donation, extrafund
    app.register_blueprint(api.api)
    return app


conf = load_config()
load_logging_conf(conf["common"]["log"]["conf"])

app = create_app()
jwt = JWTManager(app)

api.jwt = jwt

if __name__ == "__main__":
    try:
        app.run(host=conf["common"]["server"]["host"],
                port=conf["common"]["server"]["port"],
                threaded=True,
                debug=True)
    finally:
        pass
Example #30
import ast
import glob
import os
import re
from pathlib import Path
from time import time
from termcolor import colored
from sympy import factor, Interval

## Importing my code
from common.config import load_config
from common.convert import parse_numbers, parse_interval_bounds
from common.files import pickle_load
from common.my_storm import parse_2D_refinement_into_space, merge_2D_refinements, merge_multidim_refinements

spam = load_config()
data_path = spam["data"]
results_dir = spam["results"]
prism_results = spam["prism_results"]
storm_results = spam["storm_results"]
del spam

###############################
###   RATIONAL FUNCTIONS    ###
###############################


def load_mc_result(file_path,
                   tool="unknown",
                   factorize=True,
                   rewards_only=False,
Example #31
# Delete a directory on the target machine
def _delete_file(ip, target_dir):
    logger.info('Starting directory removal on: ' + ip)
    del_str = 'ls'  # harmless no-op default in case the path guard below fails
    if target_dir and '/home/centos/' in target_dir:
        del_str = 'rm -rf ' + target_dir
    # Remove the files over ssh
    del_cmd = 'ssh {} {}@{} sudo {}'.format(conf().IDENTITY,
                                            conf().USER_NAME, ip, del_str)
    res_del = shell_util.exec_cmd(del_cmd)
    logger.info('Removal result: ' + str(res_del))


if __name__ == '__main__':
    config.load_config('dev')
    name = 'TTANode1'
    p2p_str = 'fd69dfa8baa7cb0dc91a10cdc1eac00506d05b5e@TTANode3:26656'
    genesis_dict = {
        'genesis_time':
        '2019-12-21T08:15:49.6296137Z',
        'chain_id':
        'test-chain-hVOXVd',
        'consensus_params': {
            'block': {
                'max_bytes': '22020096',
                'max_gas': '-1',
                'time_iota_ms': '1000'
            },
            'evidence': {
                'max_age': '100000'
Example #32
    def setUp(self):
        self.columns = [
            'Source', 'Gene_symbol_ENIGMA', 'Genomic_Coordinate', 'Chr', 'Pos',
            'Ref', 'Alt', 'Reference_sequence_ENIGMA', 'HGVS_cDNA_ENIGMA',
            'BIC_Nomenclature_ENIGMA', 'Abbrev_AA_change_ENIGMA', 'URL_ENIGMA',
            'Condition_ID_type_ENIGMA', 'Condition_ID_value_ENIGMA',
            'Condition_category_ENIGMA', 'Clinical_significance_ENIGMA',
            'Date_last_evaluated_ENIGMA', 'Assertion_method_ENIGMA',
            'Assertion_method_citation_ENIGMA',
            'Clinical_significance_citations_ENIGMA',
            'Comment_on_clinical_significance_ENIGMA',
            'Collection_method_ENIGMA', 'Allele_origin_ENIGMA',
            'ClinVarAccession_ENIGMA', 'HGVS_protein_ENIGMA', 'BX_ID_ENIGMA',
            'Clinical_Significance_ClinVar', 'Date_Last_Updated_ClinVar',
            'BX_ID_ClinVar', 'HGVS_ClinVar', 'Submitter_ClinVar',
            'Protein_ClinVar', 'SCV_ClinVar', 'Allele_Origin_ClinVar',
            'Method_ClinVar', 'Description_ClinVar',
            'Summary_Evidence_ClinVar', 'Review_Status_ClinVar',
            'Condition_Type_ClinVar', 'Condition_Value_ClinVar',
            'Condition_DB_ID_ClinVar', 'Individuals_LOVD', 'BX_ID_LOVD',
            'Variant_effect_LOVD', 'Variant_frequency_LOVD', 'HGVS_cDNA_LOVD',
            'HGVS_protein_LOVD', 'Genetic_origin_LOVD', 'RNA_LOVD',
            'Submitters_LOVD', 'DBID_LOVD', 'Created_date_LOVD',
            'Edited_date_LOVD', 'Submission_ID_LOVD', 'Remarks_LOVD',
            'Classification_LOVD', 'BX_ID_ESP',
            'Minor_allele_frequency_percent_ESP', 'EA_Allele_Frequency_ESP',
            'AA_Allele_Frequency_ESP', 'Allele_Frequency_ESP',
            'polyPhen2_result_ESP', 'EUR_Allele_frequency_1000_Genomes',
            'AFR_Allele_frequency_1000_Genomes',
            'AMR_Allele_frequency_1000_Genomes',
            'EAS_Allele_frequency_1000_Genomes', 'BX_ID_1000_Genomes',
            'Allele_frequency_1000_Genomes',
            'SAS_Allele_frequency_1000_Genomes', 'Allele_frequency_ExAC',
            'BX_ID_ExAC', 'BX_ID_BIC', 'Patient_nationality_BIC',
            'Clinical_importance_BIC', 'Clinical_classification_BIC',
            'BIC_Designation_BIC', 'Literature_citation_BIC',
            'Number_of_family_member_carrying_mutation_BIC',
            'Germline_or_Somatic_BIC', 'Ethnicity_BIC', 'Mutation_type_BIC',
            'IARC_class_exLOVD', 'BIC_Nomenclature_exLOVD',
            'Sum_family_LR_exLOVD', 'Combined_prior_probablility_exLOVD',
            'BX_ID_exLOVD', 'HGVS_cDNA_exLOVD', 'Literature_source_exLOVD',
            'Co_occurrence_LR_exLOVD', 'Posterior_probability_exLOVD',
            'Missense_analysis_prior_probability_exLOVD',
            'Segregation_LR_exLOVD', 'HGVS_protein_exLOVD',
            "Allele_count_AFR_ExAC", "Allele_number_AFR_ExAC",
            "Homozygous_count_AFR_ExAC", "Allele_count_AMR_ExAC",
            "Allele_number_AMR_ExAC", "Homozygous_count_AMR_ExAC",
            "Allele_count_EAS_ExAC", "Allele_number_EAS_ExAC",
            "Homozygous_count_EAS_ExAC", "Allele_count_FIN_ExAC",
            "Allele_number_FIN_ExAC", "Homozygous_count_FIN_ExAC",
            "Allele_count_NFE_ExAC", "Allele_number_NFE_ExAC",
            "Homozygous_count_NFE_ExAC", "Allele_count_OTH_ExAC",
            "Allele_number_OTH_ExAC", "Homozygous_count_OTH_ExAC",
            "Allele_count_SAS_ExAC", "Allele_number_SAS_ExAC",
            "Homozygous_count_SAS_ExAC", "Allele_frequency_AFR_ExAC",
            "Allele_frequency_AMR_ExAC", "Allele_frequency_EAS_ExAC",
            "Allele_frequency_FIN_ExAC", "Allele_frequency_NFE_ExAC",
            "Allele_frequency_OTH_ExAC", "Allele_frequency_SAS_ExAC",
            "DateSignificanceLastEvaluated_ClinVar", "SCV_Version_ClinVar",
            "Synonyms_ClinVar",
            "HGVS_Nucleotide_Findlay_BRCA1_Ring_Function_Scores",
            "Log_RNA_Depletion_Findlay_BRCA1_Ring_Function_Scores",
            "Functional_Enrichment_Score_Findlay_BRCA1_Ring_Function_Scores",
            "BX_ID_Findlay_BRCA1_Ring_Function_Scores", "HGVS_cDNA_GnomAD",
            "HGVS_GnomAD", "HGVS_protein_GnomAD", "Flags_GnomAD",
            "Consequence_GnomAD", "Variant_id_GnomAD",
            "Allele_count_genome_AFR_GnomAD",
            "Allele_count_hemi_genome_AFR_GnomAD",
            "Allele_count_hom_genome_AFR_GnomAD",
            "Allele_number_genome_AFR_GnomAD",
            "Allele_frequency_genome_AFR_GnomAD",
            "Allele_count_genome_AMR_GnomAD",
            "Allele_count_hemi_genome_AMR_GnomAD",
            "Allele_count_hom_genome_AMR_GnomAD",
            "Allele_number_genome_AMR_GnomAD",
            "Allele_frequency_genome_AMR_GnomAD",
            "Allele_count_genome_ASJ_GnomAD",
            "Allele_count_hemi_genome_ASJ_GnomAD",
            "Allele_count_hom_genome_ASJ_GnomAD",
            "Allele_number_genome_ASJ_GnomAD",
            "Allele_frequency_genome_ASJ_GnomAD",
            "Allele_count_genome_EAS_GnomAD",
            "Allele_count_hemi_genome_EAS_GnomAD",
            "Allele_count_hom_genome_EAS_GnomAD",
            "Allele_number_genome_EAS_GnomAD",
            "Allele_frequency_genome_EAS_GnomAD",
            "Allele_count_genome_FIN_GnomAD",
            "Allele_count_hemi_genome_FIN_GnomAD",
            "Allele_count_hom_genome_FIN_GnomAD",
            "Allele_number_genome_FIN_GnomAD",
            "Allele_frequency_genome_FIN_GnomAD",
            "Allele_count_genome_NFE_GnomAD",
            "Allele_count_hemi_genome_NFE_GnomAD",
            "Allele_count_hom_genome_NFE_GnomAD",
            "Allele_number_genome_NFE_GnomAD",
            "Allele_frequency_genome_NFE_GnomAD",
            "Allele_count_genome_OTH_GnomAD",
            "Allele_count_hemi_genome_OTH_GnomAD",
            "Allele_count_hom_genome_OTH_GnomAD",
            "Allele_number_genome_OTH_GnomAD",
            "Allele_frequency_genome_OTH_GnomAD",
            "Allele_count_genome_SAS_GnomAD",
            "Allele_count_hemi_genome_SAS_GnomAD",
            "Allele_count_hom_genome_SAS_GnomAD",
            "Allele_number_genome_SAS_GnomAD",
            "Allele_frequency_genome_SAS_GnomAD", "Allele_count_genome_GnomAD",
            "Allele_number_genome_GnomAD", "Allele_frequency_genome_GnomAD",
            "Allele_count_exome_AFR_GnomAD", "Allele_count_exome_AFR_GnomAD",
            "Allele_count_hom_exome_AFR_GnomAD",
            "Allele_number_exome_AFR_GnomAD",
            "Allele_frequency_exome_AFR_GnomAD",
            "Allele_count_exome_AMR_GnomAD",
            "Allele_count_hemi_exome_AMR_GnomAD",
            "Allele_count_hom_exome_AMR_GnomAD",
            "Allele_number_exome_AMR_GnomAD",
            "Allele_frequency_exome_AMR_GnomAD",
            "Allele_count_exome_ASJ_GnomAD",
            "Allele_count_hemi_exome_ASJ_GnomAD",
            "Allele_count_hom_exome_ASJ_GnomAD",
            "Allele_number_exome_ASJ_GnomAD",
            "Allele_frequency_exome_ASJ_GnomAD",
            "Allele_count_exome_EAS_GnomAD",
            "Allele_count_hemi_exome_EAS_GnomAD",
            "Allele_count_hom_exome_EAS_GnomAD",
            "Allele_number_exome_EAS_GnomAD",
            "Allele_frequency_exome_EAS_GnomAD",
            "Allele_count_exome_FIN_GnomAD",
            "Allele_count_hemi_exome_FIN_GnomAD",
            "Allele_count_hom_exome_FIN_GnomAD",
            "Allele_number_exome_FIN_GnomAD",
            "Allele_frequency_exome_FIN_GnomAD",
            "Allele_count_exome_NFE_GnomAD",
            "Allele_count_hemi_exome_NFE_GnomAD",
            "Allele_count_hom_exome_NFE_GnomAD",
            "Allele_number_exome_NFE_GnomAD",
            "Allele_frequency_exome_NFE_GnomAD",
            "Allele_count_exome_OTH_GnomAD",
            "Allele_count_hemi_exome_OTH_GnomAD",
            "Allele_count_hom_exome_OTH_GnomAD",
            "Allele_number_exome_OTH_GnomAD",
            "Allele_frequency_exome_OTH_GnomAD",
            "Allele_count_exome_SAS_GnomAD",
            "Allele_count_hemi_exome_SAS_GnomAD",
            "Allele_count_hom_exome_SAS_GnomAD",
            "Allele_number_exome_SAS_GnomAD",
            "Allele_frequency_exome_SAS_GnomAD", "Allele_number_exome_GnomAD",
            "Allele_count_exome_GnomAD", "Allele_frequency_exome_GnomAD",
            "BX_ID_GnomAD"
        ]

        self.sources = list(aggregate_reports.FIELD_DICT.keys()) + ["ENIGMA"]
        self.vcf_test_file = VCF_TESTDATA_FILENAME
        self.tsv_test_file = TSV_TESTDATA_FILENAME

        pwd = os.path.dirname(os.path.realpath(__file__))

        gene_config_df = config.load_config(
            os.path.join(pwd, 'test_files', 'gene_config_test.txt'))
        self.genome_regions_symbol_dict = config.get_genome_regions_symbol_dict(
            gene_config_df, 'start_hg38_legacy_variants',
            'end_hg38_legacy_variants')