def graphs_analysis(data, col_init, col_out, **kwargs):
    color_label = kwargs.get('color_label', 'black')
    params_default = get_default_args(pd.value_counts)

    # Let matching kwargs override value_counts' defaults; anything that is
    # not a value_counts parameter (e.g. color_label) is dropped.
    params_default = {
        k: v
        for k, v in {**params_default, **kwargs}.items()
        if k in params_default
    }

    print(pd.DataFrame(data[col_init]).corrwith(data[col_out]))

    fig, axes = plt.subplots(nrows=2, ncols=2)
    graphs = [
        data[col_init].value_counts(**params_default).plot(ax=axes[0, 0],
                                                           figsize=(15, 10)),
        data[col_init].value_counts(**params_default).plot.bar(ax=axes[0, 1]),
        sns.lineplot(x=col_init, y=col_out, data=data, ax=axes[1, 0]),
        sns.barplot(x=col_init, y=col_out, data=data, ax=axes[1, 1])
    ]
    for ax in graphs:
        ax.xaxis.label.set_color(color_label)
        ax.tick_params(colors=color_label)
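All of the examples on this page call a get_default_args helper that is not reproduced here. A minimal sketch of such a helper, built on inspect.signature, is shown below (an assumption about its behaviour, not any of these projects' actual source):

import inspect

def get_default_args(func):
    # Map each parameter of func that declares a default to that default;
    # parameters without defaults (and *args/**kwargs) are skipped.
    return {
        name: param.default
        for name, param in inspect.signature(func).parameters.items()
        if param.default is not inspect.Parameter.empty
    }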
Example #2
    def __init__(
        self,
        policy,
        env,
        gamma,
        learning_rate,
        buffer_size,
        exploration_type,
        exploration_frac,
        exploration_ep,
        exploration_initial_eps,
        exploration_final_eps,
        double_q,
        policy_kwargs,
        seed,
        intent
        ):

        super(TabularRLModel, self).__init__(
            policy=policy,
            env=env, 
            policy_kwargs=policy_kwargs,
            seed=seed)

        self.gamma = gamma
        self.learning_rate = learning_rate
        self.buffer_size = buffer_size
        self.exploration_type = exploration_type
        self.exploration_frac = exploration_frac
        self.exploration_ep = exploration_ep
        self.exploration_initial_eps = exploration_initial_eps
        self.exploration_final_eps = exploration_final_eps
        self.double_q = double_q
        self.intent = intent
        # self.policy_kwargs = {} if policy_kwargs is None else policy_kwargs
        # self.policy = policy(self.observation_space, self.action_space, intent=True)

        # Seed the policy kwargs with the policy constructor's defaults, then
        # add the spaces and intent flag the policy needs at build time.
        self.policy_kwargs = get_default_args(self.policy)
        self.policy_kwargs['ob_space'] = self.observation_space
        self.policy_kwargs['ac_space'] = self.action_space
        self.policy_kwargs['intent'] = self.intent

        # Any user-supplied kwargs override the defaults.
        if policy_kwargs is not None:
            for key, val in policy_kwargs.items():
                self.policy_kwargs[key] = val
        # self.policy_kwargs['transform_func'] = transform_func

        # if policy_kwargs is None:
        #     self.policy = policy(self.observation_space, self.action_space,
        #                          intent=True, device=self.device)
        # else:
        self.policy = policy(**self.policy_kwargs)

        if self.buffer_size is None:
            self.replay_buffer = None
        else:
            self.replay_buffer = ReplayBuffer(self.buffer_size)
Example #3
    def test_ingest_ztf_source_features(self):
        tag = get_default_args(run).get("tag")
        collection = f"ZTF_source_features_{tag}"

        run(
            path="/data",
            tag=tag,
            xmatch=False,
            num_processes=1,
        )

        ingested_entries = list(self.mongo.db[collection].find({}, {"_id": 1}))
        log(f"Ingested features of {len(ingested_entries)} sources")

        assert len(ingested_entries) == 123
Example #4
    def _write_config(self):
        """Write model configuration file."""
        self.model_config['model_dir'] = self.model_dir
        self.wrapper_config['dict_path'] = self.dict_path

        model_defaults = get_default_args(Seq2SeqModel.__init__)
        self.model_config = update_dict(model_defaults, self.model_config)

        config = {}
        config['model'] = self.model_config
        config['wrapper'] = self.wrapper_config

        create_folder(self.model_dir)
        with open(self.config_path, 'w', encoding='utf8') as f:
            f.write(json.dumps(config, indent=2))
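update_dict above is a helper from the same project; a plausible reading of its use here, merging the constructor defaults with the explicitly configured values, is sketched below (hypothetical implementation, not the project's own):

def update_dict(defaults, overrides):
    # Start from the defaults and let explicitly set values take precedence.
    merged = dict(defaults)
    merged.update(overrides)
    return merged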
Example #5
    @classmethod
    def from_json(cls, model_json):
        """Loads a JSON file containing the serialized RegressionMeta class
        converted to JSON in serialize.py. Separates the required args of the
        LinearRegression constructor and passes them to the current class as
        keyword args.

        @param: model_json: path to json file containing serialized model data
        """
        with open(model_json) as m:
            model_json = json.load(m)
        # re-construct instance of RegressionMeta from json
        meta = RegressionMeta.parse_obj(model_json)
        required_args = get_default_args(LinearRegression.__init__)
        req_args_ = {
            k: v
            for (k, v) in meta.params.items() if k in required_args
        }
        non_req_args_ = {
            k: v
            for (k, v) in meta.params.items() if k not in required_args
        }
        # print(non_req_args_)
        return cls(non_req_args_, **req_args_)
Example #6
def get_Seq2Seq_model_param_names():
    """Get wrapper model parameter names.
    
    Some of the parameters fed into models.Seq2SeqModel are supplied by the
    wrapper itself, which is why there are fewer wrapper parameters than
    parameters that Seq2SeqModel actually requires. For example, 'mode',
    'num_encoder_symbols', 'dropout_rate' and 'beam_width' are fed in by the
    wrapper's 'train' and 'decode' functions.
    
    Args:
        None
    
    Returns:
        List of wrapper parameters.
    """
    def_keys = list(get_default_args(Seq2SeqModel.__init__).keys())
    not_allowed = [
        # Given as init to wrapper
        'model_dir',
        # Handled by wrapper automatically
        'mode',
        'num_encoder_symbols',
        'num_decoder_symbols',
        'start_token',
        'end_token',
        'pad_token',
        # Training
        'dropout_rate',
        'optimizer',
        'learning_rate',
        'max_gradient_norm',
        # Decoding
        'beam_width',
        'max_decode_step'
    ]
    return [k for k in def_keys if k not in not_allowed]
Example #7
    def get_params(self):
        return get_default_args(self.__init__)
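As a quick illustration of this pattern, reusing the inspect-based get_default_args sketched after the first example above (the Estimator class here is hypothetical, not from the original project):

class Estimator:
    def __init__(self, alpha=0.1, max_iter=100):
        self.alpha = alpha
        self.max_iter = max_iter

    def get_params(self):
        return get_default_args(self.__init__)

print(Estimator().get_params())  # {'alpha': 0.1, 'max_iter': 100}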