Example #1
File: app.py Project: codekip/vertikin
def main():
    # get data from args
    gcm_id = request.args.get("gcm_id")
    new_data = request.args.get("user_data")

    file_path = "user_data/" + gcm_id

    if os.path.isfile(file_path):
        user_history = pickle.load(open(file_path, "rb"))

        existing_data = user_history['data']
        threshold = user_history['threshold']

        data_dict = update_dict(existing_data, new_data)
    else:
        data_dict = update_dict({}, new_data)
        threshold = DEFAULT_THRESHOLD

    data_dict = check_prediction(data_dict, threshold)

    user_json = {
        'data': data_dict,
        'threshold': threshold,
    }

    pickle.dump(user_json, open(file_path, "wb"))

    return jsonify(**data_dict)
Example #2
def setUI(data):
    '''
        takes {
            key: value,
            'sub.key': value
        }
    '''
    update_dict(settings.ui, data)
    return settings.ui
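The docstring above shows both plain and dotted keys ('sub.key'), which suggests this project's update_dict expands dotted keys into nested dictionaries. A minimal sketch of that behavior, assuming dots address nesting levels (the project's real implementation may differ):

def update_dict(target, data):
    # Expand 'a.b.c': v into target['a']['b']['c'] = v,
    # creating intermediate dicts as needed.
    for key, value in data.items():
        parts = key.split('.')
        node = target
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return target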
Example #3
def setUI(data):
    """
        takes {
            key: value,
            'sub.key': value
        }
    """
    update_dict(settings.ui, data)
    return settings.ui
Example #4
def update(self, data):
    self.optimizer.zero_grad()
    self.model.train()
    self.model.set_data(data)
    self.model.update()
    loss_dict = self.model.loss_dict
    update_dict(self.loss_dict, loss_dict)
    self.optimizer.step()
    self.iteration += 1
    return loss_dict
Example #5
def fetch_server_data_for_account(server, account, ucaas=False):
    xsi = create_xsi_tool_for_account(server, account)
    utils.update_dict(account, xsi.get_directory_data())
    kwargs = {"ucaas": ucaas}
    account["dm_config"] = xsi.get_dm_config(**kwargs)
    if not ucaas:
        for x in ["username", "password"]:
            account["xmpp_" + x] = utils.node_value(account["dm_config"],
                                                    "protocols/xmpp/"
                                                    "credentials/%s" % x)
        xmpp = create_xmpp_tool_for_account(account)
        utils.update_dict(account, xmpp.get_xmpp_data())
Example #6
def setPreferences(data):
    '''
        takes {
            key: value,
            'sub.key': value
        }
    '''
    update_dict(settings.preferences, data)
    if 'username' in data:
        u = state.user()
        u.update_name()
        u.save()
    return settings.preferences
Example #7
def setPreferences(data):
    """
        takes {
            key: value,
            'sub.key': value
        }
    """
    update_dict(settings.preferences, data)
    if "username" in data:
        u = state.user()
        u.update_name()
        u.save()
    return settings.preferences
Example #8
def save(self, name=None, extra_info=None):
    epoch = self.epoch
    if name is None:
        name = f'model_{epoch:04d}'
    savepath = pjoin(self.ckpt_dir, "%s.pt" % name)
    state = {
        'epoch': epoch,
        'iteration': self.iteration,
        'model': self.model.state_dict(),
        'optimizer': self.optimizer.state_dict()
    }
    if isinstance(extra_info, dict):
        update_dict(state, extra_info)
    torch.save(state, savepath)
    self.log_string("Saving model at epoch {}, path {}".format(
        epoch, savepath))
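Here update_dict merges arbitrary caller-supplied metadata into the checkpoint dict before it is saved. A hypothetical call, assuming a trainer object exposing this method (the field names are invented for illustration):

trainer.save(name='best', extra_info={'val_acc': 0.93, 'seed': 42})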
Example #9
def get_products_from_url(url, location):
    """This method is meant to extract products from a get response
       :param url - site to search url
       :param location - SearchPlace location
       :return Dictionary with objects"""
    response = requests.get(
        url,
        headers={
            'User-Agent':
            'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0'
        })
    response = response.text
    response = get_products(response, location)
    product_dict = {}
    update_dict(product_dict, response, location.name)
    return product_dict
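This variant takes a third argument, location.name, so it presumably files the extracted products under a per-location key. A sketch consistent with the call site, assuming get_products returns a list (both the grouping and the list type are assumptions):

def update_dict(product_dict, products, key):
    # File the scraped products under the location's name.
    product_dict.setdefault(key, []).extend(products)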
Example #10
    def create_nml(self):

        fv3_nml = os.path.join(self.workdir, 'input.nml')

        # Read in the namelist that has all the base settings.
        with open(self.config.paths.base_nml.format(n=self.config),
                  'r') as nml_file:
            base_nml = f90nml.read(nml_file)

        # Update the base namelist with settings for the current configuration.
        # Pass self.nml, a Namespace object, to update_dict as a dict;
        # update_dict modifies the dict in place.
        utils.update_dict(base_nml, self.nml)

        with open(fv3_nml, 'w') as fn:
            base_nml.write(fn)
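The comment states that utils.update_dict modifies the dict in place, and f90nml namelists are nested dict-like objects, so a recursive in-place merge is the natural shape. A minimal sketch under those assumptions (the project's real helper may differ):

def update_dict(base, new):
    # Recursively merge `new` into `base`, mutating `base` in place.
    for key, value in new.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            update_dict(base[key], value)
        else:
            base[key] = value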
Example #11
def train_all_models_with_cv_balance(config, k=10):
    train_df, test_df = data_acquisition(config)
    cv_df = cv_data_acquisition(config)
    cv = KFold(n_splits=k, random_state=1, shuffle=True)
    # cv_evaluator = Evaluator(test_path, cv_df)

    evaluator = Evaluator(config.args.test_data_path, test_df)
    # we won't save these they are for baseline
    majority_classifier(test_df, evaluator, 0)
    throw_a_die_classifier(test_df, evaluator)

    count = 0
    model_dict = {}

    ngrams = [(1, 1), (1, 2), (1, 3)]
    for ngram in ngrams:

        cv_X_train_counts, cv_X_train_tf, y_cv_res = data_balance(cv_df)
        X_train_tf, X_test_tfidf, y_tr_res, y_ts_res = train_test_split(
            cv_X_train_tf, y_cv_res)
        # X_train_tf, X_test_tfidf = ngram_text_tf_idf(train_df.text, test_df.text, ngram)
        models = get_all_Models(size=cv_X_train_counts.shape)
        # models = {}
        evaluator.set_label(y_ts_res)

        for model in models:
            model_name = model + str(ngram)

            clf, mic, mac, cm, acc, pred = train_eval_model(
                models[model][utils.CLF],
                model_name,
                X_train_tf,
                y_tr_res,
                X_test_tfidf,
                evaluator,
                feature_selection=False)

            model_dict[model_name] = {utils.CLF: clf}
            model_dict = utils.update_dict(model_dict, model_name, clf, mic,
                                           mac, cm, acc, pred)
            scores = cross_val_score(model_dict[model_name][utils.CLF],
                                     cv_X_train_tf,
                                     y_cv_res,
                                     cv=cv,
                                     n_jobs=-1)
            mean_cv = scores.mean()
            model_dict[model_name][utils.CROSS_VALIDATION] = mean_cv
            count += 1
            # print(f'mean Cross validation : {mean_cv}')
            print("\n")

    print(f"# of models ={count} with balancing")
    if config.args.print_info:
        for model in model_dict:
            print(f" model = {model}", end=' ')
            print(f"cv={model_dict[model][utils.CROSS_VALIDATION]}")
    print("\n")
    return model_dict, evaluator
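Note the call shape in this example: the model's entry is pre-seeded with {utils.CLF: clf}, and utils.update_dict then receives the name plus six result values and returns the dict. A hypothetical sketch of that variant; plain string keys stand in for the module's own constants (utils.CLF etc.), which are not visible here:

def update_dict(model_dict, name, clf, mic, mac, cm, acc, pred):
    # Record one model's trained classifier and evaluation results.
    model_dict[name].update({
        'clf': clf, 'micro_f1': mic, 'macro_f1': mac,
        'confusion_matrix': cm, 'accuracy': acc, 'predictions': pred,
    })
    return model_dict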
Example #12
def remove_call_forwards(self, **kwargs):
    """Remove all call forwards defined on the server."""
    for forward_type in "always,busy,noanswer,notreachable".split(","):
        args = utils.update_dict(kwargs, {
            "forward_type": forward_type,
            "number": "",
            "active": False
        })
        self.set_call_forwarding(**args)
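This utils.update_dict is used for its return value here, while Example #5 uses the same helper purely for its side effect, so it most likely merges in place and also returns the dict. A sketch under that assumption:

def update_dict(base, overrides):
    # Merge in place and return the same dict, so callers can rely on
    # either the side effect (Example #5) or the return value (here).
    base.update(overrides)
    return base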
Example #13
    def _write_config(self):
        """Write model configuration file."""
        self.model_config['model_dir'] = self.model_dir
        self.wrapper_config['dict_path'] = self.dict_path

        model_defaults = get_default_args(Seq2SeqModel.__init__)
        self.model_config = update_dict(model_defaults, self.model_config)

        config = {}
        config['model'] = self.model_config
        config['wrapper'] = self.wrapper_config

        create_folder(self.model_dir)
        with open(self.config_path, 'w', encoding='utf8') as f:
            f.write(json.dumps(config, indent=2))
Example #14
def _xsi_http(self, fn, **kwargs):
    api_endpoint = kwargs.get("api_endpoint", None)
    location = kwargs.get("location", None)
    parse = kwargs.get("parse", True)
    data = kwargs.get("data", None)
    auth = kwargs.get("auth", None) or (self.username, self.password)
    url = location if location else (
        "%s/com.broadsoft.xsi-actions/v2.0"
        "/user/%s/%s" % (self.xsp_url, self.username, api_endpoint))
    r = fn(url, verify=True, data=data, auth=auth)
    if len(r.history) > 0 and r.history[0].is_redirect:
        # requests bug (loses authorization header when redirected)
        return self._xsi_http(
            fn,
            **utils.update_dict(
                kwargs, {"location": r.history[0].headers["Location"]}))
    assert str(r.status_code)[0] == '2', \
        "HTTP request %s failed (%s): %s" % (url,
                                             str(r.status_code),
                                             r.content or "")
    return utils.xml_tree(r.content) if parse else True
Example #15
def load_plan(cls, config_filepath):
    """Loads a plan from file."""
    with open(config_filepath) as f:
        # safe_load avoids constructing arbitrary Python objects
        new_params = yaml.safe_load(f)
        update_dict(cls.params, new_params)
Example #16
def update(self, item_id: int, data: dict) -> models.TasksList:
    item = self.get(item_id)
    self.delete(item_id)
    update_dict(item.__dict__, data)
    self._repo.add(item)
    return item
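Here the merge target is the item's attribute dict, so plain keys in data become attribute updates. A hypothetical call, assuming a repository object exposing this method and a task item with these fields:

updated = tasks.update(3, {'title': 'Buy milk', 'done': True})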
Example #17
def main(cla):

    # Load the user-defined settings, and script settings
    # ----------------------------------------------------
    user_config = cla.user_config

    print(f"user config: {user_config}")
    script_config = cla.script_config
    print(f"script config: {script_config}")
    if not script_config:
        ushdir = os.path.join(user_config['paths']['homerrfs'], 'configs')
        script_config = checks.load_config_file(
            os.path.join(ushdir, 'fv3_script.yml'))

    # Update script config with user-supplied config file
    # ----------------------------------------------------
    utils.update_dict(script_config, user_config, quiet=cla.quiet)

    # Create Namespace of config for easier syntax
    # ---------------------------------------------
    config = argparse.Namespace()
    utils.namespace(config, script_config)

    # Now config and script_config contain identical information in Namespace
    # and dict formats, respectively.

    # Load each of the standard YAML config files
    # --------------------------------------------
    #
    # Reminder:
    # checks.load_config_section(arg) takes a two-element list as its input.
    #    arg = [file_name, section_name(s)]
    #
    grid = cla.grid_config
    if not grid:
        grid = checks.load_config_section([
            config.paths.grid.format(n=config),
            [config.grid_name, config.grid_gen_method],
        ])

    machine = cla.machine_config
    if not machine:
        machine_path = config.paths.machine.format(n=config)
        machine = checks.load_config_section([
            machine_path,
            config.machine,
        ])

    namelist = cla.nml_config
    if not namelist:
        namelist = checks.load_config_section([
            config.paths.namelist.format(n=config),
            config.phys_pkg,
        ])

    # Update each of the provided configure files with user-supplied settings
    # ------------------------------------------------------------------------
    for cfg in ['grid', 'machine', 'namelist']:
        utils.update_dict(locals()[cfg][0],
                          script_config.get(cfg),
                          quiet=cla.quiet)

    # Set up a kwargs dict for Forecast object
    # -----------------------------------------
    fcst_kwargs = {
        'grid': grid[0],
        'nml': namelist[0],
        'overwrite': cla.overwrite,
    }

    # Create the Forecast object
    # ---------------------------
    fcst = Forecast(
        config=script_config,
        machine=machine[0],
        starttime=cla.start_date,
        **fcst_kwargs,
    )

    # Run the forecast job
    # ---------------------
    fcst.run(dry_run=cla.dry_run)
Example #18
    def __init__(self, hyperparameters):
        hp = hyperparameters
        self.hyperparameters = hp

        print(
            "LstmMlpSupersensesModel: Building model with the following hyperparameters:"
        )
        pprint(hp.__dict__)

        self.features = build_features(hp)

        names = lambda features: [f.name for f in features]

        self.model = LstmMlpMulticlassModel(
            input_vocabularies={
                feat.name: feat.vocab
                for feat in chain(self.features.list_enum_features(),
                                  self.features.list_string_features())
            },
            input_embeddings={
                feat.name: feat.embedding
                for feat in self.features.list_features_with_embedding(
                    include_auto=False)
            },
            output_vocabulary=vocabs.PSS
            if self.hyperparameters.allow_empty_prediction else
            vocabs.PSS_WITHOUT_NONE,
            hyperparameters=LstmMlpMulticlassModel.HyperParameters(
                **update_dict(hp.__dict__, {
                    'lstm_input_fields':
                    names(self.features.list_lstm_features()),
                    'mlp_input_fields':
                    names(self.features.list_mlp_features(include_refs=False)),
                    'token_neighbour_types':
                    names(self.features.list_ref_features()),
                    'input_embeddings_to_allow_partial':
                    names(self.features.list_default_zero_vec_features()),
                    'input_embeddings_to_update': {
                        name: True
                        for name in names(
                            self.features.list_updatable_features())
                    },
                    'input_embeddings_default_dim':
                    None,
                    'input_embedding_dims': {
                        f.name: f.dim
                        for f in self.features.list_features_with_embedding()
                    },
                    'n_labels_to_predict':
                    len(self.hyperparameters.labels_to_predict)
                },
                              del_keys=[
                                  'use_token', 'lemmas_from',
                                  'update_lemmas_embd', 'use_ud_xpos',
                                  'use_govobj', 'use_ud_dep', 'use_ner',
                                  'use_lexcat', 'token_embd_dim',
                                  'ner_embd_dim', 'token_internal_embd_dim',
                                  'ud_xpos_embd_dim', 'ud_deps_embd_dim',
                                  'spacy_ner_embd_dim',
                                  'govobj_config_embd_dim', 'lexcat_embd_dim',
                                  'update_token_embd', 'use_prep_onehot',
                                  'use_token_internal', 'labels_to_predict',
                                  'mask_by', 'mask_mwes',
                                  'allow_empty_prediction'
                              ])))
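This call passes del_keys and feeds the result to ** unpacking, so this project's update_dict evidently merges the overrides into the hyperparameter dict and strips the listed keys before constructing the nested HyperParameters. A sketch matching that call shape; whether it copies or mutates hp.__dict__ is not visible here, so the copy is an assumption:

def update_dict(base, overrides, del_keys=None):
    merged = dict(base)  # assumed: work on a shallow copy of the base
    merged.update(overrides)
    for key in (del_keys or []):
        merged.pop(key, None)
    return merged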
Example #19
def test_update_dict(a, b, expected):
    update_dict(a, b)
    assert a == expected
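The bare (a, b, expected) signature indicates pytest parametrization. A hypothetical set of cases, assuming the recursive in-place merge semantics sketched under Example #10:

import pytest

@pytest.mark.parametrize("a, b, expected", [
    ({}, {"x": 1}, {"x": 1}),
    ({"x": 1}, {"x": 2}, {"x": 2}),
    ({"x": {"y": 1}}, {"x": {"z": 2}}, {"x": {"y": 1, "z": 2}}),
])
def test_update_dict(a, b, expected):
    update_dict(a, b)
    assert a == expected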
Example #20
def set_params(self, **params):
    for key, value in params.items():
        update_dict(self.params, key.split('__'), value)
    return self
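key.split('__') turns scikit-learn-style double-underscore names into a key path, so this variant writes a value at a nested location rather than merging a dict. A minimal sketch of that signature, inferred only from this call site:

def update_dict(d, keys, value):
    # Walk to the parent of the final key, creating levels as needed,
    # then set the value at the end of the path.
    for key in keys[:-1]:
        d = d.setdefault(key, {})
    d[keys[-1]] = value

With this shape, set_params(optimizer__lr=0.1) would write self.params['optimizer']['lr'] = 0.1.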
Example #21
def train_all_models_with_cv(config, k=10):
    # train test data
    train_df, test_df = data_acquisition(config)

    X_train_counts, X_test_counts, X_train_tf, X_test_tfidf, count_vect = data_pre_processing(
        train_df.text, test_df.text)

    cv_df = cv_data_acquisition(config)
    cv_X_train_counts, cv_X_train_tf, cv_count_vect = cv_data_pre_processing(
        cv_df.text)

    cv = KFold(n_splits=k, random_state=1, shuffle=True)
    evaluator = Evaluator(config.args.test_data_path, test_df)

    # trying out different classifiers

    # we won't save these they are for baseline
    majority_classifier(test_df, evaluator, 0)
    throw_a_die_classifier(test_df, evaluator)

    model_dict = {}
    count = 0
    ngrams = [(1, 1), (1, 2), (1, 3)]

    for ngram in ngrams:
        models = get_all_Models(size=test_df.shape)

        X_train_tf, X_test_tfidf = ngram_text_tf_idf(train_df.text,
                                                     test_df.text, ngram)

        for model in models:
            model_name = model + str(ngram)
            clf, mic, mac, cm, acc, pred = train_eval_model(
                models[model][utils.CLF],
                model_name,
                X_train_tf,
                train_df.label,
                X_test_tfidf,
                evaluator,
                feature_selection=False)
            model_dict[model_name] = {utils.CLF: clf}
            model_dict = utils.update_dict(model_dict, model_name, clf, mic,
                                           mac, cm, acc, pred)

            scores = cross_val_score(model_dict[model_name][utils.CLF],
                                     cv_X_train_tf,
                                     cv_df.label,
                                     scoring='accuracy',
                                     cv=cv,
                                     n_jobs=-1)

            mean_cv = np.mean(scores)
            model_dict[model_name][utils.CROSS_VALIDATION] = mean_cv
            count += 1
            # print(f'mean Cross validation : {mean_cv}')
            print("\n")

    print(f"# of models ={count} without balancing")
    if config.args.print_info:
        for model in model_dict:
            print(f" model = {model}", end=' ')
            print(f"cv={model_dict[model][utils.CROSS_VALIDATION]}")
    print("\n")
    return model_dict, count_vect, evaluator
Example #22
def train_all_models(config):
    train_df, test_df = data_acquisition(config)

    X_train_counts, X_test_counts, X_train_tf, X_test_tfidf, count_vect = data_pre_processing(
        train_df.text, test_df.text)

    evaluator = Evaluator(config.args.test_data_path, test_df)

    # trying out different classifiers

    # we won't save these they are for baseline
    majority_classifier(test_df, evaluator, 0)
    throw_a_die_classifier(test_df, evaluator)

    model_dict = get_all_Models(size=test_df.shape)
    count = 0
    for model in model_dict:
        clf, mic, mac, cm, acc, pred = train_eval_model(
            model_dict[model][utils.CLF],
            model,
            X_train_tf,
            train_df.label,
            X_test_tfidf,
            evaluator,
            feature_selection=False)
        model_dict = utils.update_dict(model_dict, model, clf, mic, mac, cm,
                                       acc, pred)

        count += 1
    ngrams = [(1, 2), (1, 3)]
    for ngram in ngrams:
        models = get_all_Models(size=test_df.shape)

        X_train_tf, X_test_tfidf = ngram_text_tf_idf(train_df.text,
                                                     test_df.text, ngram)

        for model in models:
            model_name = model + str(ngram)
            clf, mic, mac, cm, acc, pred = train_eval_model(
                models[model][utils.CLF],
                model_name,
                X_train_tf,
                train_df.label,
                X_test_tfidf,
                evaluator,
                feature_selection=False)
            model_dict[model_name] = {utils.CLF: clf}
            model_dict = utils.update_dict(model_dict, model_name, clf, mic,
                                           mac, cm, acc, pred)

            count += 1
    from datetime import datetime
    start_time = datetime.now()

    if config.args.with_bert:
        bert = Bert_init()
        model_name = 'bert'
        clf, mic, mac, cm, acc, pred = train_eval_model(
            clf=bert,
            name=model_name,
            X_train_tf=train_df.text,
            label=train_df.label,
            X_test_tfidf=test_df.text,
            evaluator=evaluator)
        end_time = datetime.now()
        print('Duration: {}'.format(end_time - start_time))
        model_dict[model_name] = {utils.CLF: clf}
        model_dict = utils.update_dict(model_dict, model_name, clf, mic, mac,
                                       cm, acc, pred)
        count += 1
        print(f"# of models ={count}")

    return model_dict, count_vect, evaluator