Example #1
def get_data(config):
    return (
        loader.read_data(
            "br",
            config,
            endpoint=config["br"]["api"]["endpoints"]["farolcovid"]["cities"],
        ).replace({"medio": "médio", "insatisfatorio": "insatisfatório"}),
        loader.read_data(
            "br",
            config,
            endpoint=config["br"]["api"]["endpoints"]["farolcovid"]["states"],
        ).replace({"medio": "médio", "insatisfatorio": "insatisfatório"}),
    )
Example #2
def get_data(config):

    dfs = {
        place: loader.read_data(
            "br",
            config,
            endpoint=config["br"]["api"]["endpoints"]["farolcovid"][place],
        ).pipe(utils.fix_dates)
        for place in ["city", "health_region", "state"]
    }

    cnes_sources = loader.read_data("br", config, "br/cities/cnes")
    # places_ids = loader.read_data("br", config, "br/places/ids")
    return dfs, cnes_sources
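A minimal usage sketch for the get_data helper above, assuming it lives in the project where the loader and utils modules are importable and configs/config.yaml exists, as in the other examples on this page; the printed columns are illustrative:

import yaml

config = yaml.load(open("configs/config.yaml", "r"), Loader=yaml.FullLoader)
dfs, cnes_sources = get_data(config)
print(dfs["city"].head())      # city-level FarolCovid indicators
print(dfs["state"].shape)      # state-level indicators
print(cnes_sources.head())     # CNES resources read from "br/cities/cnes"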
Example #3
def _prepare_simulation(user_input, config):

    # based on Alison Hill: 40% asymptomatic
    user_input["population_params"]["I"] = (
        int(user_input["population_params"]["I"] *
            (1 - config["br"]["seir_parameters"]["asymptomatic_proportion"]))
        if not np.isnan(user_input["population_params"]["I"]) else 1
    )

    place_specific_params = loader.read_data(
        "br",
        config,
        endpoint=config["br"]["api"]["endpoints"]["parameters"][
            user_input["place_type"]],
    ).set_index(user_input["place_type"])
    place_id = user_input["place_type"]

    user_input["place_specific_params"] = {
        "fatality_ratio":
        place_specific_params["fatality_ratio"].loc[int(user_input[place_id])],
        "i1_percentage":
        place_specific_params["i1_percentage"].loc[int(user_input[place_id])],
        "i2_percentage":
        place_specific_params["i2_percentage"].loc[int(user_input[place_id])],
        "i3_percentage":
        place_specific_params["i3_percentage"].loc[int(user_input[place_id])],
    }
    return user_input
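The asymptomatic adjustment above reduces the reported active cases by the configured asymptomatic share, falling back to 1 when the value is missing. A small worked example with made-up numbers (the real proportion comes from config["br"]["seir_parameters"]["asymptomatic_proportion"]):

import numpy as np

reported_active = 100          # illustrative value for population_params["I"]
asymptomatic_proportion = 0.4  # "40% asymptomatic", per the comment above
adjusted = (int(reported_active * (1 - asymptomatic_proportion))
            if not np.isnan(reported_active) else 1)
print(adjusted)  # 60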
Example #4
def loading_cached_cities(params):
    return loader.read_data(
        "br",
        loader.config,
        endpoint=loader.config["br"]["api"]["endpoints"]["analysis"]["cases"],
        params=params,
    )
Example #5
def get_rt(place_type, user_input, config, bound):

    if place_type == "city_id":
        col = place_type
        endpoint = config["br"]["api"]["endpoints"]["rt_cities"]

    if place_type == "state_id":
        col = "state"
        endpoint = config["br"]["api"]["endpoints"]["rt_states"]

    rt = loader.read_data("br", config, endpoint=endpoint)
    # take the estimate from 10 days ago
    rt = rt[rt["last_updated"] == (rt["last_updated"].max() -
                                   dt.timedelta(10))]

    # if there is no Rt for the city, fall back to the state Rt
    if (place_type == "city_id"
            and user_input[place_type] not in rt[col].values):
        return get_rt("state_id", user_input, config, bound)

    cols = {"best": "Rt_low_95", "worst": "Rt_high_95"}

    if user_input["strategy"] == "isolation":  # current
        return rt[rt[col] == user_input[place_type]][cols[bound]].values[0]

    if user_input["strategy"] == "lockdown":  # smaller_rt
        return rt[rt[col] == user_input[place_type]][cols[bound]] / 2

    if user_input["strategy"] == "nothing":  # greater_rt
        return rt[rt[col] == user_input[place_type]][cols[bound]] * 2
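A hypothetical call sketch for get_rt; the ids and strategy below are illustrative only, and the config is loaded the same way as in the other examples:

import yaml

config = yaml.load(open("configs/config.yaml", "r"), Loader=yaml.FullLoader)
user_input = {"city_id": 3550308, "state_id": 35, "strategy": "isolation"}
rt_best = get_rt("city_id", user_input, config, bound="best")    # Rt_low_95
rt_worst = get_rt("city_id", user_input, config, bound="worst")  # Rt_high_95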
Example #6
def get_data(config):

    dfs = {
        place: loader.read_data(
            "br",
            config,
            endpoint=config["br"]["api"]["endpoints"]["farolcovid"][place],
        ).replace({
            "medio": "médio",
            "insatisfatorio": "insatisfatório"
        }).pipe(utils.fix_dates)
        for place in ["city", "health_region", "state"]
    }

    places_ids = loader.read_data("br", config, "br/places/ids")
    return dfs, places_ids
Example #7
def check_initialize(self):
    if self.dictionary is None:
        self.dictionary = loader.read_data(
            "br",
            loader.config,
            loader.config["br"]["api"]["endpoints"]["utilities"]
            ["place_ids"],
        )
Example #8
def main(session_state=None):
    user_analytics = amplitude.gen_user(utils.get_server_session())
    opening_response = user_analytics.safe_log_event("opened analysis",
                                                     session_state,
                                                     is_new_page=True)
    utils.localCSS("style.css")
    utils.localCSS("icons.css")

    config = yaml.load(open("configs/config.yaml", "r"),
                       Loader=yaml.FullLoader)
    br_cases = loader.read_data(
        "br",
        config,
        endpoint=config["br"]["api"]["endpoints"]["analysis"]["cases"])

    st.write(
        """
        <div class="base-wrapper">
                <span class="section-header primary-span">MORTES DIÁRIAS POR MUNICÍPIO</span>
        </div>
        """,
        unsafe_allow_html=True,
    )

    user_uf = st.selectbox("Selecione um estado para análise:",
                           utils.get_ufs_list())

    prepare_heatmap(
        br_cases,
        place_type="city_name",
        group=user_uf,
    )

    prepare_heatmap(
        br_cases,
        place_type="state_id",
    )

    prepare_heatmap(
        loader.read_data(
            "br",
            config,
            endpoint=config["br"]["api"]["endpoints"]["analysis"]["owid"]),
        place_type="country_pt",
    )
Example #9
def load_countries_heatmap(config):
    st.write("")
    da.prepare_heatmap(
        loader.read_data(
            "br",
            loader.config,
            endpoint=config["br"]["api"]["endpoints"]["analysis"]["owid"],
        ),
        place_type="country_pt",
    )
Example #10
def read(config):
    counter = get_counter(config.train_file)
    if os.path.exists(config.emb_dict):
        with open(config.emb_dict, "r") as fh:
            emb_dict = json.load(fh)
    else:
        emb_dict = read_glove(config.glove_word_file, counter,
                              config.glove_word_size, config.glove_dim)
        with open(config.emb_dict, "w") as fh:
            json.dump(emb_dict, fh)
    word2idx_dict, fixed_emb, trainable_emb = token2id(config, counter,
                                                       emb_dict)

    train_data = read_data(config.train_file)
    dev_data = read_data(config.dev_file)
    test_data = read_data(config.test_file)
    pretrain_data = read_pretrain(config)
    pretrain_data2 = read_pretrain(config, 2)
    return word2idx_dict, fixed_emb, trainable_emb, train_data, dev_data, test_data, pretrain_data, pretrain_data2
Example #11
def main():

    utils.localCSS("style.css")
    utils.localCSS("icons.css")

    config = yaml.load(open("configs/config.yaml", "r"),
                       Loader=yaml.FullLoader)
    br_cases = loader.read_data(
        "br",
        config,
        endpoint=config["br"]["api"]["endpoints"]["analysis"]["cases"])

    st.write(
        """
        <div class="base-wrapper">
                <span class="section-header primary-span">MORTES DIÁRIAS POR MUNICÍPIO</span>
        </div>
        """,
        unsafe_allow_html=True,
    )

    user_uf = st.selectbox("Selecione um estado para análise:",
                           utils.get_ufs_list())

    prepare_heatmap(
        br_cases,
        place_type="city",
        group=user_uf,
    )

    prepare_heatmap(
        br_cases,
        place_type="state",
    )

    prepare_heatmap(
        loader.read_data(
            "br",
            config,
            endpoint=config["br"]["api"]["endpoints"]["analysis"]["owid"]),
        place_type="country_pt",
    )
Example #12
def get_score_groups(config, session_state, slider_value):
    """ Takes our data and splits it into 4 sectors for use by our diagram generator """
    # uf_num = utils.get_place_id_by_names(session_state.state)

    if (session_state.city_name != "Todos"
            or session_state.health_region_name != "Todos"):
        endpoint = "health_region"
        col = "health_region_id"
        value = session_state.health_region_id
        place_name = session_state.health_region_name + " (Região de Saúde)"

    else:
        endpoint = "state"
        col = "state_num_id"
        value = session_state.state_num_id
        place_name = session_state.state_name + " (Estado)"

    economic_data = loader.read_data(
        "br",
        config,
        config["br"]["api"]["endpoints"]["safereopen"]["economic_data"]
        [endpoint],
    ).query(f"{col} == {value}")

    CNAE_sectors = loader.read_data(
        "br", config,
        config["br"]["api"]["endpoints"]["safereopen"]["cnae_sectors"])
    CNAE_sectors = dict(zip(CNAE_sectors.cnae, CNAE_sectors.activity))

    economic_data["activity_name"] = economic_data.apply(
        lambda row: CNAE_sectors[row["cnae"]], axis=1)
    return (
        gen_sorted_sectors(
            economic_data,
            slider_value,
            DO_IT_BY_RANGE,
        ),
        economic_data,
        place_name,
    )
Example #13
def plot_rt_wrapper(place_id, place_type):

    endpoints = {
        "state_num_id": "state",
        "health_region_id": "health_region",
        "city_id": "city",
    }

    data = (loader.read_data(
        "br", config,
        config["br"]["api"]["endpoints"]["rt"][endpoints[place_type]]).query(
            f"{place_type} == {place_id}").sort_values("last_updated"))

    if len(data) < 30:
        return None

    fig = plot_rt(data)
    fig.update_layout(xaxis=dict(tickformat="%d/%m"))
    fig.update_layout(margin=dict(l=50, r=50, b=100, t=20, pad=4))
    fig.update_yaxes(automargin=True)

    return fig
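A hypothetical way to render the figure returned by plot_rt_wrapper inside a Streamlit page like the other examples here (the place id is illustrative):

import streamlit as st

fig = plot_rt_wrapper(place_id=35, place_type="state_num_id")
if fig is not None:  # None means fewer than 30 days of Rt estimates
    st.plotly_chart(fig, use_container_width=True)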
Example #14
def train(args: Dict):
    """ Train the NMT Model.
    @param args (Dict): args from cmd line
    :param args:
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('use device: %s' % device)
    train_data, dev_data = read_data(args)
    vocab, vocab_mask = read_vocab(args)
    train_batch_size, N, d_model, d_ff, h, dropout, valid_niter, log_every, model_save_path, lr = read_model_params(
        args)

    transformer_model = TransfomerModel(vocab, N, d_model, d_ff, h, dropout,
                                        device)
    model = transformer_model.model
    optimizer = NoamOpt(
        model.src_embed[0].d_model, 1, 400,
        torch.optim.Adam(model.parameters(),
                         lr=lr,
                         betas=(0.9, 0.999),
                         eps=1e-9))

    # criterion = nn.CrossEntropyLoss()
    # criterion = LabelSmoothing(size=len(vocab.tgt.word2id), padding_idx=vocab.tgt.word2id['<pad>'], smoothing=0.0)
    criterion = LabelSmoothing(size=len(vocab.tgt.word2id),
                               padding_idx=vocab.tgt.word2id['<pad>'],
                               smoothing=0.001)
    # criterion = partial(nllLoss, vocab.src.word2id['<pad>'])
    loss_compute_train = SimpleLossCompute(model.generator, criterion,
                                           optimizer)
    loss_compute_dev = SimpleLossCompute(model.generator,
                                         criterion,
                                         optimizer,
                                         train=False)

    train_time = start_time = time.time()
    patience = cum_loss = report_loss = cum_tgt_words = report_tgt_words = 0
    num_trial = cum_examples = report_examples = epoch = valid_num = 0
    hist_valid_scores = []

    print('begin Maximum Likelihood Training')
    while True:
        epoch += 1
        for train_iter, batch_sents in enumerate(
                batch_iter(train_data,
                           batch_size=train_batch_size,
                           shuffle=True)):

            loss, batch_size, n_tokens = train_step(model, batch_sents, vocab,
                                                    loss_compute_train, device)
            report_loss += loss
            cum_loss += loss
            cum_examples += batch_size
            report_examples += batch_size
            report_tgt_words += n_tokens
            cum_tgt_words += n_tokens

            if train_iter % log_every == 0:
                elapsed = time.time() - start_time
                elapsed_since_last = time.time() - train_time
                print(
                    f"epoch {epoch}, iter {train_iter}, avg loss {report_loss / report_examples: .3f}, "
                    f"avg ppl {np.exp(report_loss / report_tgt_words): .3f}, cum examples {cum_examples}, "
                    f"speed {report_tgt_words/ elapsed_since_last: .3f} w/s, elapsed time {elapsed: .3f} s, lr= {optimizer._rate}"
                )
                train_time = time.time()
                report_tgt_words = report_loss = report_examples = 0.

            if train_iter % valid_niter == 0:
                print(
                    f"epoch {epoch}, iter {train_iter}, cum. loss {cum_loss/cum_examples}, "
                    f"cum ppl {np.exp(cum_loss / cum_tgt_words)}, cum examples {cum_examples}, lr= {optimizer._rate}"
                )
                cum_loss = cum_examples = cum_tgt_words = 0.
                valid_num += 1
                print("begin validation ...")

                dev_loss, dev_ppl = run_dev_session(model,
                                                    dev_data,
                                                    vocab,
                                                    loss_compute_dev,
                                                    batch_size=32,
                                                    device=device)
                print(
                    f'validation: iter {train_iter}, dev. loss {dev_loss}, dev. ppl {dev_ppl}'
                )

                valid_metric = -dev_ppl
                is_better = len(hist_valid_scores
                                ) == 0 or valid_metric > max(hist_valid_scores)
                hist_valid_scores.append(valid_metric)
                if is_better:
                    patience = 0
                    print(
                        f'save the current best model to {model_save_path}')
                    transformer_model.save(model_save_path)
                    torch.save(optimizer.optimizer.state_dict(),
                               model_save_path + ".optim")
                elif patience < int(args['--patience']):
                    patience += 1
                    print(f'hit patience {patience}')
                    if patience == int(args['--patience']):
                        num_trial += 1
                        print(f"hit #{num_trial} trial")
                        if num_trial == int(args['--max-num-trial']):
                            print('early stop!')
                            exit(0)
                if epoch == int(args['--max-epoch']):
                    print('reached max number of epochs!')
                    exit(0)
Example #15
def main():
    data = read_data()
    data_frames = []
    ctr = 0
    length = len(data)

    # Continue through the list until there is nothing left
    while ctr < length:
        data_frames.append(pd.DataFrame(np.transpose(data[ctr][2])))
        ctr += 1

    print("\n" + str(ctr) + " : Months Processed\n")

    # Set up the final dataframe with the columns
    df = pd.concat(data_frames, ignore_index=True)
    df.columns = VARNAMES

    # Add in Label Column and make them 1 or 0
    df["Label"] = df["Precipitation"] > 0
    df.Label = df.Label.astype(int)
    df = df.drop(columns=["Precipitation"])
    print(df)

    # Examine feature correlations with the label
    correlation_matrix = df.corr()
    print(correlation_matrix["Label"].sort_values(ascending=False))

    # Create a stratified shuffle split for the data
    split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    for train_index, test_index in split.split(df, df["Label"]):
        strat_train = df.loc[train_index]
        strat_test = df.loc[test_index]

    # Prepare the Data for Machine Learning
    weather_train = strat_train.drop("Label", axis=1)
    weather_labels = strat_train["Label"].copy()

    weather_test = strat_test.drop("Label", axis=1)
    test_labels = strat_test["Label"].copy()

    # Setup a Nearest-Neighbor
    neigh2 = KNeighborsClassifier(n_neighbors=2)

    neigh3 = KNeighborsClassifier(n_neighbors=3)
    neigh5 = KNeighborsClassifier(n_neighbors=5)
    neigh10 = KNeighborsClassifier(n_neighbors=10)
    neigh2.fit(weather_train, weather_labels)
    neigh3.fit(weather_train, weather_labels)
    neigh5.fit(weather_train, weather_labels)
    neigh10.fit(weather_train, weather_labels)

    # Setup a Linear Model
    lin_reg = LinearRegression()
    lin_reg.fit(weather_train, weather_labels)

    # Do Predictions
    linear_predictions = lin_reg.predict(weather_test)
    neigh2_pred = neigh2.predict(weather_test)
    neigh3_pred = neigh3.predict(weather_test)
    neigh5_pred = neigh5.predict(weather_test)
    neigh10_pred = neigh10.predict(weather_test)

    # Print out errors
    lin_mae = mean_absolute_error(test_labels, linear_predictions)
    neigh2_mae = mean_absolute_error(test_labels, neigh2_pred)
    neigh3_mae = mean_absolute_error(test_labels, neigh3_pred)
    neigh5_mae = mean_absolute_error(test_labels, neigh5_pred)
    neigh10_mae = mean_absolute_error(test_labels, neigh10_pred)

    # Print Statements for accuracy scores
    print("Linear Accuracy Score: " + str(1 - lin_mae) + "%")
    print("NN 2 accuracy Score : " + str(1 - neigh2_mae) + "%")
    print("NN 3 accuracy Score : " + str(1 - neigh3_mae) + "%")
    print("NN 5 accuracy Score : " + str(1 - neigh5_mae) + "%")
    print("NN 10 accuracy Score : " + str(1 - neigh10_mae) + "%")
Example #16
def main(user_input, indicators, data, config, session_state):

    utils.genHeroSection(
        title1="Onda",
        title2="Covid",
        subtitle=
        "Veja e compare a evolução da curva de contágio da Covid-19 em seu estado ou município.",
        logo="https://i.imgur.com/Oy7IiGB.png",
        header=False)

    try:
        # load data
        # print("loading br cases")
        br_cases = loading_cached()
        # print("finished loading br cases")
        my_dict = utils.Dictionary()
        # WAVE BY STATE
        da.prepare_heatmap(br_cases, place_type="state_id")
        st.write("")
        pass
    except Exception as e:
        st.write(str(e))

    # WAVE BY MUNICIPALITY
    st.write(
        """
        <div class="base-wrapper">
            <span class="section-header primary-span">ONDA MORTES DIÁRIAS POR MUNICÍPIO</span>
            <br><br>
            <span class="ambassador-question"><b>Selecione seu estado e município para prosseguir</b></span>
        </div>""",
        unsafe_allow_html=True,
    )
    dfs, places_ids = get_data(loader.config)
    state_name = st.selectbox("Estado ", utils.filter_place(dfs, "state"))
    city_name = st.selectbox(
        "Município ",
        utils.filter_place(dfs,
                           "city",
                           state_name=state_name,
                           health_region_name="Todos"),
    )

    deaths_or_cases = (st.selectbox(
        "Qual análise você quer ver: Número de mortes ou Taxa de letalidade (mortes por casos)?",
        ["Mortes", "Letalidade"]) == "Letalidade")
    # print("checking")
    if city_name != "Todos":  # the user selected something
        # print("passed")
        br_cases = br_cases[br_cases["state_name"] ==
                            state_name]  # .reset_index()
        # gen_banners()
        uf = my_dict.get_state_alphabetical_id_by_name(state_name)
        da.prepare_heatmap(
            br_cases,
            place_type="city_name",
            group=uf,
            your_city=city_name,
            deaths_per_cases=deaths_or_cases,
        )
        # print("finished preparation")

    # WAVE BY COUNTRY
    st.write("")
    da.prepare_heatmap(
        loader.read_data(
            "br",
            loader.config,
            endpoint=config["br"]["api"]["endpoints"]["analysis"]["owid"],
        ),
        place_type="country_pt",
    )
Example #17
def main():
    data = read_data()
    data_frames = []
    ctr = 0
    length = len(data)

    # Continue through the list until there is nothing left
    while ctr < length:
        data_frames.append(pd.DataFrame(np.transpose(data[ctr][2])))
        ctr += 1

    print("\n" + str(ctr) + " : Months Processed\n")

    # Set up the final dataframe with the columns
    df = pd.concat(data_frames, ignore_index=True)
    df.columns = VARNAMES

    # Add in Label Column and make them 1 or 0
    df["Label"] = df["Precipitation"] > 0
    df.Label = df.Label.astype(int)
    df = df.drop(columns=["Precipitation"])
    print(df)

    # Examine feature correlations with the label
    correlation_matrix = df.corr()
    print(correlation_matrix["Label"].sort_values(ascending=False))

    # Create a stratified shuffle split for the data
    split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    for train_index, test_index in split.split(df, df["Label"]):
        strat_train = df.loc[train_index]
        strat_test = df.loc[test_index]

    # Prepare the Data for Machine Learning
    weather_train = strat_train.drop("Label", axis=1)
    weather_labels = strat_train["Label"].copy()

    # Setup test set
    weather_test = strat_test.drop("Label", axis=1)
    test_labels = strat_test["Label"].copy()

    # Setup a Linear Regression Model
    lin_reg = LinearRegression()
    lin_reg.fit(weather_train, weather_labels)

    # Setup a Logistic Regression model
    log_reg = LogisticRegressionCV(
        cv=5,
        random_state=0,
        multi_class='multinomial',
    )
    log_reg.fit(weather_train, weather_labels)

    # Do Predictions
    linear_predictions = lin_reg.predict(weather_train)
    linear_test_predictions = lin_reg.predict(weather_test)
    logistic_predictions = log_reg.predict(weather_train)
    logistic_test_predictions = log_reg.predict(weather_test)

    lin_mae = mean_absolute_error(weather_labels, linear_predictions)
    linear_test_mae = mean_absolute_error(test_labels, linear_test_predictions)
    log_mae = mean_absolute_error(weather_labels, logistic_predictions)
    logistic_test_mae = mean_absolute_error(test_labels,
                                            logistic_test_predictions)
    print("Linear MeanAbsoluteError on training data  : " + str(1 - lin_mae) +
          "%")
    print("Linear MeanAbsoluteError on test data      : " +
          str(1 - linear_test_mae) + "%")
    print("Logistic MeanAbsoluteError on training data: " + str(1 - log_mae) +
          "%")
    print("Logistic MeanAbsoluteError on test data    : " +
          str(1 - logistic_test_mae) + "%")
Example #18
def main():
    utils.localCSS("style.css")
    utils.localCSS("icons.css")

    # HEADER
    utils.genHeroSection()
    utils.genVideoTutorial()

    # GET DATA
    config = yaml.load(open('configs/config.yaml', 'r'),
                       Loader=yaml.FullLoader)
    # if abs(datetime.now().minute - FIXED) > config['refresh_rate']:
    #         caching.clear_cache()
    cities = loader.read_data('br', config, refresh_rate=refresh_rate(config))

    # REGION/CITY USER INPUT
    user_input = dict()

    utils.genStateInputSectionHeader()

    user_input['state'] = st.selectbox('Estado',
                                       add_all(cities['state_name'].unique()))
    cities_filtered = filter_options(cities, user_input['state'], 'state_name')

    utils.genMunicipalityInputSection()

    user_input['region'] = st.selectbox(
        'Região SUS',
        add_all(cities_filtered['health_system_region'].unique()))
    cities_filtered = filter_options(cities_filtered, user_input['region'],
                                     'health_system_region')

    user_input['city'] = st.selectbox(
        'Município', add_all(cities_filtered['city_name'].unique()))
    cities_filtered = filter_options(cities_filtered, user_input['city'],
                                     'city_name')

    sources = cities_filtered[[
        c for c in cities_filtered.columns
        if (('author' in c) or ('last_updated_' in c))
    ]]

    selected_region = cities_filtered.sum(numeric_only=True)

    # GET LAST UPDATE DATE
    if not np.all(cities_filtered['last_updated'].isna()):
        last_update_cases = cities_filtered['last_updated'].max().strftime(
            '%d/%m')

    # GET NOTIFICATION RATE
    if len(cities_filtered
           ) > 1:  # use the state rate when more than one municipality is selected
        notification_rate = round(
            cities_filtered['state_notification_rate'].mean(), 4)

    else:
        notification_rate = round(
            cities_filtered['notification_rate'].values[0], 4)

    # pick locality according to hierarchy
    locality = choose_place(user_input['city'], user_input['region'],
                            user_input['state'])

    st.write('<br/>', unsafe_allow_html=True)

    utils.genInputCustomizationSectionHeader(locality)

    # SOURCES USER INPUT
    source_beds = sources[['author_number_beds',
                           'last_updated_number_beds']].drop_duplicates()
    authors_beds = source_beds.author_number_beds.str.cat(sep=', ')

    source_ventilators = sources[[
        'author_number_ventilators', 'last_updated_number_ventilators'
    ]].drop_duplicates()
    authors_ventilators = source_ventilators.author_number_ventilators.str.cat(
        sep=', ')

    if locality == 'Brasil':
        authors_beds = 'SUS e Embaixadores'
        authors_ventilators = 'SUS e Embaixadores'

    user_input['n_beds'] = st.number_input(
        f"Número de leitos destinados aos pacientes com Covid-19 (fonte: {authors_beds}, atualizado: {source_beds.last_updated_number_beds.max().strftime('%d/%m')})",
        0, None, int(selected_region['number_beds']))

    user_input['n_ventilators'] = st.number_input(
        f"Número de ventiladores destinados aos pacientes com Covid-19 (fonte: {authors_ventilators}, atualizado: {source_ventilators.last_updated_number_ventilators.max().strftime('%d/%m')}):",
        0, None, int(selected_region['number_ventilators']))

    # POP USER INPUTS
    user_input['population_params'] = {'N': int(selected_region['population'])}
    user_input['population_params']['D'] = st.number_input(
        'Mortes confirmadas:', 0, None, int(selected_region['deaths']))

    # get infected cases
    infectious_period = config['br']['seir_parameters'][
        'severe_duration'] + config['br']['seir_parameters'][
            'critical_duration']

    if selected_region['confirmed_cases'] == 0:
        st.write(f'''<div class="base-wrapper">
                Seu município ou regional de saúde ainda não possui casos reportados oficialmente. Portanto, simulamos como se o primeiro caso ocorresse hoje.
                <br><br>Caso queria, você pode mudar esse número abaixo:
                        </div>''',
                 unsafe_allow_html=True)

        user_input['population_params']['I'] = st.number_input(
            'Casos ativos estimados:', 0, None, 1)

    else:
        user_input['population_params']['I'] = int(
            selected_region['infectious_period_cases'] / notification_rate)

        st.write(f'''<div class="base-wrapper">
                O número de casos confirmados oficialmente no seu município ou regional de saúde é de {int(selected_region['confirmed_cases'].sum())} em {last_update_cases}. 
                Dada a progressão clínica da doença (em média, {infectious_period} dias) e a taxa de notificação ajustada para a região ({int(100*notification_rate)}%), 
                <b>estimamos que o número de casos ativos é de {user_input['population_params']['I']}</b>.
                <br><br>Caso queria, você pode mudar esse número para a simulação abaixo:
                        </div>''',
                 unsafe_allow_html=True)

        user_input['population_params']['I'] = st.number_input(
            'Casos ativos estimados:', 0, None,
            user_input['population_params']['I'])

    # calculate recovered cases
    user_input = calculate_recovered(user_input, selected_region,
                                     notification_rate)

    # AMBASSADOR SECTION
    utils.genAmbassadorSection()
    st.write('<br/>', unsafe_allow_html=True)

    # DEFAULT WORST SCENARIO
    user_input['strategy'] = {'isolation': 90, 'lockdown': 90}
    user_input['population_params']['I'] = (
        user_input['population_params']['I']
        if user_input['population_params']['I'] != 0 else 1)
    _, dday_beds, dday_ventilators = simulator.run_evolution(
        user_input, config)

    worst_case = SimulatorOutput(
        color=BackgroundColor.GREY_GRADIENT,
        min_range_beds=dday_beds['worst'],
        max_range_beds=dday_beds['best'],
        min_range_ventilators=dday_ventilators['worst'],
        max_range_ventilators=dday_ventilators['best'])

    # DEFAULT BEST SCENARIO
    user_input['strategy'] = {'isolation': 0, 'lockdown': 90}
    _, dday_beds, dday_ventilators = simulator.run_evolution(
        user_input, config)

    best_case = SimulatorOutput(
        color=BackgroundColor.LIGHT_BLUE_GRADIENT,
        min_range_beds=dday_beds['worst'],
        max_range_beds=dday_beds['best'],
        min_range_ventilators=dday_ventilators['worst'],
        max_range_ventilators=dday_ventilators['best'])

    resources = ResourceAvailability(locality=locality,
                                     cases=selected_region['active_cases'],
                                     deaths=selected_region['deaths'],
                                     beds=user_input['n_beds'],
                                     ventilators=user_input['n_ventilators'])

    utils.genSimulationSection(int(user_input['population_params']['I']),
                               locality, resources, worst_case, best_case)

    utils.genActNowSection(locality, worst_case)
    utils.genStrategiesSection(Strategies)

    st.write('''
        <div class="base-wrapper">
                <span class="section-header primary-span">Etapa 4: Simule o resultado de possíveis intervenções</span>
                <br />
                <span>Agora é a hora de planejar como você pode melhor se preparar para evitar a sobrecarga hospitalar. Veja como mudanças na estratégia adotada afetam a necessidade de internação em leitos.</span>
        </div>''',
             unsafe_allow_html=True)

    user_input['strategy']['isolation'] = st.slider(
        'Em quantos dias você quer acionar a Estratégia 2, medidas restritivas? (deixe como 0 se a medida já estiver em vigor)',
        0,
        90,
        0,
        key='strategy2')

    user_input['strategy']['lockdown'] = st.slider(
        'Em quantos dias você quer acionar a Estratégia 3, quarentena?',
        0,
        90,
        90,
        key='strategy3')

    st.write('<br/><br/>', unsafe_allow_html=True)

    # SIMULATOR SCENARIOS: BEDS & RESPIRATORS
    fig, dday_beds, dday_ventilators = simulator.run_evolution(
        user_input, config)

    utils.genChartSimulationSection(
        user_input['strategy']['isolation'],
        user_input['strategy']['lockdown'],
        SimulatorOutput(color=BackgroundColor.SIMULATOR_CARD_BG,
                        min_range_beds=dday_beds['worst'],
                        max_range_beds=dday_beds['best'],
                        min_range_ventilators=dday_ventilators['worst'],
                        max_range_ventilators=dday_ventilators['best']), fig)

    utils.genWhatsappButton()
    utils.genFooter()
Example #19
bilstm = True
optim = 'adam'
device = 1
dropout = 0.5
penalty = 0.0
l2_reg = 0.0001
division = 25
seed = 283953214
fine_tune = None

#data_set = 'product/books,product/music'

if type(data_set) is not list:
    data_set = data_set.split(',')
print(data_set)
train_data, dev_data, test_data, c, text_field, label_fields = loader.read_data(
    data_set, batch_size)

#model_path = '../model/conll_dvd_electronics_kitchen_video_music_books_0.0_82.74.torch'
#model_path = '../model/Model.SingleAttn_sentiment_88.03.torch'
#model_path = '../model/old/Model.ASPCatAttn_mr_imdb_sports_soft_mag_baby_video_toys_music_health_camera_apparel_kitchen_dvd_elec_books_0.0001_0.0001_87.11.torch'
#model_path = '../model/Old_Model.AttnOverAttn_sentiment_87.73.torch'
model_path = '../model/Model.AttnOverAttn_topic_sentiment_46.85.torch'
model = torch.load(model_path)
#model = torch.load(model_path, map_location=lambda storage, loc: storage)

model.eval()
for i in range(len(data_set)):
    acc = utils.evaluate(model,
                         test_data,
                         idx=i,
                         use_cuda=True,
Example #20
def get_places_ids(config):
    return loader.read_data("br", config, "br/places/ids")
Example #21
def loading_cached_states():
    return loader.read_data(
        "br",
        loader.config,
        endpoint=loader.config["br"]["api"]["endpoints"]["analysis"]["cases_states"],
    )
Example #22
def main(ex_name,
         data_set,
         pre_emb,
         vocab_size,
         embed_dim,
         lr,
         use_model,
         use_cuda,
         n_epochs,
         batch_size,
         lstm_dim,
         da,
         r,
         bilstm,
         optim,
         device,
         _log,
         _run,
         l2_reg,
         division,
         fine_tune=None,
         seed=283953214,
         p_lambda=0.0,
         dropout=0,
         adversarial=False,
         save_model=False,
         transfer_params=False,
         penalty=0.0,
         params=[],
         window_size=0,
         epsilon=0.0,
         search_n=0,
         nlayers=1,
         p_lambda2=0.0,
         rl_batch=1,
         p_gamma=1.0,
         a_pen=0.0,
         rec_acc=False,
         lock_params=False):
    _run.info['ex_name'] = ex_name
    seed = 12345678

    torch.manual_seed(seed)
    data_set = data_set.split(',')

    #train_data, dev_data, test_data, c, vocab_size, word_vector = loader.read_data(data_set, batch_size)
    import dill
    import pickle
    import os
    if len(data_set) == 1 and data_set[0] == 'imdb':
        print('IMDB!')
        file_path = '../data/aclImdb/data_%d.pickle' % batch_size
        if os.path.exists(file_path):
            print('reading IMDB pickle')
            with open(file_path, 'rb') as f:
                train_data, dev_data, test_data, c, text_field, label_fields = pickle.load(
                    f)
        else:
            train_data, dev_data, test_data, c, text_field, label_fields = loader.read_data(
                data_set, batch_size)
            print('writing IMDB pickle')
            with open(file_path, 'wb') as f:
                pickle.dump((train_data, dev_data, test_data, c, text_field,
                             label_fields), f)
    else:
        train_data, dev_data, test_data, c, text_field, label_fields = loader.read_data(
            data_set, batch_size)
    #exit()
    lf = label_fields[-1]
    print(lf.vocab.itos)
    #exit(0)
    vocab_size = len(text_field.vocab.itos)
    word_vector = text_field.vocab.vectors

    if adversarial and len(data_set) >= 15:
        adv_train_data, _dev_data, _test_data, _c, _text_field, _label_fields = loader.read_data(
            ['product/topic'], batch_size)

    model_param = OrderedDict()
    model_param['vocab_size'] = vocab_size
    model_param['embed_dim'] = embed_dim
    model_param['lstm_dim'] = lstm_dim
    model_param['da'] = da
    if len(data_set) == 1:
        model_param['c'] = c
    else:
        model_param['cs'] = c
    model_param['batch_size'] = batch_size
    model_param['r'] = r
    model_param['bilstm'] = bilstm
    model_param['device'] = device
    model_param['dropout'] = dropout
    model_param['params'] = params
    model_param['window_size'] = window_size
    model_param['epsilon'] = epsilon
    model_param['p_lambda'] = p_lambda
    model_param['p_lambda2'] = p_lambda2
    model_param['p_gamma'] = p_gamma
    model_param['search_n'] = search_n
    model_param['nlayers'] = nlayers
    model_param['rl_batch'] = rl_batch
    model_param['a_pen'] = a_pen
    model_param['data_set'] = data_set

    if use_cuda and torch.cuda.is_available():
        model_param['use_cuda'] = True
    else:
        use_cuda = False
        model_param['use_cuda'] = False
    _run.info['model_param'] = model_param

    model = eval('Model.' + use_model)(model_param)
    print(model)
    _run.info['model'] = str(model)

    params = list(model.parameters())
    ans = 0
    for p in params:
        s = p.size()
        if len(s) == 1:
            ans += s[0]
        else:
            if s[0] >= 24739:
                continue
            ans += s[0] * s[1]
    #print(params)
    print(ans)
    exit()

    if model_param['use_cuda']:
        model.cuda(device)

    print("Initialize the word-embed layer")
    model.init_embed(word_vector)

    if transfer_params:
        #model_from = torch.load('../model/copy/Model.MaxAttn_sentiment_86.77_copy.torch')
        model_from = torch.load('../model/LM_imdb_151.torch')
        #model_from = torch.load('../model/copy/Model.MaxAttn_sentiment_0.50_0.50_86.80.torch')
        #model_from = torch.load('../model/copy/Model.MaxAttn_sentiment_0.00_1.00_87.17.torch')
        #model_from = torch.load('../model/copy/Model.MaxAttn_sentiment_0.50_1.00_86.58.torch')
        copy_param(model_from.encoder, model.embed)
        model.embed.cuda(model.device)
        copy_param(model_from.rnn, model.lstm)
        model.lstm.cuda(model.device)
        transfer_params = False
        #lock_params = True
        #lock_params = False
        '''MaxAttn
        copy_param(model_from.embed, model.embed)
        model.embed.cuda(model.device)
        copy_param(model_from.lstm, model.lstm)
        model.lstm.cuda(model.device)
        copy_param(model_from.attn, model.attn)
        model.attn.cuda(model.device)
        copy_param(model_from.mlp1, model.mlp1)
        model.mlp1.cuda(model.device)
        if 'unfix' in params:
            transfer_params = False
        '''
        """
        '''RelateNet'''
        model_from = torch.load('../model/Model.MultiSingleAttn_topic_sentiment__82.79.torch')
        copy_param(model_from.embed, model.embed)
        model.embed.cuda(model.device)
        copy_param(model_from.lstm, model.lstm)
        model.lstm.cuda(model.device)
        copy_param(model_from.attn[0], model.attn1[0])
        model.attn1[0].cuda(model.device)
        copy_param(model_from.attn[1], model.attn1[1])
        model.attn1[1].cuda(model.device)
        if 'unfix' in params:
            transfer_params = False
        """
        '''
        copy_param(model_from.embed, model.embed)
        model.embed.cuda(model.device)
        copy_param(model_from.lstm, model.lstm)
        model.lstm.cuda(model.device)
        copy_param(model_from.attn[0], model.attn[0])
        model.attn[0].cuda(model.device)
        copy_param(model_from.attn[1], model.attn[1])
        model.attn[1].cuda(model.device)
        #transfer_params = False
        '''

        #copy_param(model_from.attn[1], model.attn2)
        #model.attn2.cuda(model.device)

    stop = set([text_field.vocab.stoi[w] for w in ['.', '!', '?']])

    print('fine_tune', fine_tune)
    if fine_tune is not None:
        model = torch.load('../model/' + fine_tune,
                           map_location=lambda storage, loc: storage)
        model.cuda(device)
        model.device = device
        #utils.fine_tune(n_epochs, division, train_data, dev_data, model, lr, optim, use_cuda, l2_reg, data_set, p_lambda, _run)
        #exit()
        #utils.fine_tune(n_epochs, division, train_data, dev_data, test_data, model, lr, optim, use_cuda, l2_reg, data_set, p_lambda, p_gamma, save_model, adversarial, _run, lock_params=lock_params, penalty=penalty, params=params, stop=stop)
        #exit()

    if len(data_set) == 1:
        utils.train(n_epochs, division, train_data, dev_data, test_data, model,
                    lr, optim, use_cuda, l2_reg, data_set, save_model, stop,
                    _run)
    else:
        if adversarial and len(data_set) >= 15:
            utils.multi_train(n_epochs,
                              division,
                              train_data,
                              dev_data,
                              test_data,
                              model,
                              lr,
                              optim,
                              use_cuda,
                              l2_reg,
                              data_set,
                              p_lambda,
                              p_gamma,
                              save_model,
                              adversarial,
                              _run,
                              adv_train_iters=adv_train_data,
                              adv_label_field=_label_fields[0],
                              params=params)
        else:
            utils.multi_train(n_epochs,
                              division,
                              train_data,
                              dev_data,
                              test_data,
                              model,
                              lr,
                              optim,
                              use_cuda,
                              l2_reg,
                              data_set,
                              p_lambda,
                              p_gamma,
                              save_model,
                              adversarial,
                              _run,
                              lock_params=False,
                              penalty=penalty,
                              params=params,
                              stop=stop,
                              rec_acc=rec_acc)
Example #23
def main():
    data = read_data()
    data_frames = []
    ctr = 0
    length = len(data)

    # Continue through the list until there is nothing left
    while ctr < length:
        data_frames.append(pd.DataFrame(np.transpose(data[ctr][2])))
        ctr += 1

    print("\n" + str(ctr) + " : Months Processed\n")

    # Set up the final dataframe with the columns
    df = pd.concat(data_frames, ignore_index=True)
    df.columns = VARNAMES

    # Add in Label Column and make them 1 or 0
    df["Label"] = df["Precipitation"] > 0
    df.Label = df.Label.astype(int)
    df = df.drop(columns=["Precipitation"])
    print(df)

    # Examine feature correlations with the label
    correlation_matrix = df.corr()
    print(correlation_matrix["Label"].sort_values(ascending=False))

    # Create a stratified shuffle split for the data
    split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    for train_index, test_index in split.split(df, df["Label"]):
        strat_train = df.loc[train_index]
        strat_test = df.loc[test_index]

    # Prepare the Data for Machine Learning
    weather_train = strat_train.drop("Label", axis=1)
    weather_labels = strat_train["Label"].copy()

    weather_test = strat_test.drop("Label", axis=1)
    test_labels = strat_test["Label"].copy()

    # A couple of statements to get rid of verbosity (only shows errors, not warnings)
    old_v = tf.logging.get_verbosity()
    tf.logging.set_verbosity(
        tf.logging.ERROR
    )  # old_v can be used to set_verbosity(old_v) back to old messages

    # Set up the network dimensions (input size matches the feature count)
    nn_inputs = weather_train.shape[1]
    nn_hidden = 16
    nn_outputs = 2

    # Setup variables
    X = tf.placeholder(tf.float32, shape=(None, nn_inputs), name="X")
    y = tf.placeholder(tf.int64, shape=(None), name="y")

    # Building the tf.name_scope
    learning_rate = 0.01

    # Hidden layer uses a sigmoid activation (see the activation parameter of hidden_2)
    with tf.name_scope("dnn"):
        hidden_2 = tf.layers.dense(X,
                                   nn_hidden,
                                   name="hidden_2",
                                   reuse=tf.AUTO_REUSE,
                                   activation=tf.nn.sigmoid)
        logits = tf.layers.dense(hidden_2,
                                 nn_outputs,
                                 reuse=tf.AUTO_REUSE,
                                 name="outputs")

    # Created loss function
    with tf.name_scope("loss"):
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y, logits=logits)
        loss = tf.reduce_mean(xentropy, name="loss")

    # Implemented a gradient descent
    with tf.name_scope("train"):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        training_op = optimizer.minimize(loss)

    # Evaluation Functions
    with tf.name_scope("eval"):
        correct = tf.nn.in_top_k(logits, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    # Set Epoch and Batch Size
    epochs = 40
    batch_size = 50

    # Begin evaluation
    with tf.Session() as sess:
        init.run()
        for epoch in range(epochs):
            sess.run(training_op,
                     feed_dict={
                         X: weather_train,
                         y: weather_labels
                     })

            # Report the training accuracies
            training_acc = accuracy.eval(feed_dict={
                X: weather_train,
                y: weather_labels
            })
            validation_acc = accuracy.eval(feed_dict={
                X: weather_test,
                y: test_labels
            })

            print("Epoch :" + str(epoch) + "\n[Train Accuracy] : " +
                  str(training_acc) + "\n[Validation Accuracy] :" +
                  str(validation_acc))
Example #24
def main():
    data = read_data()
    data_frames = []
    ctr = 0
    length = len(data)

    # Continue through the list until there is nothing left
    while ctr < length:
        data_frames.append(pd.DataFrame(np.transpose(data[ctr][2])))
        ctr += 1

    print("\n" + str(ctr) + " : Months Processed\n")

    # Set up the final dataframe with the columns
    df = pd.concat(data_frames, ignore_index=True)
    df.columns = VARNAMES

    # Add in Label Column and make them 1 or 0
    df["Label"] = df["Precipitation"] > 0
    df.Label = df.Label.astype(int)
    df = df.drop(columns=["Precipitation"])
    print(df)

    # Examine feature correlations with the label
    correlation_matrix = df.corr()
    print(correlation_matrix["Label"].sort_values(ascending=False))

    # Create a stratified shuffle split for the data 80/20 for train/test
    split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    for train_index, test_index in split.split(df, df["Label"]):
        strat_train = df.loc[train_index]
        strat_test = df.loc[test_index]

    # Prepare the Data for Machine Learning
    weather_train = strat_train.drop("Label", axis=1)
    weather_labels = strat_train["Label"].copy()

    # Setup test set
    weather_test = strat_test.drop("Label", axis=1)
    test_labels = strat_test["Label"].copy()

    # Setup a Linear Regression model for comparison
    lin_reg = LinearRegression()
    lin_reg.fit(weather_train, weather_labels)

    # Setup a Grid Searched SVM
    params = best_parameters(weather_train, weather_labels, 5)
    support_vector = svm.SVC(kernel=params['kernel'],
                             C=params['C'],
                             gamma=params['gamma'])
    support_vector.fit(weather_train, weather_labels)

    # Do Linear Predictions
    linear_predictions = lin_reg.predict(weather_train)
    linear_test_predictions = lin_reg.predict(weather_test)

    # Do SVM Predictions
    svm_predictions = support_vector.predict(weather_train)
    svm_test_predictions = support_vector.predict(weather_test)

    # Print out errors
    lin_mae = mean_absolute_error(weather_labels, linear_predictions)
    lin_test_mae = mean_absolute_error(test_labels, linear_test_predictions)
    svm_mae = mean_absolute_error(weather_labels, svm_predictions)
    svm_test_mae = mean_absolute_error(test_labels, svm_test_predictions)
    print("Linear MeanAbsoluteError on train: " + str(1 - lin_mae) + "%")
    print("Linear MeanAbsoluteError on test : " + str(1 - lin_test_mae) + "%")
    print("SVM MeanAbsoluteError on train   : " + str(1 - svm_mae) + "%")
    print("SVM MeanAbsoluteError on test    : " + str(1 - svm_test_mae) + "%")