Exemplo n.º 1
0
async def get_info(rarity,
                   goal: int,
                   xp_boost: int,
                   current_level: int,
                   current_xp: int,
                   price: float = None,
                   skill: str = None):
    """Compute the coin cost of levelling a pet to ``goal`` and explain it.

    Args:
        rarity: pet rarity, forwarded to ``find_Level_values`` to select
            the cumulative-xp levelling table.
        goal: target pet level (1-indexed into the levelling table).
        xp_boost: pet-xp boost in percent (0 means no boost).
        current_level: the pet's current level.
        current_xp: extra xp accumulated beyond ``current_level``.
        price: coin cost of a single skill xp; when omitted it is looked
            up in the module-level ``pricing`` table by ``skill``.
        skill: skill name used for the price lookup when ``price`` is None.

    Returns:
        ``pet_info`` with numerized total xp, xp still needed, the final
        price, and a human-readable explanation string.
    """
    if price is None:  # identity check, not equality (was `== None`)
        # NOTE(review): assumes `skill` is provided whenever `price` is
        # omitted; a None skill raises AttributeError here — confirm callers.
        price = pricing[skill.lower()]
    levelling_table = await find_Level_values(rarity)
    # Total xp already earned: cumulative xp of the current level plus extra.
    if current_level > 0:
        total_xp = int(levelling_table[current_level - 1]) + current_xp
    else:
        total_xp = int(levelling_table[current_level]) + current_xp
    xp_needed = levelling_table[goal - 1] - total_xp
    # e.g. a 20% boost makes every skill xp count as 1.2 pet xp
    total_boost = float((xp_boost / 100) + 1)
    price_without_boost = int(round(xp_needed * price))
    if xp_boost > 0:
        # With a boost, fewer skill xp (and thus fewer coins) are needed.
        total_price = int(round((xp_needed * price) / total_boost))
    else:
        total_price = price_without_boost

    # Build the step-by-step explanation shown to the user.
    # NOTE(review): index 100 appears to hold the pet's name/rarity label
    # in the levelling table — TODO confirm against find_Level_values.
    explain = "You want to level up the pet to level " + str(
        goal) + ", which will take a total of " + str(
            levelling_table[goal - 1]) + " pet xp.\nYour " + str(
                levelling_table[100]) + " pet, at level " + str(current_level)
    if current_xp > 0:
        explain = explain + ' with an extra ' + str(
            current_xp) + ' xp on it, has a total of ' + str(
                total_xp) + ' pet xp.\n'
    explain = explain + ' So, the pet xp you need to reach your goal is equal to ' + str(
        levelling_table[goal - 1]
    ) + ' - ' + str(total_xp) + ', which is ' + str(
        xp_needed) + ' pet xp.\nA single skill xp costs ' + str(
            price
        ) + ' coins, and skill xp = pet xp (before counting xp boost items),\n'
    explain = explain + 'so all of your pet xp costs ' + str(
        xp_needed) + ' multiplied by ' + str(price) + ', which is ' + str(
            price_without_boost) + ' coins.\n'
    if total_boost > 1:
        explain = explain + 'Finally, with your ' + str(
            xp_boost) + '% pet xp boost, every 1 skill xp counts as ' + str(
                total_boost) + ' pet xp,\nso ' + str(
                    price) + ' skill xp divided by ' + str(
                        total_boost) + ' equals ' + str(
                            total_price) + ' coins as a final price.'
    return pet_info(numerize.numerize(total_xp), numerize.numerize(xp_needed),
                    numerize.numerize(total_price), explain)
Exemplo n.º 2
0
def get_repository_info(owner, name):
    """
    Get the relevant information needed for the repository from
    its owner login and name.

    Returns a ``RepositoryInfo`` when the repo exists and has at least
    one good first issue; otherwise returns ``None`` (including when the
    repo is not found, which is logged as a warning rather than raised).

    Raises:
        AssertionError: if ``GITHUB_ACCESS_TOKEN`` is not set.
    """

    LOGGER.info("Getting info for %s/%s", owner, name)

    access_token = getenv("GITHUB_ACCESS_TOKEN")
    if not access_token:
        raise AssertionError(
            "Access token not present in the env variable `GITHUB_ACCESS_TOKEN`"
        )

    # create a logged in GitHub client
    client = login(token=access_token)

    # get the repository; if the repo is not found, log a warning
    try:
        repository = client.repository(owner, name)

        good_first_issues = list(
            repository.issues(
                labels=ISSUE_LABELS,
                state=ISSUE_STATE,
                number=ISSUE_LIMIT,
                sort=ISSUE_SORT,
                direction=ISSUE_SORT_DIRECTION,
            ))
        LOGGER.info("\t found %d good first issues", len(good_first_issues))
        # check if repo has at least one good first issue
        if good_first_issues:
            return RepositoryInfo(
                name=name,
                owner=owner,
                description=emojize(repository.description or ""),
                language=repository.language,
                url=repository.html_url,
                stars=repository.stargazers_count,
                stars_display=numerize.numerize(repository.stargazers_count),
                last_modified=repository.last_modified,
                id=str(repository.id),
                objectID=str(repository.id),  # for indexing on algolia,
                issues=[
                    Issue(
                        issue.title,
                        issue.html_url,
                        issue.number,
                        issue.created_at.isoformat(),
                    ) for issue in good_first_issues
                ],
            )

        LOGGER.info("\t skipping the repo")
        return None
    except exceptions.NotFoundError:
        LOGGER.warning("Not Found: %s", f"{owner}/{name}")
        # explicit: not-found repos yield None (was an implicit fall-through)
        return None
Exemplo n.º 3
0
def get_uer_info(username):
    """Scrape a Nitter profile page and return a dict of profile fields.

    Returns ``False`` when the instance shows an error panel (e.g. the
    user does not exist); otherwise a dict with the full name, username,
    bio, tweet/following/follower/like counts and profile-picture URL.

    NOTE(review): the function name has a typo ('uer' for 'user'); kept
    unchanged because callers reference it by this name.
    """
    # Fetch the raw profile HTML from the configured Nitter instance.
    response = urllib.request.urlopen('{instance}{user}'.format(
        instance=NITTERINSTANCE, user=username)).read()
    #rssFeed = feedparser.parse(response.content)

    # str(response) stringifies the bytes object (including the b'' repr),
    # hence the encode/decode chains below to repair the escaped text.
    html = BeautifulSoup(str(response), "lxml")
    if html.body.find('div', attrs={'class': 'error-panel'}):
        # Nitter renders an error panel for unknown/suspended accounts.
        return False
    else:
        html = html.body.find('div', attrs={'class': 'profile-card'})

        if html.find('a', attrs={'class': 'profile-card-fullname'}):
            # latin1 -> unicode_escape -> latin1 -> utf8 chain undoes the
            # escape sequences introduced by str(bytes) and then decodes
            # the original UTF-8 text (mojibake repair).
            fullName = html.find('a', attrs={
                'class': 'profile-card-fullname'
            }).getText().encode('latin1').decode('unicode_escape').encode(
                'latin1').decode('utf8')
        else:
            fullName = None

        if html.find('div', attrs={'class': 'profile-bio'}):
            # Same mojibake-repair chain as above for the bio text.
            profileBio = html.find('div', attrs={
                'class': 'profile-bio'
            }).getText().encode('latin1').decode('unicode_escape').encode(
                'latin1').decode('utf8')
        else:
            profileBio = None

        # The four profile-stat-num spans are, in page order:
        # tweets, following, followers, likes.
        user = {
            "profileFullName":
            fullName,
            "profileUsername":
            html.find('a', attrs={
                'class': 'profile-card-username'
            }).string.encode('latin_1').decode('unicode_escape').encode(
                'latin_1').decode('utf8'),
            "profileBio":
            profileBio,
            "tweets":
            html.find_all('span', attrs={'class':
                                         'profile-stat-num'})[0].string,
            "following":
            html.find_all('span', attrs={'class':
                                         'profile-stat-num'})[1].string,
            "followers":
            # Only the follower count is abbreviated (e.g. 12345 -> '12.3K').
            numerize.numerize(
                int(
                    html.find_all('span', attrs={'class': 'profile-stat-num'
                                                 })[2].string.replace(",",
                                                                      ""))),
            "likes":
            html.find_all('span', attrs={'class':
                                         'profile-stat-num'})[3].string,
            "profilePic":
            # [1:] drops the leading '/' so the path joins onto the instance URL.
            "{instance}{pic}".format(
                instance=NITTERINSTANCE,
                pic=html.find('a', attrs={'class':
                                          'profile-card-avatar'})['href'][1:])
        }
        return user
Exemplo n.º 4
0
async def getPrice(ctx, stock_name, period, interval):
    """Discord command helper: chart and summarize a stock's recent price.

    Fetches history for ``stock_name`` via yfinance over ``period`` at
    ``interval``, posts a closing-price chart image to the channel, then
    posts a text summary (price, market cap, 24h and 10-day volume).
    """
    tickr = yf.Ticker(stock_name)
    tick = tickr.history(period=period, interval=interval)
    day_tick = tickr.history(period='1d')  # creates new db for 24hr Volume

    tick = tick.dropna()  # drops missing values

    # First open of the window vs. most recent close.
    OPEN = tick['Open'].iloc[0]
    CLOSE = tick['Close'].iloc[-1]
    VOLUME = day_tick['Volume'].iloc[0]

    color = line_color(OPEN, CLOSE)  # Selects Red or Green
    per_Change = round((CLOSE - OPEN) / OPEN * 100, 2)

    # NOTE(review): module-level pyplot state is shared; concurrent
    # invocations of this coroutine could interleave chart drawing.
    plt.clf()  # Clears previous chart
    plt.plot(tick['Close'], color)
    # Percent-change label anchored in axes coordinates (top-left).
    plt.text(0.1,
             0.9,
             str(per_Change) + '%',
             transform=plt.gca().transAxes,
             color=color)
    plt.title(stock_name.upper())
    plt.ylabel('Closing Price')
    plt.xticks(rotation=45)
    plt.tight_layout()

    plt.savefig('chart.png')
    file = discord.File('chart.png', filename='chart.png')
    # NOTE(review): the message text 'online.png' looks like a leftover;
    # the attached file is chart.png — confirm intended message content.
    await ctx.send('online.png', file=file)

    price = round(CLOSE, 2)
    # Abbreviate large volume/market-cap numbers (e.g. 1.23M) to 2 decimals.
    _10dayVol = numerize.numerize(tickr.info['averageDailyVolume10Day'], 2)
    _24hrVol = numerize.numerize(int(VOLUME), 2)
    market_cap = numerize.numerize(tickr.info['marketCap'], 2)

    await ctx.send(f"""
Stock Price: ${price}
Market Cap: {market_cap}

24 Hour Vol: {_24hrVol}
10 Day Vol: {_10dayVol}""")
Exemplo n.º 5
0
def plot(y, df):
    """
    This function is used to create the bar plot for States within each party vs vaccination rate.

    Renders (via ``fig.show()``) a log-scale bar chart of column ``y``
    grouped by party, colored by 2019 population, with custom hover rows
    for population, vaccinated count, and vaccination rate.

    :param y: Title for y axis.
    :param df: Dataframe. Expected columns: 'Party', 'Population_2019',
        'Total Individuals Vaccinated', 'VaccinationRate', and ``y``.
    """
    fig = px.bar(
        df,
        x='Party',
        y=y,
        log_y=True,
        title='Vaccination per Political Party',
        hover_name='Party',
        # Extra hover rows: 'a'/'tiv'/'vr' are synthetic keys relabeled
        # below; the False entries suppress default hover rows.
        hover_data={
            'a':
            [numerize.numerize(x, 0) for x in df.Population_2019.tolist()],
            'tiv': [
                numerize.numerize(x, 0)
                for x in df['Total Individuals Vaccinated'].tolist()
            ],
            'vr': [round(x, 2) for x in df['VaccinationRate'].tolist()],
            y:
            False,
            'VaccinationRate':
            False,
            'Party':
            False,
            'Population_2019':
            False
        },
        # Human-readable labels for the synthetic hover keys above.
        labels={
            'a': 'population',
            'tiv': '# vaccinated',
            'vr': '% vaccinated'
        },
        color='Population_2019',
        color_continuous_scale='darkmint')
    fig.update_layout(plot_bgcolor='white')
    fig.show()
    def generate_trace(self, v1, v2):
        """Build a line trace comparing versions *v1* and *v2*.

        Hover text shows, for each y column, the diff between versions
        alongside the current (*v1*) and previous (*v2*) raw values.
        """
        deltas = self.differences(v1, v2)
        current_vals = self.version_values(v1)
        previous_vals = self.version_values(v2)

        hovertemplate = "<b>%{x}</b> %{text}"
        # One abbreviated hover string per y column.
        text = [
            "Diff: {} | Current: {} | Prev: {}".format(
                numerize(deltas[idx]),
                numerize(current_vals[idx]),
                numerize(previous_vals[idx]),
            ) for idx in range(len(self.y_columns))
        ]

        return go.Scatter(
            x=self.y_columns,
            y=self.x_columns(v1, v2),
            mode="lines",
            name=self.version_pair(v1, v2),
            hovertemplate=hovertemplate,
            text=text,
        )
Exemplo n.º 7
0
def get_repository_info(owner, name):
    """
    Get the relevant information needed for the repository from
    its owner login and name.
    """

    LOGGER.info("Getting info for %s/%s", owner, name)

    # create an anonymous GitHub client
    client = GitHub()

    # get the repository; if the repo is not found, raise an error
    try:
        repository = client.repository(owner, name)

        # assemble the repo info in a single literal
        info = {
            "name": name,
            "owner": owner,
            "language": repository.language,
            "url": repository.html_url,
            "stars": repository.stargazers_count,
            "stars_display": numerize.numerize(repository.stargazers_count),
            "last_modified": repository.last_modified,
            "id": str(repository.id),
            "objectID": str(repository.id),  # for indexing on algolia
        }

        # get the latest issues with the tag
        info["issues"] = [{
            "title": issue.title,
            "url": issue.html_url,
            "number": issue.number,
            "created_at": issue.created_at.isoformat()
        } for issue in repository.issues(
            labels=ISSUE_LABELS,
            state=ISSUE_STATE,
            number=ISSUE_LIMIT,
            sort=ISSUE_SORT,
            direction=ISSUE_SORT_DIRECTION,
        )]

        return info
    except exceptions.NotFoundError:
        raise RepoNotFoundException()
Exemplo n.º 8
0
def ubi(state_dropdown, level, agi_tax, benefits, taxes, include):
    """this does everything from microsimulation to figure creation.
        Dash does something automatically where it takes the input arguments
        in the order given in the @app.callback decorator
    Args:
        state_dropdown:  takes input from callback input, component_id="state-dropdown"
        level:  component_id="level"
        agi_tax:  component_id="agi-slider"
        benefits:  component_id="benefits-checklist"
        taxes:  component_id="taxes-checklist"
        include: component_id="include-checklist"

    Returns:
        ubi_line: outputs to  "ubi-output" in @app.callback
        revenue_line: outputs to "revenue-output" in @app.callback
        ubi_population_line: outputs to "revenue-output" in @app.callback
        winners_line: outputs to "winners-output" in @app.callback
        resources_line: outputs to "resources-output" in @app.callback
        fig: outputs to "econ-graph" in @app.callback
        fig2: outputs to "breakdown-graph" in @app.callback
    """
    # NOTE(review): relies on module-level globals (`spmu`, `person`,
    # `demog_stats`, `all_state_stats`, `mdf`, `np`, `go`, `numerize`,
    # `BLUE`) — confirm they are defined at import time in the full module.

    # -------------------- calculations based on reform level -------------------- #
    # if the "Reform level" selected by the user is federal
    if level == "federal":
        # combine taxes and benefits checklists into one list to be used to
        #  subset spmu dataframe
        taxes_benefits = taxes + benefits
        # initialize new resources column with old resources as baseline
        # NOTE(review): this mutates the global `spmu` dataframe in place,
        # so the callback is not safe for concurrent invocations.
        spmu["new_resources"] = spmu.spmtotres
        # initialize revenue at zero
        revenue = 0

        # Calculate the new revenue and spmu resources from tax and benefit change
        for tax_benefit in taxes_benefits:
            # subtract taxes and benefits that have been changed from spm unit's resources
            spmu.new_resources -= spmu[tax_benefit]
            # add that same value to revenue
            revenue += mdf.weighted_sum(spmu, tax_benefit, "spmwt")

        # if "Income taxes" = ? and "child_tax_credit" = ?
        # in taxes/benefits checklist
        # (avoid double-counting: CTC/EITC are already part of fedtaxac)
        if ("fedtaxac" in taxes_benefits) & ("ctc" in taxes_benefits):
            spmu.new_resources += spmu.ctc
            revenue -= mdf.weighted_sum(spmu, "ctc", "spmwt")

        if ("fedtaxac" in taxes_benefits) & ("eitcred" in taxes_benefits):
            spmu.new_resources += spmu.eitcred
            revenue -= mdf.weighted_sum(spmu, "eitcred", "spmwt")

        # Calculate the new taxes from flat tax on AGI
        tax_rate = agi_tax / 100
        # negative AGI is clipped to zero so no one gets a tax rebate here
        spmu["new_taxes"] = np.maximum(spmu.adjginc, 0) * tax_rate
        # subtract new taxes from new resources
        spmu.new_resources -= spmu.new_taxes
        # add new revenue when new taxes are applied on spmus, multiplied by weights
        revenue += mdf.weighted_sum(spmu, "new_taxes", "spmwt")

        # Calculate the total UBI a spmu recieves based on exclusions
        spmu["numper_ubi"] = spmu.numper

        # TODO make into linear equation on one line using array of some kind
        if "children" not in include:
            # subtract the number of children from the number of
            # people in spm unit receiving ubi benefit
            spmu["numper_ubi"] -= spmu.child

        if "non_citizens" not in include:
            spmu["numper_ubi"] -= spmu.non_citizen

        # non-citizen children were subtracted twice above; add them back once
        if ("children" not in include) and ("non_citizens" not in include):
            spmu["numper_ubi"] += spmu.non_citizen_child

        if "adults" not in include:
            spmu["numper_ubi"] -= spmu.adult

        # same double-subtraction correction for non-citizen adults
        if ("adults" not in include) and ("non_citizens" not in include):
            spmu["numper_ubi"] += spmu.non_citizen_adult

        # Assign UBI
        ubi_population = (spmu.numper_ubi * spmu.spmwt).sum()
        ubi_annual = revenue / ubi_population
        spmu["total_ubi"] = ubi_annual * spmu.numper_ubi

        # Calculate change in resources
        spmu.new_resources += spmu.total_ubi
        spmu["new_resources_per_person"] = spmu.new_resources / spmu.numper
        # Sort by state

        # NOTE: the "target" here refers to the population being
        # measured for gini/poverty rate/etc.
        # I.e. the total population of the state/country and
        # INCLUDING those excluding form recieving ubi payments

        # state here refers to the selection from the drop down, not the reform level
        if state_dropdown == "US":
            target_spmu = spmu
        else:
            target_spmu = spmu[spmu.state == state_dropdown]

    # if the "Reform level" dropdown selected by the user is State
    if level == "state":

        # Sort by state
        if state_dropdown == "US":
            target_spmu = spmu
        else:
            target_spmu = spmu[spmu.state == state_dropdown]

        # Initialize
        # NOTE(review): when a state subset is selected, these assignments
        # write to a slice of `spmu` and may raise SettingWithCopyWarning.
        target_spmu["new_resources"] = target_spmu.spmtotres
        revenue = 0

        # Change income tax repeal to state level
        if "fedtaxac" in taxes:
            target_spmu.new_resources -= target_spmu.stataxac
            revenue += mdf.weighted_sum(target_spmu, "stataxac", "spmwt")

        # Calculate change in tax revenue
        tax_rate = agi_tax / 100
        target_spmu["new_taxes"] = target_spmu.adjginc * tax_rate

        target_spmu.new_resources -= target_spmu.new_taxes
        revenue += mdf.weighted_sum(target_spmu, "new_taxes", "spmwt")

        # Calculate the total UBI a spmu recieves based on exclusions
        target_spmu["numper_ubi"] = target_spmu.numper

        if "children" not in include:
            target_spmu["numper_ubi"] -= target_spmu.child

        if "non_citizens" not in include:
            target_spmu["numper_ubi"] -= target_spmu.non_citizen

        # add back non-citizen children subtracted twice above
        if ("children" not in include) and ("non_citizens" not in include):
            target_spmu["numper_ubi"] += target_spmu.non_citizen_child

        if "adults" not in include:
            target_spmu["numper_ubi"] -= target_spmu.adult

        # add back non-citizen adults subtracted twice above
        if ("adults" not in include) and ("non_citizens" not in include):
            target_spmu["numper_ubi"] += target_spmu.non_citizen_adult

        # Assign UBI
        ubi_population = (target_spmu.numper_ubi * target_spmu.spmwt).sum()
        ubi_annual = revenue / ubi_population
        target_spmu["total_ubi"] = ubi_annual * target_spmu.numper_ubi

        # Calculate change in resources
        target_spmu.new_resources += target_spmu.total_ubi
        target_spmu["new_resources_per_person"] = (target_spmu.new_resources /
                                                   target_spmu.numper)

    # NOTE: code after this applies to both reform levels

    # Merge and create target_persons -
    # NOTE: the "target" here refers to the population being
    # measured for gini/poverty rate/etc.
    # I.e. the total population of the state/country and
    # INCLUDING those excluding form recieving ubi payments
    sub_spmu = target_spmu[[
        "spmfamunit", "year", "new_resources", "new_resources_per_person"
    ]]
    target_persons = person.merge(sub_spmu, on=["spmfamunit", "year"])

    # filter demog_stats for selected state from dropdown
    baseline_demog = demog_stats[demog_stats.state == state_dropdown]

    # TODO: return dictionary of results instead of return each variable
    def return_demog(demog, metric):
        """
        retrieve pre-processed data by demographic
        args:
            demog - string one of
                ['person', 'adult', 'child', 'black', 'white',
            'hispanic', 'pwd', 'non_citizen', 'non_citizen_adult',
            'non_citizen_child']
            metric - string, one of ['pov_rate', 'pop']
        returns:
            value - float
        """
        # NOTE: baseline_demog is a dataframe with global scope
        value = baseline_demog.loc[
            (baseline_demog["demog"] == demog)
            & (baseline_demog["metric"] == metric), "value",
            # NOTE: returns the first value as a float, be careful if you redefine baseline_demog
        ].values[0]

        return value

    population = return_demog(demog="person", metric="pop")
    child_population = return_demog(demog="child", metric="pop")
    non_citizen_population = return_demog(demog="non_citizen", metric="pop")
    non_citizen_child_population = return_demog(demog="non_citizen_child",
                                                metric="pop")

    # filter all state stats gini, poverty_gap, etc. for dropdown state
    baseline_all_state_stats = all_state_stats[all_state_stats.index ==
                                               state_dropdown]

    def return_all_state(metric):
        """filter baseline_all_state_stats and return value of select metric

        Keyword arguments:
        metric - string, one of 'poverty_gap', 'gini', 'total_resources'

        returns:
            value- float
        """

        return baseline_all_state_stats[metric].values[0]

    # Calculate total change in resources
    original_total_resources = return_all_state("total_resources")
    # DO NOT PREPROCESS, new_resources
    new_total_resources = (target_spmu.new_resources * target_spmu.spmwt).sum()
    change_total_resources = new_total_resources - original_total_resources
    change_pp = change_total_resources / population

    original_poverty_rate = return_demog("person", "pov_rate")

    original_poverty_gap = return_all_state("poverty_gap")
    # define orignal gini coefficient
    original_gini = return_all_state("gini")

    # function to calculate rel difference between one number and another
    # NOTE: the `round` parameter shadows the builtin; `.round()` here is
    # the pandas/numpy method, not the builtin function.
    def rel_change(new, old, round=3):
        return ((new - old) / old).round(round)

    # Calculate poverty gap
    target_spmu["new_poverty_gap"] = np.where(
        target_spmu.new_resources < target_spmu.spmthresh,
        target_spmu.spmthresh - target_spmu.new_resources,
        0,
    )
    poverty_gap = mdf.weighted_sum(target_spmu, "new_poverty_gap", "spmwt")
    poverty_gap_change = rel_change(poverty_gap, original_poverty_gap)

    # Calculate the change in poverty rate
    target_persons["poor"] = (target_persons.new_resources <
                              target_persons.spmthresh)
    total_poor = (target_persons.poor * target_persons.asecwt).sum()
    poverty_rate = total_poor / population
    poverty_rate_change = rel_change(poverty_rate, original_poverty_rate)

    # Calculate change in Gini
    gini = mdf.gini(target_persons, "new_resources_per_person", "asecwt")
    gini_change = rel_change(gini, original_gini, 3)

    # Calculate percent winners
    target_persons["winner"] = (target_persons.new_resources >
                                target_persons.spmtotres)
    total_winners = (target_persons.winner * target_persons.asecwt).sum()
    percent_winners = (total_winners / population * 100).round(1)

    # -------------- calculate all of the poverty breakdown numbers -------------- #
    # Calculate the new poverty rate for each demographic
    def pv_rate(column):
        return mdf.weighted_mean(target_persons[target_persons[column]],
                                 "poor", "asecwt")

    # Round all numbers for display in hover
    def hover_string(metric, round_by=1):
        """formats 0.121 to 12.1%"""
        string = str(round(metric * 100, round_by)) + "%"
        return string

    DEMOGS = ["child", "adult", "pwd", "white", "black", "hispanic"]
    # create dictionary for demographic breakdown of poverty rates
    pov_breakdowns = {
        # return precomputed baseline poverty rates
        "original_rates":
        {demog: return_demog(demog, "pov_rate")
         for demog in DEMOGS},
        "new_rates": {demog: pv_rate(demog)
                      for demog in DEMOGS},
    }

    # add poverty rate changes to dictionary
    pov_breakdowns["changes"] = {
        # Calculate the percent change in poverty rate for each demographic
        demog: rel_change(
            pov_breakdowns["new_rates"][demog],
            pov_breakdowns["original_rates"][demog],
        )
        for demog in DEMOGS
    }

    # create string for hover template
    pov_breakdowns["strings"] = {
        demog: "Original " + demog + " poverty rate: " +
        hover_string(pov_breakdowns["original_rates"][demog]) +
        "<br><extra></extra>" + "New " + demog + " poverty rate: " +
        hover_string(pov_breakdowns["new_rates"][demog])
        for demog in DEMOGS
    }

    # format original and new overall poverty rate
    original_poverty_rate_string = hover_string(original_poverty_rate)
    poverty_rate_string = hover_string(poverty_rate)

    original_poverty_gap_billions = "{:,}".format(
        int(original_poverty_gap / 1e9))

    poverty_gap_billions = "{:,}".format(int(poverty_gap / 1e9))

    original_gini_string = str(round(original_gini, 3))
    gini_string = str(round(gini, 3))

    # --------------SECTION populates "Results of your reform:" ------------ #

    # Convert UBI and winners to string for title of chart
    ubi_string = str("{:,}".format(int(round(ubi_annual / 12))))

    # populates Monthly UBI
    ubi_line = "Monthly UBI: $" + ubi_string

    # populates 'Funds for UBI'
    revenue_line = "Funds for UBI: $" + numerize.numerize(revenue, 1)

    # populates population and revenue for UBI if state selected from dropdown
    if state_dropdown != "US":
        # filter for selected state
        state_spmu = target_spmu[target_spmu.state == state_dropdown]
        # calculate population of state recieving UBI
        state_ubi_population = (state_spmu.numper_ubi * state_spmu.spmwt).sum()

        ubi_population_line = "UBI population: " + numerize.numerize(
            state_ubi_population, 1)

        state_revenue = ubi_annual * state_ubi_population

        revenue_line = ("Funds for UBI (" + state_dropdown + "): $" +
                        numerize.numerize(state_revenue, 1))

    else:
        ubi_population_line = "UBI population: " + numerize.numerize(
            ubi_population, 1)

    winners_line = "Percent better off: " + str(percent_winners) + "%"
    resources_line = ("Average change in resources per person: $" +
                      "{:,}".format(int(change_pp)))

    # ---------- populate economic breakdown bar chart ------------- #

    # Create x-axis labels for each chart
    econ_fig_x_lab = ["Poverty rate", "Poverty gap", "Gini index"]
    econ_fig_cols = [poverty_rate_change, poverty_gap_change, gini_change]
    econ_fig = go.Figure([
        go.Bar(
            x=econ_fig_x_lab,
            y=econ_fig_cols,
            text=econ_fig_cols,
            hovertemplate=[
                # poverty rates
                "Original poverty rate: " + original_poverty_rate_string +
                "<br><extra></extra>"
                "New poverty rate: " + poverty_rate_string,
                # poverty gap
                "Original poverty gap: $" + original_poverty_gap_billions +
                "B<br><extra></extra>"
                "New poverty gap: $" + poverty_gap_billions + "B",
                # gini
                "Original Gini index: <extra></extra>" + original_gini_string +
                "<br>New Gini index: " + gini_string,
            ],
            marker_color=BLUE,
        )
    ])

    # Edit text and display the UBI amount and percent winners in title
    econ_fig.update_layout(
        uniformtext_minsize=10,
        uniformtext_mode="hide",
        plot_bgcolor="white",
        title_text="Economic overview",
        title_x=0.5,
        hoverlabel_align="right",
        font_family="Roboto",
        title_font_size=20,
        paper_bgcolor="white",
        hoverlabel=dict(bgcolor="white", font_size=14, font_family="Roboto"),
        yaxis_tickformat="%",
    )
    econ_fig.update_traces(texttemplate="%{text:.1%f}", textposition="auto")

    econ_fig.update_xaxes(
        tickangle=45,
        title_text="",
        tickfont={"size": 14},
        title_standoff=25,
        title_font=dict(size=14, family="Roboto", color="black"),
    )

    econ_fig.update_yaxes(
        tickprefix="",
        tickfont={"size": 14},
        title_standoff=25,
        title_font=dict(size=14, family="Roboto", color="black"),
    )

    # ------------------ populate poverty breakdown charts ---------------- #

    breakdown_fig_x_lab = [
        "Child",
        "Adult",
        "People<br>with<br>disabilities",
        "White",
        "Black",
        "Hispanic",
    ]

    breakdown_fig_cols = [pov_breakdowns["changes"][demog] for demog in DEMOGS]
    hovertemplate = [pov_breakdowns["strings"][demog] for demog in DEMOGS]

    breakdown_fig = go.Figure([
        go.Bar(
            x=breakdown_fig_x_lab,
            y=breakdown_fig_cols,
            text=breakdown_fig_cols,
            hovertemplate=hovertemplate,
            marker_color=BLUE,
        )
    ])

    breakdown_fig.update_layout(
        uniformtext_minsize=10,
        uniformtext_mode="hide",
        plot_bgcolor="white",
        title_text="Poverty rate breakdown",
        title_x=0.5,
        hoverlabel_align="right",
        font_family="Roboto",
        title_font_size=20,
        paper_bgcolor="white",
        hoverlabel=dict(bgcolor="white", font_size=14, font_family="Roboto"),
        yaxis_tickformat="%",
    )
    breakdown_fig.update_traces(texttemplate="%{text:.1%f}",
                                textposition="auto")

    breakdown_fig.update_xaxes(
        tickangle=45,
        title_text="",
        tickfont=dict(size=14, family="Roboto"),
        title_standoff=25,
        title_font=dict(size=14, family="Roboto", color="black"),
    )

    breakdown_fig.update_yaxes(
        tickprefix="",
        tickfont=dict(size=14, family="Roboto"),
        title_standoff=25,
        title_font=dict(size=14, family="Roboto", color="black"),
    )

    # set both y-axes to the same range
    full_econ_fig = econ_fig.full_figure_for_development(warn=False)
    full_breakdown_fig = breakdown_fig.full_figure_for_development(warn=False)
    # find the minimum of both y-axes
    global_ymin = min(
        min(full_econ_fig.layout.yaxis.range),
        min(full_breakdown_fig.layout.yaxis.range),
    )
    global_ymax = max(
        max(full_econ_fig.layout.yaxis.range),
        max(full_breakdown_fig.layout.yaxis.range),
    )

    # update the yaxes of the figure to account for both ends of the ranges
    econ_fig.update_yaxes(
        dict(range=[global_ymin, global_ymax], autorange=False))
    breakdown_fig.update_yaxes(
        dict(range=[global_ymin, global_ymax], autorange=False))

    return (
        ubi_line,
        revenue_line,
        ubi_population_line,
        winners_line,
        resources_line,
        econ_fig,
        breakdown_fig,
    )
Exemplo n.º 9
0
def home(request):
    """Django view: quote lookup for one or more comma-separated tickers.

    GET renders an empty search prompt.  POST expects ``ticker`` in the
    form data (e.g. ``'amzn,aapl,goog,fb'``), fetches quotes from the
    RapidAPI Yahoo Finance endpoint, and renders ``home.html`` with a
    list of per-company dicts in ``context['tickers']`` (or the string
    ``'error...'`` on failure).
    """
    # to return results to html page
    context = {}

    import requests
    import json
    from numerize import numerize


    if request.method == 'POST':
        ticker = str(request.POST['ticker'])

        # create an account on rapidapi to use
        url = "https://apidojo-yahoo-finance-v1.p.rapidapi.com/market/v2/get-quotes"

        # visit this site to look for ticker symbol - https://www.marketwatch.com/tools/quotes/lookup.asp

        # e.g ticker = 'amzn,aapl,goog,fb'
        querystring = {"region":"US","symbols":ticker}

        headers = {
            'x-rapidapi-key': API_KEY,
            'x-rapidapi-host': "apidojo-yahoo-finance-v1.p.rapidapi.com"
            }

        # call to the api
        response = requests.request("GET", url, headers=headers, params=querystring)

        try:
            res = json.loads(response.content)
            results = res['quoteResponse']['result']
            if len(results) > 0:
                # list to store key value pair company wise
                tickers = []
                # Iterate over the results actually returned: the API may
                # return fewer entries than symbols requested (e.g. for an
                # unknown symbol), which previously raised IndexError and
                # turned the whole page into 'error...'.
                for result in results:
                    temp = {}
                    temp['company'] = result.get('longName')
                    temp['quoteType'] = result.get('quoteType')
                    temp['currency'] = result.get('currency')
                    temp['stockPrice'] = result.get('regularMarketPrice')
                    temp['previousClose'] = result.get('regularMarketPreviousClose')
                    temp['bidSize'] = result.get('bidSize')
                    temp['askSize'] = result.get('askSize')
                    temp['trailingPE'] = result.get('trailingPE')
                    temp['forwardPE'] = result.get('forwardPE')
                    # numerize raises on None, so only abbreviate when present
                    market_cap = result.get('marketCap')
                    temp['marketCap'] = (numerize.numerize(market_cap, 4)
                                         if market_cap is not None else None)
                    temp['fiftyTwoWeekLow'] = result.get('fiftyTwoWeekLow')
                    temp['fiftyTwoWeekHigh'] = result.get('fiftyTwoWeekHigh')
                    temp['fiftyTwoWeekRange'] = result.get('fiftyTwoWeekRange')
                    temp['floatShares'] = result.get('floatShares')
                    temp['marketVolume'] = result.get('regularMarketVolume')
                    temp['priceToSales'] = result.get('priceToSales')
                    revenue = result.get('revenue')
                    temp['revenue'] = (numerize.numerize(revenue, 4)
                                       if revenue is not None else None)
                    # NOTE(review): key spelled 'pegRation' in the template
                    # contract — kept as-is even though it looks like a typo.
                    temp['pegRation'] = result.get('pegRation')
                    temp['epsTrailingTwelveMonths'] = result.get('epsTrailingTwelveMonths')
                    tickers.append(temp)

                context['tickers'] = tickers

            else:
                print('in the error\n')
                context['tickers'] = 'error...'

        except Exception as e:
            # broad catch keeps the page rendering on malformed API payloads
            print('exception----->',e)
            context['tickers'] = 'error...'

        return render(request, 'home.html', context)

    else:
        return render(request, 'home.html', {'empty':'Enter company\'s ticker symbol in the search box and hit search......'})
Exemplo n.º 10
0
def get_repository_info(owner, name):
    """
    Get the relevant information needed for the repository from
    its owner login and name.
    """

    LOGGER.info("Getting info for %s/%s", owner, name)

    access_token = getenv("GITHUB_ACCESS_TOKEN")
    if not access_token:
        raise AssertionError(
            "Access token not present in the env variable `GITHUB_ACCESS_TOKEN`"
        )

    # Authenticated client (anonymous access is too rate-limited).
    client = login(token=access_token)

    # Fetch the repository; a missing repo only logs a warning (returns None).
    try:
        repository = client.repository(owner, name)

        good_first_issues = list(
            repository.issues(
                labels=ISSUE_LABELS,
                state=ISSUE_STATE,
                number=ISSUE_LIMIT,
                sort=ISSUE_SORT,
                direction=ISSUE_SORT_DIRECTION,
            ))
        LOGGER.info("\t found %d good first issues", len(good_first_issues))

        # Keep only repos that have at least one good first issue and a
        # detected primary language.
        if not (good_first_issues and repository.language):
            LOGGER.info("\t skipping the repo")
            return None

        repo_id = str(repository.id)
        info = {
            "name": name,
            "owner": owner,
            "description": emojize(repository.description or ""),
            "language": repository.language,
            "slug": slugify(repository.language,
                            replacements=SLUGIFY_REPLACEMENTS),
            "url": repository.html_url,
            "stars": repository.stargazers_count,
            "stars_display": numerize.numerize(repository.stargazers_count),
            "last_modified": repository.last_modified,
            "id": repo_id,
            "objectID": repo_id,  # for indexing on algolia
        }

        # Latest matching issues, trimmed to the fields consumers need.
        info["issues"] = [{
            "title": issue.title,
            "url": issue.html_url,
            "number": issue.number,
            "comments_count": issue.comments_count,
            "created_at": issue.created_at.isoformat(),
        } for issue in good_first_issues]
        return info
    except exceptions.NotFoundError:
        LOGGER.warning("Not Found: %s", f"{owner}/{name}")
Exemplo n.º 11
0
def match_deal(industry, issuance_type, country, min_invest, max_invest,
               filename="C:/Users/Krish/Documents/GitHub/The-Great-Auto-Suggester/Mock_Deal_Database1.csv"):
    """
    Filter the mock deal database by the given criteria, print the matches
    and return them as a DataFrame.

    Blank-string arguments mean "no constraint" for that column.
    `filename` was previously hard-coded; it is now a parameter with the
    old path as default, so existing callers are unaffected.
    """
    df = pd.read_csv(filename)
    # Currency symbol is taken from the first character of the minimum
    # investment input (e.g. "$1M" -> "$") — assumes the symbol is one
    # character; TODO confirm for multi-char currencies.
    currency_type = min_invest[:1]

    # Convert everything to lowercase so comparisons are case-insensitive.
    df = df.applymap(lambda s: s.lower() if type(s) == str else s)

    # Configure Pandas display options
    pd.set_option('mode.chained_assignment', None)
    pd.options.display.width = 0

    # Normalize "Target Raise" into comparable numbers, leaving "n/a" as-is.
    df['Target Raise'] = df['Target Raise'].apply(lambda x: (normalize(x) if x != "n/a" else x))

    # Handling when the user decides to leave an input blank for their query:
    # a True mask is a no-op in the combined boolean filter below.
    if industry != "":
        indust = (df['Industry Type'] == industry.lower())
    else:
        indust = True

    if issuance_type != "":
        issuance = (df['Issuance Type'] == issuance_type.lower())
    else:
        issuance = True

    if country != "":
        loc = (df["Location"] == country.lower())
    else:
        loc = True

    # Renamed from `min`/`max` to stop shadowing the builtins.
    if min_invest != "":
        min_mask = (df['Target Raise'] > normalize(min_invest.lower()))
    else:
        min_mask = True

    if max_invest != "":
        max_mask = (df['Target Raise'] <= normalize(max_invest.lower()))
    else:
        max_mask = True

    # formats the visual output of the dataframe; .copy() avoids mutating a
    # view of the original frame (SettingWithCopy).
    output_df = df[indust & issuance & loc & min_mask & max_mask].copy()
    output_df['Target Raise'] = output_df['Target Raise'].apply(
        lambda x: currency_type + (numerize.numerize(x) if x != "n/a" else x))

    if output_df.empty:
        print("No results. Please tweak your search criteria.")

    print(output_df)
    return output_df
Exemplo n.º 12
0
    (profilesizex, profilesizey), Image.ANTIALIAS).convert("RGB")
textcolor = (textcolor >> 16, (textcolor >> 8) % (1 << 16),
             textcolor % (1 << 8))

bar = Image.new("RGB", bg.size, (0, 0, 0, 255))
pasteImage(bar, profile, (profilex, profiley))
pixels = bar.load()
for x in range(int(barx * (xp / maxXp)) + 1):
    for y in range(bary):
        pixels[barposx + x,
               barposy + y] = (color >> 16, (color >> 8) % (1 << 8),
                               color % (1 << 8))
bar.paste(bg, (0, 0), bg)

draw = ImageDraw.Draw(bar)
xp = numerize.numerize(xp)
maxXp = numerize.numerize(maxXp)
totalXp = numerize.numerize(totalXp)

font = ImageFont.truetype("./ressources/arial.ttf", 20)
draw.text((profilex + profilesizex + 10, profiley + 10),
          "#{}  {}".format(rank, name),
          textcolor,
          font=font)
font = ImageFont.truetype("./ressources/arial.ttf", 15)
size, offset = font.font.getsize("Total: {}".format(totalXp))
size2, offset2 = font.font.getsize("{}/{}".format(xp, maxXp))

draw.text((barposx, barposy + bary + 3),
          "Level {}".format(level),
          textcolor,
Exemplo n.º 13
0
def add_stock(request):
    """
    Django view: add a stock ticker (POST) or show live quote data for
    every saved ticker (GET) via the Yahoo Finance API on RapidAPI.
    """
    from numerize import numerize

    if request.method == 'POST':
        form = StockForm(request.POST or None)

        if form.is_valid():
            form.save()
            messages.success(request, 'Stock Has Been Added Successfully')
        # Bug fix: an invalid POST form previously fell through and the view
        # returned None, which crashes Django (a view must return a
        # response). Redirect back to the page in both cases.
        return redirect('add_stock')

    else:
        import requests
        import json
        tickers = Stock.objects.all()

        # Build the comma-separated symbol list, e.g. 'amzn,aapl,goog,fb'.
        ids = list(tickers)
        symbols = ','.join(str(item) for item in ids)
        output = []

        # create an account on rapidapi to use 
        url = "https://apidojo-yahoo-finance-v1.p.rapidapi.com/market/v2/get-quotes"

        # visit this site to look for ticker symbol - https://www.marketwatch.com/tools/quotes/lookup.asp
        querystring = {"region": "US", "symbols": symbols}

        headers = {
            'x-rapidapi-key': API_KEY,
            'x-rapidapi-host': "apidojo-yahoo-finance-v1.p.rapidapi.com"
            }

        # call to the api
        response = requests.request("GET", url, headers=headers, params=querystring)

        err = ''

        try:
            length_of_ticker = len(symbols.split(','))

            res = json.loads(response.content)
            # Hoisted: the result list was previously re-indexed from `res`
            # on every field of every iteration.
            quotes = res['quoteResponse']['result']

            if length_of_ticker > len(quotes):
                # The API silently drops unknown symbols; treat the most
                # recently added ticker as invalid and delete it.
                messages.success(request, 'Invalid input deleting this stock')
                return delete(request, ids[len(ids) - 1].id)
            elif len(quotes) > 0:
                # Retrieve the important fields from the API response,
                # one dict per company (order preserved for the template).
                for i in range(length_of_ticker):
                    entry = quotes[i]
                    temp = {}
                    temp['symbol'] = entry.get('symbol')
                    temp['company'] = entry.get('longName')
                    temp['quoteType'] = entry.get('quoteType')
                    temp['currency'] = entry.get('currency')
                    temp['stockPrice'] = entry.get('regularMarketPrice')
                    temp['previousClose'] = entry.get('regularMarketPreviousClose')
                    temp['bidSize'] = entry.get('bidSize')
                    temp['askSize'] = entry.get('askSize')
                    temp['trailingPE'] = entry.get('trailingPE')
                    temp['forwardPE'] = entry.get('forwardPE')
                    # Abbreviate large values (e.g. 2.95T) to 4 places.
                    temp['marketCap'] = numerize.numerize(entry.get('marketCap'), 4)
                    temp['fiftyTwoWeekLow'] = entry.get('fiftyTwoWeekLow')
                    temp['fiftyTwoWeekHigh'] = entry.get('fiftyTwoWeekHigh')
                    temp['fiftyTwoWeekRange'] = entry.get('fiftyTwoWeekRange')
                    temp['floatShares'] = entry.get('floatShares')
                    temp['marketVolume'] = entry.get('regularMarketVolume')
                    temp['priceToSales'] = entry.get('priceToSales')
                    temp['revenue'] = numerize.numerize(entry.get('revenue'), 4)
                    temp['pegRation'] = entry.get('pegRation')
                    temp['epsTrailingTwelveMonths'] = entry.get('epsTrailingTwelveMonths')
                    output.append(temp)
            else:
                print('in the error\n')
                err = 'error...'

        except Exception as e:
            # Any malformed/failed response (including numerize on a missing
            # marketCap/revenue) lands here and is surfaced via `err`.
            print('exception--->', e)
            err = 'error...'

        zipped_data = zip(ids, output)
        return render(request, 'add_stock.html', {'zipped_data': zipped_data, 'tickers': tickers, 'output': output, 'err': err})
Exemplo n.º 14
0
def get_repository_info(owner, name):
    """
    Get the relevant information needed for the repository from
    its owner login and name.
    """

    LOGGER.info("Getting info for %s/%s", owner, name)

    access_token = getenv('GITHUB_ACCESS_TOKEN')
    if not access_token:
        raise AssertionError(
            'Access token not present in the env variable `GITHUB_ACCESS_TOKEN`'
        )

    # create a logged in GitHub client
    client = login(token=access_token)

    # get the repository; if the repo is not found, raise an error
    try:
        repository = client.repository(owner, name)

        good_first_issues = list(
            repository.issues(
                labels=ISSUE_LABELS,
                state=ISSUE_STATE,
                number=ISSUE_LIMIT,
                sort=ISSUE_SORT,
                direction=ISSUE_SORT_DIRECTION,
            ))
        LOGGER.info('\t found %d good first issues', len(good_first_issues))

        # Guard clause: skip repos with no good first issue at all.
        if not good_first_issues:
            LOGGER.info('\t skipping the repo')
            return None

        repo_id = str(repository.id)
        info = {
            "name": name,
            "owner": owner,
            "language": repository.language,
            "url": repository.html_url,
            "stars": repository.stargazers_count,
            "stars_display": numerize.numerize(repository.stargazers_count),
            "last_modified": repository.last_modified,
            "id": repo_id,
            "objectID": repo_id,  # for indexing on algolia
        }

        # the latest issues with the tag, reduced to the fields we expose
        info["issues"] = [{
            "title": issue.title,
            "url": issue.html_url,
            "number": issue.number,
            "created_at": issue.created_at.isoformat()
        } for issue in good_first_issues]
        return info
    except exceptions.NotFoundError:
        raise RepoNotFoundException()
Exemplo n.º 15
0
def diplayOnGauge(maxSquareKm):
    """Return the pair of square-km bounds abbreviated for gauge display."""
    lower = numerize(maxSquareKm[0])
    upper = numerize(maxSquareKm[1])
    return lower, upper
Exemplo n.º 16
0
    re_char='')

# Build the "specific" validation split with the same loader settings as
# the training split constructed above.
svalidloader, svalidset, svalidset_list, svalid_config = loader_multi.load_multi_dataset(
    specific_dataset_root,
    mode='valid',
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    img_size=IMG_SIZE,
    grayscale=GRAYSCALE,
    character=CHARACTER,
    re_char='')

# ======================================= info ==============================================

# Print dataset sizes (abbreviated via numerize, e.g. 1.2K) plus the class
# name of each sub-dataset as a quick sanity check before training.
print()
print('total specific trainset:', numerize(len(strainset)))
for idx in range(len(strainset_list)):
    print(numerize(len(strainset_list[idx])), '-',
          str(strainset_list[idx].__class__).split('.')[-1])

print()
print('total specific validset:', numerize(len(svalidset)))
for idx in range(len(svalidset_list)):
    print(numerize(len(svalidset_list[idx])), '-',
          str(svalidset_list[idx].__class__).split('.')[-1])

# ======================================= info ==============================================

# ======================================= model preparation ==============================================

# NOTE(review): assumes an MLflow tracking server is reachable on
# localhost:54849 — confirm before running.
mlflow.set_tracking_uri("http://localhost:54849")
    # Streamlit page header for the EDA section.
    st.title("Exploring ITZY's Spotify Data")
    st.markdown('<div style="color: transparent;">.</div>',unsafe_allow_html=True) # space #
    st.markdown('<div style="color: transparent;">.</div>',unsafe_allow_html=True) # space #
    
    st.markdown('The data we used for our exploratory data analysis (EDA) were scraped through Spotify API and are the daily top 200  most streamed tracks from January 1, 2017 to January 15, 2021. We then focused our analysis on K-Pop girl groups and on ITZY.',unsafe_allow_html=True)
    st.markdown('<div style="color: transparent;">.</div>',unsafe_allow_html=True) # space #
    st.markdown('<div style="color: transparent;">.</div>',unsafe_allow_html=True) # space #
    
    st.markdown('<div style="font-size: 25px;font-weight: bold;">ITZY is the 5th most streamed K-Pop girl group</div>',unsafe_allow_html=True)
    st.markdown('<div style="color: gray; font-size: 18px;">With 23M total streams for their charting songs</div>',unsafe_allow_html=True)
    # Top-5 artists by total streams; re-sorted ascending so the largest bar
    # ends up at the top of the horizontal chart.
    top5 = df1.groupby('artist')[['streams']].sum().sort_values(by="streams", ascending=False).head(5).reset_index()
    top5.sort_values(by="streams", ascending=True, inplace=True)
    
    # Bar labels use numerize to abbreviate raw stream counts (e.g. 23M).
    fig = px.bar(top5, x="streams", y="artist", orientation='h', height=350,
                 text=top5["streams"].apply(lambda x: numerize.numerize(x)))
    
    # Highlight the first (bottom) bar in a distinct color.
    colors = ['#E0B336'] * 5
    colors[0] = '#B88F89'

    fig.update_traces(marker=dict(color=colors), textposition='outside',
                      textfont=dict(size=14, color=colors), width = 0.65)

    # Transparent background, fixed axis range, interactions disabled.
    fig.update_layout(plot_bgcolor='rgba(0,0,0,0)', paper_bgcolor='rgba(0,0,0,0)', hovermode=False,
                      xaxis = {'title': 'Total Streams for Charting Songs', 'range': [0,300000000],
                               'showline':True, 'linecolor':'#999999', 'tickfont':dict(color= '#999999'),
                               'showgrid' : False,'fixedrange':True,'zeroline': False,
                               'titlefont' : dict(color = "#999999", size = 16)},
                      yaxis = {'title': '', 'showgrid' : False, 'fixedrange':True},
                      margin=dict(l=0, r=0, b=15, t=25, pad=15), font=dict(size=14)
                     )
Exemplo n.º 18
0
                        html.P([value],
                               className=
                               "is-size-4 has-text-info has-text-weight-bold",
                               id=id)
                    ],
                             className="column") for label, value, id, icon in
                    [(u"Max Km\u00B2", 0, "display_min_square_km",
                      "fas fa-ruler"),
                     (u"Max Km\u00B2", 0, "display_max_square_km",
                      "fas fa-ruler"),
                     (u"Min Ab/Km\u00B2", 0, "display_min_density",
                      "fas fa-users"),
                     (u"Max Ab/Km\u00B2", 0, "display_max_density",
                      "fas fa-users"),
                     ("Total vaccinated",
                      numerize(int(geo_df["totale"].sum())), "",
                      "fas fa-syringe"),
                     ("Percent Vacinnated",
                      f'{round((100*int(geo_df["totale"].sum()))/int(geo_df["totale_abitanti"].sum()), 2)}%',
                      "", "fas fa-percentage"),
                     ("Last update", lasDate, "", "far fa-calendar-alt")]
                ],
                         className="columns")
            ],
                     className="box"),
            dcc.Markdown('''
                Example of code
                ```py
                import pandas as pd
                import geopandas as gpd
Exemplo n.º 19
0
    img_size=IMG_SIZE, grayscale=GRAYSCALE,
    character=CHARACTER, re_char=''
)

# Build the "specific" validation split with the same loader configuration
# as the training split above.
svalidloader, svalidset, svalidset_list, svalid_config = loader_multi.load_multi_dataset(
    specific_dataset_root, mode='valid', 
    batch_size=BATCH_SIZE, num_workers=NUM_WORKERS,
    img_size=IMG_SIZE, grayscale=GRAYSCALE,
    character=CHARACTER, re_char=''
)



# ======================================= info ==============================================
# Print each dataset's size (abbreviated via numerize), the class name of
# each sub-dataset, and its root directory as a pre-training sanity check.
print()
print('total general trainset:', numerize(len(gtrainset)))
for idx in range(len(gtrainset_list)):
    print(numerize(len(gtrainset_list[idx])), '-',str(gtrainset_list[idx].__class__).split('.')[-1], ' -- ', gtrainset_list[idx].root)
# print('total general trainset:', numerize(len(gtrainset)))
    
print()
print('total general validset:', numerize(len(gvalidset)))
for idx in range(len(gvalidset_list)):
    print(numerize(len(gvalidset_list[idx])), '-',str(gvalidset_list[idx].__class__).split('.')[-1], ' -- ', gvalidset_list[idx].root)
# print('total general validset:', numerize(len(gvalidset)))

print()
print('total intermediate trainset:', numerize(len(itrainset)))
for idx in range(len(itrainset_list)):
    print(numerize(len(itrainset_list[idx])), '-',str(itrainset_list[idx].__class__).split('.')[-1], ' -- ', itrainset_list[idx].root)
# print('total general trainset:', numerize(len(gtrainset)))
Exemplo n.º 20
0
def diplayOnGauge(maxDensity):
    """Return the pair of density bounds abbreviated for gauge display."""
    low, high = maxDensity[0], maxDensity[1]
    return numerize(low), numerize(high)
Exemplo n.º 21
0
# d[q] = 1
#
# for key, value in d.items():
#     print(key, ':', value)

from numerize import numerize

# Demo user record; the rating is read interactively from stdin.
user = {
    'username': '******',
    'online': True,
    'email': '*****@*****.**',
    'rating': int(input('Enter rating: ')),
    'friends': ['Marry888', 'Candy001', 'Peter99']
}

# Abbreviated rating for display (e.g. 1200 -> '1.2K').
r = numerize.numerize(user['rating'])
if user["rating"] > 1:
    print('Our user has ', r, ' Likes')
elif user["rating"] == 1:
    print('Our user has ', r, ' Like')
elif user["rating"] == 0:
    print('Our user has no Like')
else:
    print('Pls enter correct value!')

while True:
    # input() already returns str, so the wrapping str() was redundant.
    add_friend = input("Enter friend name: ")
    user["friends"].append(add_friend)
    print(type(add_friend))
    # Bug fix: `add_friend == str` compared a value against the type object
    # and was always False; isinstance is the correct type check.
    if isinstance(add_friend, str):
        print('Name OK')
Exemplo n.º 22
0
async def _(event):
    """
    Inline-callback handler: download the selected YouTube result as audio
    or video with yt-dl, then upload it to the chat with thumbnail and
    metadata attributes.

    The callback payload looks like "audio_<video_id>:<format>" or
    "video_<video_id>:<format>".
    """
    url = event.pattern_match.group(1).decode("UTF-8")
    lets_split = url.split("_", maxsplit=1)
    vid_id = lets_split[1].split(":")[0]
    link = _yt_base_url + vid_id
    # Renamed from `format` to stop shadowing the builtin.
    media_format = url.split(":")[1]
    if lets_split[0] == "audio":
        opts = {
            "format": str(media_format),
            "addmetadata": True,
            "key": "FFmpegMetadata",
            "prefer_ffmpeg": True,
            "geo_bypass": True,
            "outtmpl": "%(id)s.mp3",
            "quiet": True,
            "logtostderr": False,
        }
        ytdl_data = await dler(event, link)
        YoutubeDL(opts).download([link])
        title = ytdl_data["title"]
        artist = ytdl_data["uploader"]
        # Abbreviated view count for the caption (e.g. 1.2M).
        views = numerize.numerize(ytdl_data["view_count"])
        urlretrieve(f"https://i.ytimg.com/vi/{vid_id}/hqdefault.jpg",
                    f"{title}.jpg")
        thumb = f"{title}.jpg"
        duration = ytdl_data["duration"]
        os.rename(f"{ytdl_data['id']}.mp3", f"{title}.mp3")
        c_time = time.time()
        file = await uploader(f"{title}.mp3", f"{title}.mp3", c_time, event,
                              "Uploading " + title + "...")
        attributes = [
            DocumentAttributeAudio(
                duration=int(duration),
                title=title,
                performer=artist,
            ),
        ]
    elif lets_split[0] == "video":
        opts = {
            "format": str(media_format),
            "addmetadata": True,
            "key": "FFmpegMetadata",
            "prefer_ffmpeg": True,
            "geo_bypass": True,
            "outtmpl": "%(id)s.mp4",
            "logtostderr": False,
            "quiet": True,
        }
        ytdl_data = await dler(event, link)
        YoutubeDL(opts).download([link])
        title = ytdl_data["title"]
        artist = ytdl_data["uploader"]
        views = numerize.numerize(ytdl_data["view_count"])
        urlretrieve(f"https://i.ytimg.com/vi/{vid_id}/hqdefault.jpg",
                    f"{title}.jpg")
        thumb = f"{title}.jpg"
        duration = ytdl_data["duration"]
        # yt-dl may emit .mp4, .mkv or .webm depending on the chosen format;
        # try each in turn when renaming.
        try:
            os.rename(f"{ytdl_data['id']}.mp4", f"{title}.mp4")
        except FileNotFoundError:
            try:
                os.rename(f"{ytdl_data['id']}.mkv", f"{title}.mp4")
            except FileNotFoundError:
                os.rename(f"{ytdl_data['id']}.webm", f"{title}.mp4")
        except Exception as ex:
            return await event.edit(str(ex))
        # Probe the final file for the real dimensions needed by Telegram.
        wi, _ = await bash(f'mediainfo "{title}.mp4" | grep "Width"')
        hi, _ = await bash(f'mediainfo "{title}.mp4" | grep "Height"')
        c_time = time.time()
        file = await uploader(f"{title}.mp4", f"{title}.mp4", c_time, event,
                              "Uploading " + title + "...")
        attributes = [
            DocumentAttributeVideo(
                duration=int(duration),
                w=int(wi.split(":")[1].split()[0]),
                h=int(hi.split(":")[1].split()[0]),
                supports_streaming=True,
            ),
        ]
    # NOTE(review): if the payload type is neither "audio" nor "video",
    # the names used below (title, duration, views, file, ...) are unbound
    # and this raises NameError — presumably the callback pattern only
    # matches those two prefixes; confirm upstream.
    text = f"**Title:** `{title}`\n"
    text += f"**Duration:** `{time_formatter(int(duration)*1000)}`\n"
    text += f"**Views:** `{views}`\n"
    text += f"**Artist:** `{artist}`\n\n"
    await event.edit(
        text,
        file=file,
        buttons=Button.switch_inline("Search More",
                                     query="yt ",
                                     same_peer=True),
        attributes=attributes,
        thumb=thumb,
    )
    os.system(f'rm "{title}"*')
Exemplo n.º 23
0
def analyzer(stock):
    """Gathers all the necessary details."""
    global count_404, printed
    try:
        info = Ticker(stock).info
    except (ValueError, KeyError, IndexError):
        info = None
        logger.info(f'Unable to analyze {stock}')
    except HTTPError as err:
        info = None
        # A 503 response or 50% 404 response with only 20% processed requests indicates an IP range denial
        if err.code == 503 or (count_404 > 50 * overall / 100
                               and len(stock_map) < 20 * overall / 100):
            print(f'\nNoticing repeated 404s, which indicates an IP range denial by {"/".join(err.url.split("/")[:3])}'
                  '\nPlease wait for a while before re-running this code. '
                  'Also, reduce number of max_workers in concurrency and consider switching to a new Network ID.') if \
                not printed else None
            printed = True  # makes sure the print statement happens only once
            ThreadPoolExecutor().shutdown(
            )  # stop future threads to avoid progress bar on screen post print
            raise ConnectionRefusedError  # handle exception so that spreadsheet is created with existing stock_map dict
        elif err.code == 404:
            count_404 += 1  # increases count_404 for future handling
            logger.info(
                f'Failed to analyze {stock}. Faced error code {err.code} while requesting {err.url}. '
                f'Reason: {err.reason}.')
        else:
            logger.info(
                f'Failed to analyze {stock}. Faced error code {err.code} while requesting {err.url}. '
                f'Reason: {err.reason}.')
    if info:
        stock_name = info['shortName'] if 'shortName' in info.keys(
        ) and info['shortName'] else None
        capital = numerize.numerize(
            info['marketCap']
        ) if 'marketCap' in info.keys() and info['marketCap'] else None
        dividend_yield = make_float(
            info['dividendYield']) if 'dividendYield' in info.keys() else None
        pe_ratio = make_float(
            info['forwardPE']) if 'forwardPE' in info.keys() else None
        pb_ratio = make_float(
            info['priceToBook']) if 'priceToBook' in info.keys() else None
        price = make_float(info['ask']) if 'ask' in info.keys() else None
        today_high = make_float(
            info['dayHigh']) if 'dayHigh' in info.keys() else None
        today_low = make_float(
            info['dayLow']) if 'dayLow' in info.keys() else None
        high_52_weeks = make_float(
            info['fiftyTwoWeekHigh']) if 'fiftyTwoWeekHigh' in info.keys(
            ) else None
        low_52_weeks = make_float(
            info['fiftyTwoWeekLow']) if 'fiftyTwoWeekLow' in info.keys(
            ) else None
        d_yield_5y = make_float(
            info['fiveYearAvgDividendYield']
        ) if 'fiveYearAvgDividendYield' in info.keys() else None
        profit_margin = make_float(
            info['profitMargins']) if 'profitMargins' in info.keys() else None
        industry = info['industry'] if 'industry' in info.keys() else None
        employees = numerize.numerize(info['fullTimeEmployees']) if 'fullTimeEmployees' in info.keys() and \
                                                                    info['fullTimeEmployees'] else None

        result = stock_name, capital, dividend_yield, pe_ratio, pb_ratio, price, today_high, today_low, \
            high_52_weeks, low_52_weeks, d_yield_5y, profit_margin, industry, employees
        stock_map.update({stock: result})