Example no. 1
def problem_edit_view(request, **kwargs):
    problem = get_object_or_404(Problem, pk=kwargs['pk'])
    return render(request, 'patients_app/problems/problem_form.html', {
        'problem': problem,
        'onset_date': date_to_str(problem.date_onset),
        'diagnosis_date': date_to_str(problem.date_diagnosis),
        'method': 'PATCH',
    })
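These views assume a small formatting-helper module. A minimal sketch of what date_to_str (and the num_to_str used in Example no. 3) might look like; the "%Y-%m-%d" format and the handling of None are assumptions, not the project's actual implementation:

def date_to_str(value, fmt="%Y-%m-%d"):
    """Format a date/datetime as a string; empty string for None (assumed)."""
    return value.strftime(fmt) if value is not None else ""

def num_to_str(value):
    """Render a number as a string; empty string for None (assumed)."""
    return str(value) if value is not None else ""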
Example no. 2
 def pass_tostr(pass_data, sig_freq_hz=1000):
     if pass_data is not None:
         return "%s (%3d azim, %+2.2f kHz) -> %s (%2.0f deg) -> %s (%3d azim, %+2.2f kHz)" % (
             utils.date_to_str(
                 pass_data["rise_time"]), pass_data["rise_azimuth"],
             (sig_freq_hz / 1000.0) * pass_data["rise_doppler_factor"],
             utils.date_to_str(pass_data["max_alt_time"]),
             pass_data["max_alt"], utils.date_to_str(
                 pass_data["set_time"]), pass_data["set_azimuth"],
             (sig_freq_hz / 1000.0) * pass_data["set_doppler_factor"])
     else:
         return None
Example no. 3
def medication_edit_view(request, **kwargs):
    medication = get_object_or_404(Medication, pk=kwargs['pk'])
    return render(request, 'patients_app/medications/med_form.html', {
        'medication': medication,
        'date_prescribed': date_to_str(medication.date_prescribed),
        'date_started_taking': date_to_str(medication.date_started_taking),
        'date_stopped_taking': date_to_str(medication.date_stopped_taking),
        'dispense_quantity': num_to_str(medication.dispense_quantity),
        'dosage_quantity': num_to_str(medication.dosage_quantity),
        'number_refills': num_to_str(medication.number_refills),
        'method': 'PATCH',
    })
Example no. 4
    def create_xbrl_url_json(self):
        result_dict = {}
        target_date = utils.str_to_date(settings.since)

        count = 0

        while True:
            response_string = self.get_link_info_str(utils.date_to_str(target_date))
            target_list = self.__json_parse(response_string)
            if not target_list:
                target_date = target_date + timedelta(days=1)
                # Also check the end bound here, so a run of empty days just
                # before `until` cannot push the loop past the end date.
                if target_date >= utils.str_to_date(settings.until):
                    break
                continue

            info_dict = self.get_link(target_list)
            result_dict.update(info_dict)

            time.sleep(1)
            target_date = target_date + timedelta(days=1)
            count += 1
            logger.info(f'action=create_xbrl_url_json count={count}')

            if target_date >= utils.str_to_date(settings.until):
                break

        return result_dict
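The same day-by-day walk can be sketched in a self-contained form; the start/end dates and the "%Y%m%d" format below are assumptions standing in for settings.since, settings.until and the utils helpers:

from datetime import date, timedelta

start, end = date(2021, 1, 1), date(2021, 1, 10)  # hypothetical bounds
for offset in range((end - start).days):
    day = start + timedelta(days=offset)
    # one request per day would go here, e.g. get_link_info_str(day.strftime("%Y%m%d"))
    print(day.strftime("%Y%m%d"))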
Example no. 5
 def to_dict(self):
     return {
         'id': self.id,
         'date': date_to_str(self.date),
         'name': self.name,
         'raid_id': self.raid.id,
     }
Example no. 6
async def random_comic() -> Response:
    """Serve a random comic."""
    first = str_to_date(FIRST_COMIC)
    latest = curr_date()
    rand_date = date_to_str(random.uniform(first, latest))  # type: ignore
    # If there is no comic for this date yet, "dilbert.com" will auto-redirect
    # to the latest comic.
    return redirect(f"/{rand_date}")
Example no. 7
async def _serve_template(date: str, data: dict, latest_comic: str) -> str:
    """Serve the HTML given scraped data.

    Both input dates must be in the format used by "dilbert.com".

    Args:
        date: The (possibly corrected) date of the comic
        data: The scraped comic data
        latest_comic: The date of the latest comic

    Returns:
        The rendered template for the comic page

    """
    date_obj = str_to_date(date)

    # Links to previous and next comics
    previous_comic = date_to_str(
        max(str_to_date(FIRST_COMIC), date_obj - timedelta(days=1))
    )
    next_comic = date_to_str(
        min(str_to_date(latest_comic), date_obj + timedelta(days=1))
    )

    # Whether to disable left/right navigation buttons
    disable_left_nav = date == FIRST_COMIC
    disable_right_nav = date == latest_comic

    # Link to original strip on "dilbert.com"
    permalink = SRC_PREFIX + date

    return await render_template(
        "layout.html",
        data=data,
        date=date,
        first_comic=FIRST_COMIC,
        previous_comic=previous_comic,
        next_comic=next_comic,
        disable_left_nav=disable_left_nav,
        disable_right_nav=disable_right_nav,
        permalink=permalink,
        repo=REPO,
    )
Example no. 8
async def latest_comic() -> str:
    """Serve the latest comic."""
    # If there is no comic for this date yet, "dilbert.com" will auto-redirect
    # to the latest comic.
    today = date_to_str(curr_date())

    # If there is no comic for this date yet, we still want to keep this as the
    # homepage, as a redirection would alter the URL, and lead to slower
    # loading.
    return await serve_comic(today, allow_redirect=False)
Example no. 9
    async def cache_data(self, data, date):
        """Cache the comic data into the database."""
        # The given date can be invalid (i.e. we may have been redirected to a
        # comic with a different date), hence get the correct date from the
        # scraped data.
        date = date_to_str(str_to_date(data["dateStr"], fmt=ALT_DATE_FMT))

        # This lock ensures that the no. of rows in the cache doesn't exceed
        # the limit. Without it the count could grow, because the code first
        # clears excess rows and then adds a new row, so the following
        # interleaving would increase the no. of rows:
        #   1. Coroutine 1 clears excess rows
        #   2. Coroutine 2 clears no excess rows, as coroutine 1 did them
        #   3. Coroutine 1 adds its row
        #   4. Coroutine 2 adds its row
        async with Lock():
            try:
                await self._clean_cache()
            except Exception as ex:
                # This crash means that there can be some extra rows in the
                # cache. As the row limit is a little conservative, this should
                # not be a big issue.
                self.logger.error(f"Failed to clean cache: {ex}")
                self.logger.debug("", exc_info=True)

            date_obj = str_to_date(date)

            try:
                async with self.pool.acquire() as conn:
                    await conn.execute(
                        """INSERT INTO comic_cache (comic, img_url, title)
                        VALUES ($1, $2, $3);""",
                        date_obj,
                        data["imgURL"],
                        data["title"],
                    )
            except UniqueViolationError:
                # This comic date exists, so some other coroutine has already
                # cached this date in parallel. So we can simply update
                # `last_used` later (outside the lock).
                self.logger.warning(
                    f"Trying to cache date {date}, which is already cached."
                )
            else:
                return  # succeeded in caching data, so exit

        # This only executes if caching data led to a UniqueViolation error.
        # The lock isn't needed here, as this command cannot increase the no.
        # of rows in the cache.
        self.logger.info("Now trying to update `last_used` in cache.")
        async with self.pool.acquire() as conn:
            await conn.execute(
                "UPDATE comic_cache SET last_used = DEFAULT WHERE comic = $1;",
                date_obj,
            )
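As a hedged alternative, assuming the comic column has a UNIQUE or primary-key constraint, PostgreSQL's INSERT ... ON CONFLICT could fold the insert and the later last_used update into a single statement (shown as it would appear inside the same async method), avoiding the UniqueViolationError branch entirely:

        # Sketch only; assumes a UNIQUE constraint on comic_cache.comic.
        async with self.pool.acquire() as conn:
            await conn.execute(
                """INSERT INTO comic_cache (comic, img_url, title)
                VALUES ($1, $2, $3)
                ON CONFLICT (comic) DO UPDATE
                SET img_url = EXCLUDED.img_url,
                    title = EXCLUDED.title,
                    last_used = DEFAULT;""",
                date_obj,
                data["imgURL"],
                data["title"],
            )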
Example no. 10
def past_price(ticker_obj,
               days):  # past real days, automatically displays graph
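    # Approximate the number of trading days in the requested calendar-day
    # window (markets trade ~5 of every 7 days); e.g. days=30 -> period "21d".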
    delta = int(round(days * (5 / 7)))
    delta = str(delta) + "d"
    hist = ticker_obj.history(period=delta)
    start_date = utils.date_to_str(hist.index[0].date())
    today = utils.date_to_str(datetime.date.today())

    close = list(hist['Close'])
    days_list = [i for i in range(len(close))]
    df = pd.DataFrame({"Price": close, "Days": days_list})

    plot = sb.lineplot(x="Days", y="Price", data=df)
    plot.set(xlabel=("{start} to {today} ~ {days} days"
                     ).format(start=utils.date_concat(start_date),
                              today=utils.date_concat(today),
                              days=days))
    plot.set(ylabel=str(ticker_obj.info['symbol'] + " Price"))
    plt.show()
    return
Example no. 11
 def option_data(self):  # returns dataframe of options info
     if not self.expiry:
         return ("Invalid option; err1")
     exp_date = utils.date_to_str(self.expiry)
     chain = self.obj.option_chain(exp_date)
     if self.put:
         chain = chain.puts
     else:
         chain = chain.calls
     option_data = chain[chain['strike'] == self.strike]
     return option_data
Example no. 12
 def default(self, obj):
     if isinstance(obj, datetime.datetime):
         # convert any datetime to RFC 1123 format
         return date_to_str(obj)
     elif isinstance(obj, (datetime.time, datetime.date)):
         # should not happen, since the only date-like format supported at
         # the domain schema level is 'datetime'.
         return obj.isoformat()
     elif isinstance(obj, ObjectId):
         # BSON/Mongo ObjectId is rendered as a string
         return str(obj)
     return json.JSONEncoder.default(self, obj)
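For context, a default hook like this is activated by passing the encoder class to json.dumps; a minimal self-contained sketch (the class name and the isoformat stand-in for date_to_str are assumptions):

import datetime
import json

class CustomEncoder(json.JSONEncoder):  # hypothetical name
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()  # stand-in for the RFC 1123 date_to_str above
        return super().default(obj)

print(json.dumps({"created": datetime.datetime(2021, 1, 1)}, cls=CustomEncoder))
# -> {"created": "2021-01-01T00:00:00"}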
Example no. 13
 def on_bt_select_clicked(self, data=None):
     """
     Check user information and accept or reject the login
     """
     # Calendar widget holds month in zero based index 0..11
     y, m, d = self.calendar.get_date()
     date = datetime.date(y, m + 1, d)
     try:
         self.__entry.set_text(u.date_to_str(date))
         if self.__callback:
             self.__callback()
         self.window.destroy()
     except Exception:
         self.message_error(_(u'Selecione a data corretamente'))
Example no. 14
async def serve_comic(
    date: str, allow_redirect: bool = True
) -> Union[str, Response]:
    """Serve the requested comic.

    Args:
        date: The date of the requested comic, in the format used by
            "dilbert.com"
        allow_redirect: If there is no comic found for this date, then
            whether to redirect to the correct date

    Returns:
        The rendered template for the comic page

    """
    # Execute both in parallel, as they are independent of each other
    data, latest_comic = await asyncio.gather(
        app.comic_scraper.get_data(date), app.latest_date_scraper.get_data(),
    )

    # This date differs from the input date if the input is invalid (i.e.
    # "dilbert.com" would redirect to a comic with a different date).
    actual_date_obj = str_to_date(data["dateStr"], fmt=ALT_DATE_FMT)
    actual_date = date_to_str(actual_date_obj)

    # Replicates the behaviour of "dilbert.com" by redirecting to the correct
    # date.
    if allow_redirect and actual_date != date:
        return redirect(f"/{actual_date}")

    # This will contain awaitables for caching data (if required) and rendering
    # the template. They are both independent of each other, and thus can be
    # run in parallel.
    todos = []

    # The date of the latest comic is often retrieved from the cache. If
    # "dilbert.com" has redirected to a date which is newer than the cached
    # value, then there is a new "latest comic". So cache the answer of
    # "dilbert.com".
    if str_to_date(latest_comic) < actual_date_obj:
        latest_comic = actual_date
        todos.append(app.latest_date_scraper.cache_data(actual_date))

    todos.append(_serve_template(actual_date, data, latest_comic))
    results = await asyncio.gather(*todos)
    return results[-1]  # this is the rendered template
Example no. 15
def check(update, context):
    user = get_user(update)
    if len(user.followed_issues) == 0:
        update.message.reply_text('Non segui nessun manga, aggiungine con /add_issue')
    elif user.last_check + CHECK_COOLDOWN <= time.time():
        locale.setlocale(locale.LC_TIME, user.language or 'it_IT')
        new_issues = spider.get_new_chapters(user.followed_issues, user.last_check)
        user.last_check = int(time.time())
        ud.save_user_data(user)

        if len(new_issues) == 0:
            update.message.reply_text('Non ci sono novità dall\'ultimo controllo')

        else:
            for i, issue in enumerate(new_issues):
                reply = [
                    f'<b>{issue}</b>'
                ]
                volumes = list(new_issues[issue].keys())
                if len(volumes) > 3:
                    volumes = volumes[0:3]
 
                for volume in volumes:
                    reply.append(volume)
                    chapters = new_issues[issue][volume]['chapters']
                    chapters.sort(key=lambda x: x['title'])

                    for chapter in chapters:
                        title = chapter['title']
                        url = chapter['url']
                        released = date_to_str(chapter['release'])

                        reply.append(f'&#9679; <a href="{url}">{title}</a> - <i>{released}</i>')

                    reply.append('\n')

                update.message.reply_html('\n'.join(reply))

                if i < len(new_issues) - 1:
                    time.sleep(1.0)

    else:
        update.message.reply_text('Si può effettuare un controllo ogni 15 minuti')
Example no. 16
    async def scrape_data(self):
        """Scrape the date of the latest comic from "dilbert.com"."""
        # If there is no comic for this date yet, "dilbert.com" will
        # auto-redirect to the latest comic.
        latest = date_to_str(curr_date())
        url = SRC_PREFIX + latest

        async with self.sess.get(url) as resp:
            self.logger.debug(f"Got response for latest date: {resp.status}")
            date = resp.url.path.split("/")[-1]

        # Check to see if the date is invalid
        try:
            str_to_date(date)
        except ValueError:
            raise ScrapingException(
                "Error in scraping the latest date from the URL")

        return date
Example no. 17
    async def get_cached_data(self):
        """Get the cached latest date from the database.

        If the latest date entry is stale (i.e. it was updated a long time
        back), or it wasn't found in the cache, None is returned.
        """
        async with self.pool.acquire() as conn:
            # The interval for "freshness" of the entry has to be given this
            # way instead of '$1 hours', because of PostgreSQL's syntax.
            # All dates managed by asyncpg are set to UTC.
            date = await conn.fetchval(
                """SELECT latest FROM latest_date
                WHERE last_check >= CURRENT_TIMESTAMP - INTERVAL '1 hour' * $1;
                """,
                LATEST_DATE_REFRESH,
            )

        if date is not None:
            # A "fresh" entry was found
            date = date_to_str(date)

        return date
Example no. 18
    async def get_cached_data(self, date):
        """Get the cached comic data from the database."""
        async with self.pool.acquire() as conn:
            # The other columns in the table are: `comic`, `last_used`. `comic`
            # is not required here, as we already have the date as a function
            # argument. In case the date given here is invalid (i.e. it would
            # redirect to a comic with a different date), we cannot retrieve
            # the correct date from the cache, as we aren't caching the mapping
            # of incorrect:correct dates. `last_used` will be updated later.
            row = await conn.fetchrow(
                "SELECT img_url, title FROM comic_cache WHERE comic = $1;",
                str_to_date(date),
            )

        if row is None:
            # This means that the comic for this date wasn't cached, or the
            # date is invalid (i.e. it would redirect to a comic with a
            # different date).
            return None

        data = {
            "title": row[1],
            "dateStr": date_to_str(str_to_date(date), fmt=ALT_DATE_FMT),
            "imgURL": row[0],
        }

        # Update `last_used`, so that this comic isn't accidentally de-cached. We
        # want to keep the most recently used comics in the cache, and we are
        # currently using this comic.
        self.logger.info("Updating `last_used` for data in cache")
        async with self.pool.acquire() as conn:
            await conn.execute(
                "UPDATE comic_cache SET last_used = DEFAULT WHERE comic = $1;",
                str_to_date(date),
            )

        return data
Example no. 19
 def __init__(self, option):
     # assert(type(option) == Option)
     self.date_init = utils.date_to_str(dt.date.today())
     self.last_price = 0
Example no. 20
 def count_someDays_request(self,start_date,end_date):
     all_dates = utils.get_all_dates(start_date,end_date)
     for d in all_dates:
         print(d)
         self.count_daily_request(utils.date_to_str(d))
Example no. 21
date_borders = date_range(min_date, max_date, delta_days=granularity_in_days)
date_borders.append(max_date)

# extract and clean data
current_timeseries = timseries_data[
    (timseries_data['currency'] == currency)
    & (timseries_data['category'] == category)
     # &  (timseries_data['departments_ids'].str.contains('29'))  # per vedere singolo dipartimento

    # &  (  timseries_data['departments_ids'].str.contains('29') | timseries_data['departments_ids'].str.contains('74')  | timseries_data['departments_ids'].str.contains('111') )  # per vedere piuù dipartimenti
    ]
current_timeseries.dropna(inplace=True)

# create aggregated timeseries
dates = [date_to_str(d) for d in date_borders[:-1]]
ys = []
ys_err = []

for i in range(len(date_borders) - 1):
    cur_date = date_borders[i]
    next_date = date_borders[i + 1]

    partial_timeseries = current_timeseries[(current_timeseries['date'] >= cur_date)
                                            & (current_timeseries['date'] <= next_date)]

    ys.append(partial_timeseries[kpi].mean())  # aggregate with mean
    # ys_err.append(partial_timeseries[kpi].std() / sqrt(partial_timeseries[kpi].count()) )
    # ys.append(partial_timeseries[kpi].sum()) # aggregate with sum

# stat analysis
Example no. 22
 def __str__(self):
     return "Examen Realizado el: %s" %date_to_str(self.fecha)
Example no. 23
 def fecha_(self):
     return date_to_str(self.fecha)
Example no. 24
def req_historical_data(bbg_identifier, startDate, endDate):

    # Recast start & end dates in Bloomberg's format
    startDate = date_to_str(startDate, "%Y%m%d")
    endDate = date_to_str(endDate, "%Y%m%d")

    if (pd.to_datetime(startDate) >= pd.to_datetime(endDate)):
        sys.exit(
            "in req_historical_data in 'bloomberg_functions.py': " + \
            "specified startDate is later than endDate!"
        )

    # First, check to see if there is already a local .p data file with the
    # data you need for bbg_identifier. If it's not there, create it.
    if not os.path.isdir("bbg_data"):
        os.makedirs("bbg_data")
        print("created the 'bbg_data' folder.")

    if (bbg_identifier + ".csv") in os.listdir("bbg_data"):
        old_bbg_data = pd.read_csv("bbg_data/" + bbg_identifier + ".csv")

        first_old = pd.to_datetime(min(old_bbg_data['Date'])).date()
        last_old = pd.to_datetime(max(old_bbg_data['Date'])).date()

        first_new = pd.to_datetime(startDate).date()
        last_new = pd.to_datetime(endDate).date()

        if first_old <= first_new and last_old >= last_new:
            # Don't need to make a query; have all data we need.
            histdata = old_bbg_data[[(pd.to_datetime(x).date() <= last_new) &
                                     (pd.to_datetime(x).date() >= first_new)
                                     for x in old_bbg_data['Date']]]
            histdata.reset_index(drop=True, inplace=True)
            return histdata

        if first_old > first_new and last_old < last_new:
            # do nothing for now, just requery the bigger dataset. Can refine
            # this case later.
            print(
                "overwriting old data with date range: " + startDate + \
                " to " + endDate
            )
        else:
            if first_new < first_old:
                endDate = date_to_str(first_old, "%Y%m%d")
            else:
                startDate = date_to_str(last_old, "%Y%m%d")

    print(startDate)

    options = parseCmdLine()

    # Fill SessionOptions
    sessionOptions = blpapi.SessionOptions()
    sessionOptions.setServerHost(options.host)
    sessionOptions.setServerPort(options.port)

    print("Connecting to %s:%s" % (options.host, options.port))
    # Create a Session
    session = blpapi.Session(sessionOptions)

    # Start a Session
    if not session.start():
        print("Failed to start session.")
        return

    try:
        # Open service to get historical data from
        if not session.openService("//blp/refdata"):
            print("Failed to open //blp/refdata")
            return

        # Obtain previously opened service
        refDataService = session.getService("//blp/refdata")

        # Create and fill the request for the historical data
        request = refDataService.createRequest("HistoricalDataRequest")
        request.getElement("securities").appendValue(bbg_identifier)
        request.getElement("fields").appendValue("OPEN")
        request.getElement("fields").appendValue("HIGH")
        request.getElement("fields").appendValue("LOW")
        request.getElement("fields").appendValue("PX_LAST")
        request.getElement("fields").appendValue("EQY_WEIGHTED_AVG_PX")
        request.set("periodicityAdjustment", "ACTUAL")
        request.set("periodicitySelection", "DAILY")
        request.set("startDate", startDate)
        request.set("endDate", endDate)
        request.set("maxDataPoints", 1400)  # Don't adjust please :)

        print("Sending Request:", request)
        # Send the request
        session.sendRequest(request)

        # Process received events
        while True:
            # We provide timeout to give the chance for Ctrl+C handling:
            ev = session.nextEvent(500)
            for msg in ev:
                if str(msg.messageType()) == "HistoricalDataResponse":

                    histdata = []

                    for fd in msg.getElement("securityData").getElement(
                            "fieldData").values():
                        histdata.append([
                            fd.getElementAsString("date"),
                            fd.getElementAsFloat("OPEN"),
                            fd.getElementAsFloat("HIGH"),
                            fd.getElementAsFloat("LOW"),
                            fd.getElementAsFloat("PX_LAST"),
                            fd.getElementAsFloat("EQY_WEIGHTED_AVG_PX"),
                        ])

                    histdata = pd.DataFrame(histdata,
                                            columns=[
                                                "Date", "Open", "High", "Low",
                                                "Close", "VWAP"
                                            ])

            if ev.eventType() == blpapi.Event.RESPONSE:
                # Response completely received, so we could exit
                if 'old_bbg_data' in locals():
                    histdata = pd.concat([histdata, old_bbg_data], axis=0)
                    histdata = histdata.drop_duplicates('Date')
                    histdata = histdata.sort_values('Date')
                    histdata.reset_index(drop=True, inplace=True)

                pd.DataFrame.to_csv(histdata,
                                    "bbg_data/" + bbg_identifier + ".csv",
                                    index=False)

                return histdata
    finally:
        # Stop the session
        session.stop()
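A hypothetical call, assuming date_to_str accepts a date object plus a format string as the function body implies; the identifier and dates are illustrative only, and a running Bloomberg session is required:

import datetime as dt

prices = req_historical_data(
    "AAPL US Equity",      # hypothetical Bloomberg identifier
    dt.date(2020, 1, 2),   # startDate
    dt.date(2020, 3, 31),  # endDate
)
print(prices.head())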