Example No. 1
    def __init__(self, datetime, timeframe, radars,
                 declutter, basedir, multiscandir, grid):
        """ Do some argument checking. """
        # Attributes
        self.datetime = datetime
        self.timeframe = timeframe
        self.radars = radars
        self.declutter = declutter
        self.basedir = basedir
        self.multiscandir = multiscandir
        self.grid = grid

        # Derived attributes
        self.timedelta = config.TIMEFRAME_DELTA[timeframe]
        self.code = self.CODE[self.timedelta]
        # Prevent illegal combinations of dt and dd, can be nicer...
        if self.code == '48uur':
            if datetime != datetime.replace(hour=8, minute=0,
                                            second=0, microsecond=0):
                raise ValueError
        if self.code == '24uur':
            if datetime != datetime.replace(hour=8, minute=0,
                                            second=0, microsecond=0):
                raise ValueError
        if self.code == 'uur':
            if datetime != datetime.replace(minute=0,
                                            second=0, microsecond=0):
                raise ValueError
        if self.code == '5min':
            if datetime != datetime.replace(second=0,
                                            microsecond=0,
                                            minute=(datetime.minute // 5) * 5):
                raise ValueError
        self.path = self.get_path()
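A minimal usage sketch of the alignment the '5min' branch expects; the helper name below is illustrative and not part of the original class:

from datetime import datetime

def floor_to_five_minutes(dt):
    # Round down to the 5-minute grid accepted by the '5min' check above.
    return dt.replace(minute=(dt.minute // 5) * 5, second=0, microsecond=0)

# floor_to_five_minutes(datetime(2020, 1, 1, 12, 43, 17)) -> 2020-01-01 12:40:00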
Example No. 2
 def text_to_absolute_time(self):
   dtime = datetime.today()

   # Expected values are today's date combined with the parsed time of day.
   self.assertEqual(utils.text_to_absolute_time("3:32:08 AM"),
     dtime.replace(hour=3, minute=32, second=8))
   self.assertEqual(utils.text_to_absolute_time("3:32:08 p.m."),
     dtime.replace(hour=15, minute=32, second=8))
   self.assertEqual(utils.text_to_absolute_time("5:34 pm"),
     dtime.replace(hour=17, minute=34, second=0))
Example No. 3
def date(datetime, arg=None):
    from django.template.defaultfilters import date
    from django.utils import timezone

    if not timezone.is_aware(datetime):
        datetime = datetime.replace(tzinfo=timezone.utc)
    return date(datetime, arg)
Example No. 4
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        if resolve_columns:
            from django.db.models.fields import DateTimeField
            fields = [DateTimeField()]
        else:
            from django.db.backends.util import typecast_timestamp
            needs_string_cast = self.connection.features.needs_datetime_string_cast

        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                datetime = row[offset]
                if resolve_columns:
                    datetime = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    datetime = typecast_timestamp(str(datetime))
                # Datetimes are artificially returned in UTC on databases that
                # don't support time zones. Restore the zone used in the query.
                if settings.USE_TZ:
                    datetime = datetime.replace(tzinfo=None)
                    datetime = timezone.make_aware(datetime, self.query.tzinfo)
                yield datetime
Example No. 5
def str2datetime(s):
    # Split "YYYY-mm-dd HH:MM:SS[.ffffff]" into the base timestamp and an
    # optional fractional part, assumed to be expressed in microseconds.
    parts = s.split('.')
    dt = datetime.strptime(parts[0], "%Y-%m-%d %H:%M:%S")

    if len(parts) == 1:
        return dt
    elif len(parts) == 2:
        return dt.replace(microsecond=int(parts[1]))
Example No. 6
def datetime_to_isoformat_timestr(datetime):
    try:
        datetime = datetime.replace(microsecond=0)
        pytz_obj = pytz.timezone(settings.TIME_ZONE)
        isoformat_timestr = pytz_obj.localize(datetime).isoformat()
        return isoformat_timestr
    except Exception as e:
        logger.error(e)
        return ''
Example No. 7
def replace_timezone(datetime, tz):
    """
    Change timezone without changing the time value.
    We cannot replace tzinfo directly, as it does not work properly.
    See http://pytz.sourceforge.net/#localized-times-and-date-arithmetic
    :param datetime: :class:`datetime.datetime` object.
    :param tz: :class:`pytz` timezone.
    :return: new :class:`datetime.datetime` object.
    """
    return tz.localize(datetime.replace(tzinfo=None))
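A small usage sketch of the pitfall the docstring refers to (assuming pytz is installed, as in the other examples; the offsets are whatever the zone database reports):

import pytz
from datetime import datetime

amsterdam = pytz.timezone('Europe/Amsterdam')
naive = datetime(2021, 6, 1, 12, 0)

# Attaching a pytz zone directly picks up its raw base (LMT) offset,
# while localize() applies the correct DST-aware offset for that wall-clock time.
wrong = naive.replace(tzinfo=amsterdam)      # carries the zone's LMT offset, not CEST
right = replace_timezone(naive, amsterdam)   # carries +02:00 (CEST)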
Example No. 8
def datetime_to_isoformat_timestr(datetime):
    try:
        # This library only supports two ways of building a localized time.
        # The first is to use the localize() method provided by the pytz library.
        # This is used to localize a naive datetime (datetime with no timezone information):
        datetime = datetime.replace(microsecond=0)
        isoformat_timestr = current_timezone.localize(datetime).isoformat()
        return isoformat_timestr
    except Exception as e:
        logger.error(e)
        return ''
Example No. 9
 def adventure_from_geohash(self, adventurer, datetime):
     location = adventurer.location
     destination = self.geohash(location, datetime.date())
     mission = midgard.mgdschema.ttoa_mission()
     mission.type = 1
     mission.text = "Geohash for %s, %s" % (int(math.floor(destination.lat)), int(math.floor(destination.lon)))
     mission.pubDate = datetime
     mission.validDate = datetime.replace(hour=23, minute=59, second=59)
     mission.latitude = destination.lat
     mission.longitude = destination.lon
     mission.create()
     return self.adventure_from_mission(mission, adventurer)
Example No. 10
    def to_local_timezone(self, datetime):
        """Returns a datetime object converted to the local timezone.

        :param datetime:
            A ``datetime`` object.
        :returns:
            A ``datetime`` object normalized to a timezone.
        """
        if datetime.tzinfo is None:
            datetime = datetime.replace(tzinfo=pytz.UTC)

        return self.tzinfo.normalize(datetime.astimezone(self.tzinfo))
Example No. 11
def generate_data_range(iResidents, Dwell, iIrradianceThreshold, iRandomHouse, from_date, to_date):
    if to_date < from_date:
        raise ValueError("from_date > to_date!")
    days = []
    start_date = from_date
    while start_date < to_date:
        days.append(start_date)
        start_date = start_date + timedelta(days=1)
    first = True
    data = []
    for day in days:
        addHeader = False
        if first:
            addHeader = True
            first = False
        bWeekend = day.weekday() == 5 or day.weekday() == 6
        # Generate each day...
        ResultofOccupancySim = occsimread.OccupanceSim(iResidents, bWeekend)
        iMonth = day.month
        day_data = generate_date_single_day(
            Dwell, ResultofOccupancySim, bWeekend, iMonth, iIrradianceThreshold, iRandomHouse, addHeader
        )
        # replace() returns a new object, so the result must be assigned
        datetime = day.replace(hour=0, minute=0, second=0)
        # copy day_data, but add timestamp
        if addHeader:
            first_row = day_data.pop(0)
            first_row.insert(0, "Timestamp")
            data.append(first_row)
        for row in day_data:
            if datetime < from_date or datetime > to_date:  # check for hour-range
                continue
            row.insert(0, datetime)
            data.append(row)
            datetime = datetime + timedelta(minutes=1)
        print("Generated %s" % day)
    return data
Example No. 12
    def shift_local(self, datetime, delta):
        """Moves a local datetime by a timedelta, taking into account DST.

        Args:
            datetime: An aware, local datetime.
            delta: A timedelta by which the datetime should be shifted.

        Returns:
            The result of moving datetime by 'delta' amount of local time.  When
            applying over timezone boundaries, the effect of DST will be
            compensated for (for example, midnight BST + 4 hours = 4am GMT).
        """
        naive = datetime.replace(tzinfo=None)
        new_naive = naive + delta
        return self.timezone.localize(new_naive)
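A sketch of the behaviour across the BST-to-GMT changeover, repeating the method's steps with self.timezone assumed to be Europe/London:

import pytz
from datetime import datetime, timedelta

london = pytz.timezone('Europe/London')

# 31 Oct 2021: UK clocks go back from BST (UTC+1) to GMT (UTC+0) at 02:00.
start = london.localize(datetime(2021, 10, 31, 0, 0))               # midnight BST
shifted = london.localize(start.replace(tzinfo=None) + timedelta(hours=4))

# shifted is 04:00 GMT: four hours of local (wall-clock) time have passed,
# but five hours of absolute time, since 01:00-02:00 occurs twice that night.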
Example No. 13
 def _get_value(self):
     if self._value_bool in (1, 0):
         return self._value_bool
      elif self._value_int is not None:
         # this is only not a float when the field was not saved
         if type(self._value_int) is not int and self._value_int.is_integer():
             return int(self._value_int)
         return self._value_int
      elif self._value_datetime is not None:
         if type(self._value_datetime).__name__ in ('str', 'unicode'):
             datetime = dateutil.parser.parse(self._value_datetime)
             return datetime.replace(tzinfo=None)
         else:
             return self._value_datetime
     else:
         return self._value_char
Example No. 14
def projectdata():
    '''Create a dictionary that contains data from individual firm-key:project name, value:project detail'''
    wdata = {}  # accumulate the per-firm pivot tables across iterations
    for key in data.keys():
        data[key].date = pd.to_datetime(data[key].date, format='%m/%d')
        data[key].date = data[key].date.apply(
            lambda dt: dt.replace(year=today.year))
        #Create working hour
        data[key]['wtime'] = data[key].time_end - data[key].time_start
        #data[key]['hwage'] = np.random.uniform(40,50,len(data[key].index))
        data[key]['dinditotal'] = data[key].wtime * data[key].hwage
        data[key]['construction'] = str(key)
        data[key] = data[key].round(2)
        wdata[key] = pd.pivot_table(data[key],
                                    values='dinditotal',
                                    index='name',
                                    columns='date')
        wdata[key] = wdata[key].fillna(0)
        wdata[key]['winditotal'] = wdata[key].sum(axis=1)
        wdata[key]['totalpay'] = wdata[key]['winditotal'].cumsum(axis=0)
    return data, wdata
Example No. 15
    def results_iter(self):
        if self.connection.ops.oracle:
            from django.db.models.fields import DateTimeField
            fields = [DateTimeField()]
        else:
            from django.db.backends.util import typecast_timestamp
            needs_string_cast = self.connection.features.needs_datetime_string_cast

        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                datetime = row[offset]
                if self.connection.ops.oracle:
                    datetime = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    datetime = typecast_timestamp(str(datetime))
                # Datetimes are artificially returned in UTC on databases that
                # don't support time zones. Restore the zone used in the query.
                if settings.USE_TZ:
                    datetime = datetime.replace(tzinfo=None)
                    datetime = timezone.make_aware(datetime, self.query.tzinfo)
                yield datetime
Example No. 16
def parse_datetime(string: str):
    """Takes in a string of the format `dd/mm/yyyy|hh:mm` and returns a datetime object. Assumes that the time given is UTC."""
    if not string: return None
    date, time = string.split("|")
    day, month, year = date.split("/")
    hour, minutes = time.split(":")

    if not (day.isdigit() and month.isdigit() and year.isdigit() and hour.isdigit() and minutes.isdigit()):
        raise DateTimeParserError # If any component is not integer
    else:
        day = int(day)
        month = int(month)
        year = int(year)
        hour = int(hour)
        minutes = int(minutes)

    try:
        obj = datetime.utcnow().replace(year=year, month=month, day=day,
                                        hour=hour, minute=minutes,
                                        second=0, microsecond=0)
        return obj
    except ValueError:
        # The datetime information provided is out of the range. E.g. the day provided may be 40.
        raise DateTimeParserError
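A quick usage sketch (DateTimeParserError is the exception used in the snippet above; the inputs are illustrative):

parse_datetime("25/12/2021|18:30")   # -> datetime(2021, 12, 25, 18, 30) in UTC
parse_datetime("")                   # -> None
parse_datetime("40/01/2021|00:00")   # day 40 is out of range -> DateTimeParserError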
Example No. 17
def get_data(page, csv_save, printable):

    file = open(csv_save, 'w')
    headers = 'date, network, teams, scores\n'
    file.write(headers)

    page_soup = soup(page, 'html.parser')

    # tag - <div class="scorebox-wrapper">
    scores = page_soup.find_all('div', {'class': 'scorebox-wrapper'})

    for score_data in scores:

        # tag - <span class="date"....>
        date = score_data.find('span', {'class': 'date'}).text

        # tag - <span class="network"....>
        network = score_data.find('span', {'class': 'network'}).text

        # tag - <p class="team-name">
        team1 = score_data.find_all('p', {'class': 'team-name'})[0].text
        team2 = score_data.find_all('p', {'class': 'team-name'})[1].text
        teams = team1 + ' vs. ' + team2

        score1 = score_data.find_all('p', {'class': 'total-score'})[0].text
        score2 = score_data.find_all('p', {'class': 'total-score'})[1].text
        scores = score1 + ' to ' + score2

        if printable == 'y':
            print('Date: ' + date + '\n' + 'Network: ' + network + '\n' +
                  'Teams: ' + teams + '\n' + 'Score: ' + scores + '\n')
        else:
            file.write(
                date.replace(',', '  /') + ',' + network + ',' + teams + ',' +
                scores + ' \n')

    file.close()
Example No. 18
def um_file_list(runid, startd, endd, freq):
    """
    Give a (theoretical) list of UM date format files between 2 dates.
    Assuming no missing dates.

    args
    ----
    runid: model runid
    startd: start date(date)
    endd: end date(date)
    freq:
    Specifies frequency of data according to PRECIS technical manual
    table D1
    http://www.metoffice.gov.uk/binaries/content/assets/mohippo/pdf/4/m/tech_man_v2.pdf#page=118
    Currently only supports:
    pa: Timeseries of daily data spanning 1 month (beginning 0z on the 1st day)
    pj: Timeseries of hourly data spanning 1 day (0z - 24z)
    pm: Monthly average data for 1 month
    Not currently supported: ps, px, p1, p2, p3, p4, mY

    returns
    -------
    filelist: list of strings giving the filenames

    Notes
    -----
    See below for examples

    >>> runid = 'akwss'
    >>> startd = datetime(1980, 9, 1)
    >>> endd = datetime(1980, 9, 3)
    >>> freq = 'pa'
    >>> print (um_file_list(runid, startd, endd, freq)) # doctest: +NORMALIZE_WHITESPACE
    ['akwssa.pai0910.pp', 'akwssa.pai0920.pp', 'akwssa.pai0930.pp']

    >>> startd = datetime(1980, 9, 1)
    >>> endd = datetime(1980, 12, 31)
    >>> freq = 'pm'
    >>> print (um_file_list(runid, startd, endd, freq)) # doctest: +NORMALIZE_WHITESPACE
    ['akwssa.pmi0sep.pp', 'akwssa.pmi0oct.pp',
     'akwssa.pmi0nov.pp', 'akwssa.pmi0dec.pp']

    """

    # list to store the output
    filelist = []

    dt = startd
    # check if the value of the start year is <= end year
    if dt.year > endd.year:
        raise ValueError("Start date {} must be <= end year {}".format(
            dt.year, endd.year))

    while dt <= endd:
        # Monthly frequency
        if freq == "pm":
            fname = "{}a.pm{}.pp".format(runid,
                                         convert_to_um_stamp(dt, "YYMMM"))
            # Add a month to the date
            if dt.month == 12:
                dt = dt.replace(year=dt.year + 1, month=1)
            else:
                dt = dt.replace(month=dt.month + 1)

        # Daily frequency
        elif freq == "pa":
            # build file name string
            fname = "{}a.pa{}.pp".format(runid,
                                         convert_to_um_stamp(dt, "YYMDH"))
            # add a day to the date
            dt = dt + timedelta(days=1)

        # Hourly frequency
        elif freq == "pj":
            # build file name string
            fname = "{}a.pj{}.pp".format(runid,
                                         convert_to_um_stamp(dt, "YYMDH"))
            # add a day to the date
            dt = dt + timedelta(days=1)

        # Not recognized frequency
        else:
            raise ValueError("Unsupported freq {} supplied.".format(freq))

        # Add to list
        filelist.append(fname)

    return filelist
Example No. 19
def cut_to_hour(datetime):
	return datetime.replace(minute=0, second=0, microsecond=0)
Example No. 20
def dump_datetime(datetime):
    """Deserialize datetime object into int timestamp."""
    if datetime is None:
        return None
    return int(datetime.replace(tzinfo=timezone.utc).timestamp())
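A round-trip sketch, assuming the naive input is meant to be interpreted as UTC (which is what the replace(tzinfo=timezone.utc) call implies):

from datetime import datetime, timezone

dump_datetime(datetime(2021, 1, 1, 12, 0))           # -> 1609502400
datetime.fromtimestamp(1609502400, tz=timezone.utc)  # -> 2021-01-01 12:00:00+00:00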
Example No. 21
def to_json(input_file_path, has_class=False):
    event_list = []
    activity_intervals = {}
    first_timestamp = None

    previous_activity_class = None
    current_activity_start = None

    with safe_open(input_file_path) as input_file:
        for line in input_file:
            tokens = line.strip().replace('\t', ' ').split(' ')
            tokens = [t for t in tokens if t != ' ' and t != '']

            # parse date
            datetime = parse_date(tokens)
            if not datetime:
                print("[ERROR] Date not correctly formatted: " + line +
                      ". Ignoring ...")
                #sys.exit(-1)
                continue
            datetime = datetime.replace(tzinfo=pytz.UTC)

            # parse and validate event type
            event_id = tokens[2]
            if event_id not in sensors.keys():
                print("[ERROR] Sensor id not correctly formatted: " + line)
                continue

            # parse and validate event value
            event = sensors[event_id]
            event_value = tokens[3]
            if event["value_type"] is DISCRETE:
                if event_value not in event["accepted_values"]:
                    print("[ERROR] Sensor value " + event_value + " of type " +
                          event['value_type'] + " not correctly formatted: " +
                          line + ". Ignoring ...")
                    #sys.exit(-1)
                    continue
            elif event["value_type"] is NUMERIC:
                # if float(event_value) < event["accepted_values"][0] or float(event_value) > event["accepted_values"][1]:
                #     print("[ERROR] Sensor value not correctly formatted: " + line)
                #     sys.exit(-1)
                if float(event_value) < event["accepted_values"][0]:
                    print("[ERROR] Sensor value " + event_value + " of type " +
                          event['value_type'] + " not correctly formatted: " +
                          line + ". Value less than min accepted value: " +
                          str(event["accepted_values"][0]) + ". Ignoring ...")
                    continue
                elif float(event_value) > event["accepted_values"][1]:
                    print("[ERROR] Sensor value " + event_value + " of type " +
                          event['value_type'] + " not correctly formatted: " +
                          line + ". Value greater than max accepted value: " +
                          str(event["accepted_values"][1]) + ". Ignoring ...")
                    continue
            else:
                print("[ERROR] Sensor value_type not correctly formatted: " +
                      line)
                print(tokens)
                #sys.exit(-1)
                continue

            # parse and validate event class
            # if has_class:
            #     if len(tokens) == 6:
            #         event_class = tokens[4]
            #         interval_tick = tokens[5]

            #         if event_class in activity_types:
            #             if not event_class in activity_intervals and interval_tick == "begin":
            #                 activity_intervals[event_class] = {
            #                     "start" : int(unix_time_millis(datetime))
            #                 }
            #             elif event_class in activity_intervals and interval_tick == "end":
            #                 activity_intervals[event_class].update({
            #                     "end" : int(unix_time_millis(datetime))
            #                 })
            print("event_class = " + tokens[4])
            event_class = int(tokens[4])

            if not previous_activity_class:
                current_activity_start = int(unix_time_millis(datetime))
                previous_activity_class = event_class
                first_timestamp = int(unix_time_millis(datetime))
            else:
                if event_class != previous_activity_class:
                    if previous_activity_class in activity_intervals:
                        activity_intervals[previous_activity_class].append({
                            "activity_type":
                            interweaved_activity_map[previous_activity_class],
                            "interval": {
                                "start": current_activity_start,
                                "end": int(unix_time_millis(datetime))
                            }
                        })
                    else:
                        activity_intervals[previous_activity_class] = \
                          [{
                            "activity_type": interweaved_activity_map[previous_activity_class],
                            "interval" : {"start" : current_activity_start, "end": int(unix_time_millis(datetime))}
                          }
                          ]

                    previous_activity_class = event_class
                    current_activity_start = int(unix_time_millis(datetime))

            event = create_json_event(event_id, event_value, datetime)
            # event = {
            #     "event": {
            #         "event_type": event_id,
            #         "event_info": {
            #             "value": event_value,
            #             "annotations": {
            #                 "startTime": str(datetime),
            #                 "endTime": str(datetime)
            #             }
            #         }
            #     }
            # }

            event_list.append(event)
    return event_list, activity_intervals, first_timestamp
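For orientation, the activity_intervals value returned here has roughly the following shape; the activity name and millisecond timestamps below are purely illustrative:

example_intervals = {
    3: [{
        "activity_type": "cooking",  # looked up in interweaved_activity_map
        "interval": {"start": 1469083200000, "end": 1469086800000},  # ms since epoch
    }],
}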
Example No. 22
import os
import sys
import pandas as pd
from datetime import datetime
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import tensorflow as tf
sys.stderr = stderr
tf.logging.set_verbosity(tf.logging.FATAL)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'


# docs.python.org/3/library/datetime.html
# stackoverflow.com/questions/22715086/scheduling-python-script-to-run-every-hour-accurately

while 1:
	dt = datetime.now()
	dt = dt.replace(hour=0,minute=59)

	# if it's sometime after midnight...but before 1 AM
	if datetime.now() < dt:

		inFile = pd.read_csv('stock_performance.csv')

		for x in range(0, len(inFile['stock_ticker'])):


			def plot_results_triple(stkFile, npFinal, file_to_save, picName, predicted_data, true_data, prediction_len):

				df2 = pd.read_csv(file_to_save)
				for i in npFinal:
					df2 = df2.append({'Close':i}, ignore_index=True)
Example No. 23
def combine_csv_data(*,
                     start_date,
                     end_date,
                     sampling_rate='MS',
                     obs_list,
                     data_path,
                     model_path,
                     day_of_month=1):
    """Read and combine observatory and model SV data for several locations.

    Calls read_csv_data to read observatory data and field model predictions
    for each observatory in a list. The data and predictions for individual
    observatories are combined into their respective large dataframes. The
    first column contains datetime objects and subsequent columns contain X, Y
    and Z secular variation/field components (in groups of three) for all
    observatories.

    Args:
        start_date (datetime.datetime): the start date of the data analysis.
        end_date (datetime.datetime): the end date of the analysis.
        sampling_rate (str): the sampling rate for the period of interest. The
            default is 'MS', which creates a range of dates between the
            specified values at monthly intervals with the day fixed as the
            first of each month. Use 'M' for the final day of each month. Other
            useful options are 'AS' (a series of dates at annual intervals,
            with the day and month fixed at 01 and January respectively) and
            'A' (as for 'AS' but with the day/month fixed as 31 December.)
        obs_list (list): list of observatory names (as three-letter IAGA codes).
        data_path (str): path to the CSV files containing observatory data.
        model_path (str): path to the CSV files containing model SV data.
        day_of_month (int): For SV data, first differences of
            monthly means have dates at the start of the month (i.e. MF of
            mid-Feb minus MF of mid-Jan should give SV at Feb 1st). For annual
            differences of monthly means the MF of mid-Jan year 2 minus MF of
            mid-Jan year 1 gives SV at mid-July year 1. The dates of COV-OBS
            output default to the first day of the month (compatible with dates
            of monthly first differences SV data, but not with those of
            annual differences). This option is used to set the day part of the
            dates column if required. Default to 1 (all output dataframes
            will have dates set at the first day of the month.)

    Returns:
        (tuple): tuple containing:

        - obs_data (*pandas.DataFrame*):
            dataframe containing SV data for all observatories in obs_list.
        - model_sv_data (*pandas.DataFrame*):
            dataframe containing SV predictions for all observatories in
            obs_list.
        - model_mf_data (*pandas.DataFrame*):
            dataframe containing magnetic field predictions for all
            observatories in obs_list.
    """
    # Initialise the dataframe with the appropriate date range
    dates = pd.date_range(start_date, end_date, freq=sampling_rate)
    obs_data = pd.DataFrame({'date': dates})
    model_sv_data = pd.DataFrame({'date': dates})
    model_mf_data = pd.DataFrame({'date': dates})

    for observatory in obs_list:

        obs_file = observatory + '.csv'
        model_sv_file = 'sv_' + observatory + '.dat'
        model_mf_file = 'mf_' + observatory + '.dat'
        obs_data_temp = read_csv_data(fname=os.path.join(data_path, obs_file),
                                      data_type='sv')
        model_sv_data_temp = covobs_readfile(fname=os.path.join(
            model_path, model_sv_file),
                                             data_type='sv')
        model_mf_data_temp = covobs_readfile(fname=os.path.join(
            model_path, model_mf_file),
                                             data_type='mf')

        model_sv_data_temp['date'] = model_sv_data_temp['date'].apply(
            lambda dt: dt.replace(day=1))

        obs_data_temp.rename(columns={
            'dX': 'dX' + '_' + observatory,
            'dY': 'dY' + '_' + observatory,
            'dZ': 'dZ' + '_' + observatory
        },
                             inplace=True)
        obs_data_temp['date'] = obs_data_temp['date'].apply(
            lambda dt: dt.replace(day=1))
        model_sv_data_temp.rename(columns={
            'dX': 'dX' + '_' + observatory,
            'dY': 'dY' + '_' + observatory,
            'dZ': 'dZ' + '_' + observatory
        },
                                  inplace=True)
        model_mf_data_temp.rename(columns={
            'X': 'X' + '_' + observatory,
            'Y': 'Y' + '_' + observatory,
            'Z': 'Z' + '_' + observatory
        },
                                  inplace=True)
        # Combine the current observatory data with those of other
        # observatories (the merge is identical for the first and for
        # subsequent observatories, so no branching is needed)
        obs_data = pd.merge(left=obs_data,
                            right=obs_data_temp,
                            how='left',
                            on='date')
        model_sv_data = pd.merge(left=model_sv_data,
                                 right=model_sv_data_temp,
                                 how='left',
                                 on='date')
        model_mf_data = pd.merge(left=model_mf_data,
                                 right=model_mf_data_temp,
                                 how='left',
                                 on='date')
    if day_of_month != 1:
        model_sv_data['date'] = model_sv_data['date'].apply(
            lambda dt: dt.replace(day=day_of_month))
        model_mf_data['date'] = model_sv_data['date']
        obs_data['date'] = model_sv_data['date']
    return obs_data, model_sv_data, model_mf_data
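Given the long keyword-only signature, a hedged call sketch may help; the observatory codes and paths below are illustrative and the corresponding CSV/.dat files must exist:

from datetime import datetime

obs_data, model_sv_data, model_mf_data = combine_csv_data(
    start_date=datetime(2000, 1, 1),
    end_date=datetime(2010, 12, 31),
    sampling_rate='MS',
    obs_list=['ngk', 'clf'],
    data_path='data/observatories/',
    model_path='data/covobs/',
    day_of_month=1)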
Example No. 24
 def __set_datetime(self, datetime):
   if datetime.tzinfo is None:
     msg = 'gcdomain.Timestamp.datetime cannot be set to naive datetime [%s]' % datetime
     raise ValueError(msg)
   self.__datetime = datetime.replace(microsecond=0)
Example No. 25
def normalize_datetime(datetime, minutes=MINUTE_NORMALIZATION):
    minutes = (datetime.minute - (datetime.minute % minutes))
    normalized_datetime = datetime.replace(second=0, microsecond=0, minute=minutes)
    return normalized_datetime
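A short usage sketch, assuming MINUTE_NORMALIZATION is 15 (the constant is defined elsewhere in the original module):

from datetime import datetime

normalize_datetime(datetime(2021, 3, 2, 12, 47, 31), minutes=15)
# -> datetime(2021, 3, 2, 12, 45): 47 - (47 % 15) = 45, seconds and microseconds zeroed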
Example No. 26
def strip_tz(datetime):
    """Strip the timezone for USE_TZ=False"""
    return datetime.replace(tzinfo=None)
Example No. 27
 def __init__(self, datetime, nanos=None):
   if datetime.tzinfo is None:
     raise ValueError('gcdomain.Timestamp cannot be created with naive datetime [%s]' % datetime)
   self.__datetime = datetime.replace(microsecond=0)
   self.nanos = None if nanos is None else int(nanos)
Example No. 28
def datetime_to_isoformat_timestr(datetime):
    datetime = datetime.replace(microsecond=0)
    pytz_obj = pytz.timezone(settings.TIME_ZONE)
    isoformat_timestr = pytz_obj.localize(datetime).isoformat()
    return isoformat_timestr
Example No. 29
 def to_naive_utc(self, datetime, tz_name):
     tz = tz_name and pytz.timezone(tz_name) or pytz.UTC
     return tz.localize(datetime.replace(tzinfo=None),
                        is_dst=False).astimezone(
                            pytz.UTC).replace(tzinfo=None)
Example No. 30
def local_time(iso_timestamp, timezone=None):
    datetime = pd.to_datetime(iso_timestamp)
    if not datetime.tzinfo:
        datetime = datetime.replace(
            tzinfo=dateutil.tz.gettz('America/Los_Angeles'))
    return datetime.isoformat()
Example No. 31
 def _scalefkt(self, dt_val):
     return dt_val.replace(tzinfo=self._from_timezone).astimezone(
         self._to_timezone)
Example No. 32
def performance_as_whole(request,pk_id=None):
    global tweets_num
    global number
    global twitter_handler_count
    global retweets
    id = int(pk_id)
    user_data_id = None
    consumer_key = 'URCTNLThkHQAxCFLtMjWOnMlA'
    consumer_secret = 'lVQoh3ywFUeGSzMYkXjyA3g3kYHMpkrRpiX8ccfrvVmSKvJr7y'
    access_token = ''
    access_secret = ''
    Screen_Name = ''
    #id for Twitter Accounts user
    instance = get_object_or_404(Accounts_Data, account=id)
    print(instance)
    print(id)
    id_field = Accounts_Data.objects.get(account=id)
    print(id_field)
    if request.user.is_active:
        user_data_id = request.user.id
        print(user_data_id)
        user_data_name = request.user.username
        print(user_data_name)
    tokens = Accounts_Data.objects.filter(user_id=user_data_id)
    if id is None:
        messages.error(request,'Please select any account')
    print('IN THIS VIEW CHECKER****')
    print("Id of tokens",id)
    data = Accounts_Data.objects.get(account=id)
    access_token = data.Access_token
    access_secret = data.Access_secret
    check_point_keys = [ key.user_id for key in tokens]
    if len(check_point_keys)== 0 :
        messages.error(request, 'please Enter your credentials to go further')
        return redirect('/home_page/Account_id/%d/'%id)
    else:
        pass
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)
    user = api.me()
    print(user.screen_name)
    logging.info("begin")
    sleep(2.5)
    like_data = []
    object = FavouriteKeywords.objects.all()
    restricted_object = Res_Keywords.objects.filter(Account_id=id)
    data_to_watch = TweetsData.objects.all()
    # object = FavouriteKeywords.objects.all()
    object = Fav_Keywords.objects.filter(Account_id=id)
    check_point = [data.keyword.Fav_keywords for data in object]
    if len(check_point) == 0:
        messages.error(request,'please enter Favourite keywords')
        return redirect('/home_page/Account_id/%d/perform' % id)
    # black_list = Black_List_Names.objects.all()
    black_list_data = Black_user.objects.filter(Account_id=id)
    black_list_words = [words.black_list.block_users for words in black_list_data]
    print(black_list_words)
    follow = api.followers(screen_name=Screen_Name)
    following = [followers.screen_name for followers in follow]
    print(following)
    print (type(following))
    #getting time for liking tweets from database
    getting_time = Jobs.objects.filter(Account_id=id).latest('Job_id')
    print(getting_time)
    jobs_id = getting_time.Job_id
    print(jobs_id)
    initial_time = getting_time.initial_time
    final_time = getting_time.final_time
    time = str(initial_time)
    print('type of initial time',type(initial_time))
    time_2 = str(final_time)
    print('type of final time',type(final_time))
    PERIOD = (final_time - initial_time).total_seconds()
    print(PERIOD)
    if PERIOD<0:
        messages.error(request,'Please enter correct datetime')
        return redirect('/home_page/Account_id/%d/#fav_keywords/'%id)
    elif not PERIOD < 0:
        pass
    if request.is_ajax():
        block_users = Black_List_Names.objects.all()
        datetime_list = []
        for n in range(3):
            dt = initial_time + timedelta(seconds=randint(0, PERIOD))
            dt = dt.replace(tzinfo=None)
            print(dt)
            datetime_list.append(dt)
        datetime_list.sort()
        print(datetime_list)
        for date_time in datetime_list:
            date_time = (date_time)
            print(date_time)
            date_time = str(date_time)
            print(type(date_time))
            flag = True
            while flag:
                current_time_2 = datetime.now() + timedelta(hours=5)
                current_time_2 = current_time_2.strftime("%Y-%m-%d %H:%M:%S")
                current_time_2 = str(current_time_2)
                print ('current time', current_time_2)
                if str(current_time_2).__contains__(date_time):
                    for usi in object:
                        print (usi.keyword.Fav_keywords)
                        fav_keyword = usi.keyword.Fav_keywords
                        print (fav_keyword)
                        num = usi.num_limits
                        print(num)
                        list_words = [words.keyword.Restrited_keywords for words in restricted_object]
                        list_words = list_words
                        print(list_words)
                        tweets = tweepy.Cursor(api.search, q=fav_keyword, rpp=100, result_type='mixed', tweet_mode="extended").items(num)
                        for tweet in tweets:
                            try:
                                twitter_handler = tweet.user.screen_name
                                if twitter_handler in black_list_words:
                                    if not twitter_handler in follow:
                                        twitter_handler_count +=1
                                        continue
                                    if twitter_handler in follow:
                                        pass
                                        if ('RT @' in tweet.text):
                                            continue
                                if any(restrict_word in tweet.full_text for restrict_word in list_words):
                                    if not tweet.favorited:
                                        number +=1
                                        print('restricted words found ', tweet.full_text)
                                    if tweet.favorited:
                                        unlike = api.destroy_favorite(tweet.id)
                                        print ('==>>', unlike.text)
                                        continue
                                if not any(restrict_word in tweet.full_text for restrict_word in list_words):
                                    # if tweet.favorite_count >= 3:
                                    if ('RT @' in tweet.full_text):
                                        retweets +=1
                                        continue
                                    if not tweet.favorited:
                                        print(type(tweet.full_text))
                                        name = tweet.user.screen_name
                                        print(name)
                                        like = api.create_favorite(tweet.id)
                                        print (like)
                                        tweets_num +=1
                                        like_data.append(like)
                                        new_tweets = TweetsData(tweet_id=tweet.id, tweet_content=like.text,
                                                                      Name=name,
                                                                      timestamp=tweet.created_at, jobs= getting_time,Account_id=id_field)
                                        new_tweets.save()
                                        Tweet_content = TweetsData.objects.only('id').get(tweet_content=like.text).id
                                        print(Tweet_content)
                                        Tweet_like_content = TweetsData.objects.get(id=Tweet_content)
                                        like_tweets = LikeTweetsContent(Account_id=id_field, TweetContent = Tweet_like_content)
                                        like_tweets.save()
                                        flag = False
                                        if ('RT @' in tweet.full_text):
                                            continue
                                    if tweet.favorited:
                                        print('Tweet is already liked')
                                    # elif tweet.favorite_count < 3:
                                    #     print(tweet.id, 'You have less then 3 likes')
                                    #
                                    #     if ('RT @' in tweet.text):
                                    #         continue
                            except BaseException as e:
                                name = tweet.user.screen_name
                                print (str(e), tweet.id,)
                elif str(current_time_2) > (date_time):
                    break
                else:
                    print('current time===>', current_time_2, 'date time ====>', date_time)
        try:
            # if number >=1:
            #     data = number
            #     data = {
            #         'is_taken': data
            #     }
            #     if data['is_taken']:
            #         data['error_message'] = 'Restricted words found in %d Tweets'%number
            #         return JsonResponse(data)
            if number>=1:
                messages.error(request,'restricted words in %d tweets'%number)
                return redirect('/home_page/Like_Tweets/')
            if tweets_num or retweets >= 1:
                messages.info(request, '%dTweets Liked!%dRetweeted Tweets are not liked'%(tweets_num,retweets))
                return redirect('/home_page/Like_Tweets/')
        except BaseException as e:
            print(e)
    else:
        print('Not an Ajax')
    return render(request, 'test.html', {'tokens':tokens,'id':id})
Example No. 33
 def time_in_seconds(dt):
     start_of_day = dt.replace(hour=0, minute=0, second=0, microsecond=0)
     return int(dt.timestamp() - start_of_day.timestamp())
Example No. 34
def date(datetime, arg=None):
    from django.template.defaultfilters import date
    from django.utils import timezone
    if not timezone.is_aware(datetime):
        datetime = datetime.replace(tzinfo=timezone.utc)
    return date(datetime, arg)
Example No. 35
 def adjustDatetime(selfi, datetime):
     """adjust datetime to second level (ignore microsecond)"""
     return datetime.replace(microsecond=0)
Example No. 36
 def datetime(self, start_date='-30y', end_date='now', tzinfo=False):
     dt = self.generator.date_time_between(start_date, end_date)
     if tzinfo:
         dt = dt.replace(tzinfo=UTC)
     return dt
Example No. 37
 def datetime(self, start_date='-30y', end_date='now', tzinfo=False):
     dt = self.generator.date_time_between(start_date, end_date)
     if tzinfo:
         dt = dt.replace(tzinfo=UTC)
     return dt
Example No. 38
def timestamp_from_datetime(datetime):
    if datetime is None:
        return 0
    return int(datetime.replace(microsecond=0).timestamp() * 1000)
Example No. 39
 def __get_date_millseconds_eccluded(self, datetime):
     base_date = datetime.replace(microsecond=0)
     return base_date.timestamp()
Example No. 40
def to_tz(datetime, tz_name):
    tz = pytz.timezone(tz_name)
    return pytz.UTC.localize(datetime.replace(tzinfo=None),
                             is_dst=False).astimezone(tz).replace(tzinfo=None)
Example No. 41
def set_time_zone(datetime):		
	return datetime.replace(tzinfo=tzlocal())
Example No. 42
def to_tz(datetime, tz_name):
    tz = pytz.timezone(tz_name)
    return pytz.UTC.localize(datetime.replace(tzinfo=None), is_dst=False).astimezone(tz).replace(tzinfo=None)
Example No. 43
def datetime_convert_timezone(datetime, from_zone, to_zone):
    datetime = datetime.replace(tzinfo=from_zone)
    converted_datetime = datetime.astimezone(to_zone)
    converted_datetime = converted_datetime.replace(tzinfo=None)
    return converted_datetime
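A minimal sketch; note that replace(tzinfo=from_zone) is only reliable when from_zone is a fixed-offset zone such as pytz.utc, since pytz region zones attach their raw LMT offset when assigned this way:

import pytz
from datetime import datetime

datetime_convert_timezone(datetime(2021, 1, 5, 12, 0), pytz.utc, pytz.timezone('US/Eastern'))
# -> datetime(2021, 1, 5, 7, 0): EST is UTC-5 in January, tzinfo stripped again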
Example No. 44
from pathlib import PosixPath
from datetime import timezone
from fnmatch import fnmatchcase

from lars.apache import ApacheSource, COMMON, COMMON_VHOST, COMBINED

from .. import __version__, terminal, const, protocols, transport
from ..format import canonicalize_name

# Workaround: lars bug; User-Agent instead of User-agent
COMBINED = '%h %l %u %t "%r" %>s %b "%{Referer}i" "%{User-Agent}i"'
UTC = timezone.utc

get_package_name = lambda path: canonicalize_name(str(path).split('/')[2])
get_access_ip = lambda rh: str(rh)
get_access_time = lambda dt: dt.replace(tzinfo=UTC)
get_arch = lambda ud: ud.get('cpu')
get_distro_name = lambda ud: ud.get('distro', {}).get('name')
get_distro_version = lambda ud: ud.get('distro', {}).get('version')
get_os_name = lambda ud: ud.get('system', {}).get('name')
get_os_version = lambda ud: ud.get('system', {}).get('release')
get_py_name = lambda ud: ud.get('implementation', {
    'name': 'CPython'
}).get('name')
get_py_version = lambda ud: ud.get('implementation', {
    'version': ud.get('python')
}).get('version')
get_installer_name = lambda ud: ud.get('installer', {}).get('name')
get_installer_version = lambda ud: ud.get('installer', {}).get('version')
get_setuptools_version = lambda ud: ud.get('setuptools_version')
clean_page_name = lambda path: str(path).replace('/', '').replace('.html', '')
Example No. 45
def utc_to_local(datetime):
    utc = datetime.replace(tzinfo=tz.tzutc())
    return utc.astimezone(tz.tzlocal())
Example No. 46
def datetime_to_isoformat_timestr(datetime):
    datetime = datetime.replace(microsecond=0)
    pytz_obj = pytz.timezone(settings.TIME_ZONE)
    isoformat_timestr = pytz_obj.localize(datetime).isoformat()
    return isoformat_timestr
Example No. 47
def to_naive_utc(datetime, record):
    tz_name = record._context.get('tz') or record.env.user.tz
    tz = tz_name and pytz.timezone(tz_name) or pytz.UTC
    return tz.localize(datetime.replace(tzinfo=None),
                       is_dst=False).astimezone(pytz.UTC).replace(tzinfo=None)
Example No. 48
 def adjustDatetime(selfi, datetime):
     """adjust datetime to second level (ignore microsecond)"""
     return datetime.replace(microsecond=0)
Example No. 49
def datetime_convert_timezone(datetime, from_zone, to_zone):
    datetime = datetime.replace(tzinfo=from_zone)
    converted_datetime = datetime.astimezone(to_zone)
    converted_datetime = converted_datetime.replace(tzinfo=None)
    return converted_datetime
Example No. 50
 def __cleanup_datetime(self, datetime):
     return datetime.replace(second=0, microsecond=0)
Example No. 51
def to_naive_utc(datetime, record):
    tz_name = record._context.get('tz') or record.env.user.tz
    tz = tz_name and pytz.timezone(tz_name) or pytz.UTC
    return tz.localize(datetime.replace(tzinfo=None), is_dst=False).astimezone(pytz.UTC).replace(tzinfo=None)
Example No. 52
 def fmtdatestr(datetime):
     return int(datetime.replace('-',''))
Example No. 53
def set_time_zone(datetime):
    return datetime.replace(tzinfo=tzlocal())
Example No. 54
 def month(cls, datetime):
   return datetime.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
Example No. 55
def normalize_datetime(datetime, minutes=MINUTE_NORMALIZATION):
    minutes = (datetime.minute - (datetime.minute % minutes))
    normalized_datetime = datetime.replace(second=0,
                                           microsecond=0,
                                           minute=minutes)
    return normalized_datetime
Example No. 56
def to_naive_utc(datetime):
    if datetime.utcoffset() is not None:
        return datetime.replace(tzinfo=None) - datetime.utcoffset()
        
    return datetime
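A usage sketch covering both the aware and the naive branch:

from datetime import datetime, timedelta, timezone

aware = datetime(2021, 7, 1, 9, 0, tzinfo=timezone(timedelta(hours=2)))
to_naive_utc(aware)                       # -> datetime(2021, 7, 1, 7, 0), naive, in UTC
to_naive_utc(datetime(2021, 7, 1, 9, 0))  # naive input is returned unchanged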
Example No. 57
# Save the data
df_msci_index.to_pickle('./Market_Watch_Data/investpy_msci.pkl')

########################################################################################################################
investpy_economic_calendar = pd.read_pickle(
    './Market_Watch_Data/investpy_economic_calendar_us_20000101_20220215.pkl')

investpy_PMI = investpy_economic_calendar[investpy_economic_calendar[
    'event'].str.contains("ISM Manufacturing PMI")].copy()
investpy_PMI["datetime_announced"] = pd.to_datetime(investpy_PMI["date"],
                                                    errors='coerce',
                                                    format="%d/%m/%Y")
investpy_PMI["datetime"] = investpy_PMI["datetime_announced"] + pd.DateOffset(
    months=-1)
investpy_PMI["datetime"] = investpy_PMI["datetime"].apply(
    lambda dt: dt.replace(day=1))

########################################################################################################################
investpy_snp500 = pd.read_pickle('./Market_Watch_Data/investpy_snp500.pkl')
investpy_kospi = pd.read_pickle('./Market_Watch_Data/investpy_kospi.pkl')
investpy_snp500["snp500"] = investpy_snp500["Close"]
investpy_kospi["KOSPI"] = investpy_kospi["Close"]

investpy_index = pd.merge(investpy_snp500[["snp500"]],
                          investpy_kospi[["KOSPI"]],
                          left_index=True,
                          right_index=True,
                          how='outer')
investpy_index["snp500"] = investpy_index["snp500"].fillna(method='ffill')
investpy_index["KOSPI"] = investpy_index["KOSPI"].fillna(method='ffill')
Example No. 58
 def day(cls, datetime):
   return datetime.replace(hour=0, minute=0, second=0, microsecond=0)