コード例 #1
0
 def apply(self, other):
     """Shift ``other`` by ``self.n`` custom/trading business days.

     For datetimes, iteratively re-resolves which business-day rule applies
     whenever the tentative result falls outside the validity interval of
     the rule that produced it.  For timedelta-like values, folds them into
     the offset of a plain BusinessDay.
     """
     if isinstance(other, datetime):
         moved = 0
         remaining = self.n - moved
         # NOTE(review): _custom_business_day_for presumably returns the
         # offset valid at ``other`` plus the interval over which that
         # rule holds -- confirm against its definition.
         bday, interval = self._custom_business_day_for(other, remaining)
         result = bday.apply(other)
         # While the tentative result escapes the current rule's interval,
         # jump to the interval edge, account for the days consumed by the
         # jump, and re-apply with the remaining count under the edge rule.
         while not interval.left <= result <= interval.right:
             previous_other = other
             if result < interval.left:
                 other = interval.left
             elif result > interval.right:
                 other = interval.right
             else:
                 raise RuntimeError("Should not reach here")
             # Days consumed moving from the previous anchor to the edge.
             moved += self._moved(previous_other, other, bday)
             remaining = self.n - moved
             if remaining == 0:
                 break
             bday, interval = self._custom_business_day_for(other,
                                                            remaining,
                                                            is_edge=True)
             result = bday.apply(other)
         return result
     elif isinstance(other, timedelta) or isinstance(other, Tick):
         # Combining with a timedelta-like only shifts the time-of-day
         # offset; the business-day count is unchanged.
         return BusinessDay(self.n,
                            offset=self.offset + other,
                            normalize=self.normalize)
     else:
         raise TypeError("Only know how to combine trading day with "
                         "datetime, datetime64 or timedelta.")
コード例 #2
0
def test_non_fixed_variable_window_indexer(closed, expected_data):
    index = date_range("2020", periods=10)
    df = DataFrame(range(10), index=index)
    offset = BusinessDay(1)
    indexer = VariableOffsetWindowIndexer(index=index, offset=offset)
    result = df.rolling(indexer, closed=closed).sum()
    expected = DataFrame(expected_data, index=index)
    tm.assert_frame_equal(result, expected)
コード例 #3
0
    def query(site):
        """Call which_fname_date but shorter.

        Return the business date whose data should be queried for ``site``:
        the previous business day once the site's time cutoff has passed
        (or on weekends), otherwise the most recent business day.
        """
        # query_date = getDate.which_fname_date()
        weekend, query_date = False, False
        if date.today().weekday() in (5, 6):
            weekend = True

        # BUGFIX: the single-site branches used ``site in ('name')`` --
        # without a trailing comma that is a *substring* test on a plain
        # string (e.g. 'sec' would match 'sec_rss'), not tuple membership.
        # Exact equality is what was intended.
        if site in ('cboe', 'occ'):
            if getDate.time_cutoff(cutoff_hm=16.15) or weekend:
                query_date = (date.today() - BusinessDay(n=1)).date()
        elif site == 'sec_rss':  # 6 am start
            if getDate.time_cutoff(cutoff_hm=6.0) or weekend:
                query_date = (date.today() - BusinessDay(n=1)).date()
        elif site == 'iex_close':
            if weekend:
                query_date = (date.today() - BusinessDay(n=1)).date()
        elif site == 'mkt_open':
            if getDate.time_cutoff(cutoff_hm=9.0) or weekend:
                query_date = (date.today() - BusinessDay(n=1)).date()
        elif site == 'iex_eod':
            if getDate.time_cutoff(cutoff_hm=16.15) or weekend:
                query_date = (date.today() - BusinessDay(n=1)).date()
        elif site == 'sec_master':
            if getDate.time_cutoff(cutoff_hm=22.35) or weekend:
                query_date = (date.today() - BusinessDay(n=1)).date()
        elif site == 'iex_previous':
            # Previous-day data needs an extra day of lag before the cutoff.
            if getDate.time_cutoff(cutoff_hm=5.50) or weekend:
                query_date = (date.today() - BusinessDay(n=2)).date()
            else:
                query_date = (date.today() - BusinessDay(n=1)).date()
        elif site == 'last_syms':
            pass

        # If none of the prev time/weekend conditions apply
        if not query_date:
            query_date = (date.today() - BusinessDay(n=0)).date()

        return query_date
コード例 #4
0
def create_data():
    """ create the pickle/msgpack data

    Build a nested dict of representative pandas objects (series, frames,
    indexes, scalars, offsets, ...) used as round-trip fixtures for
    pickle/msgpack compatibility testing.  Contents are intentionally
    version-gated and must not be changed casually.
    """

    # Base column data reused by several series/frame fixtures below.
    data = {
        'A': [0., 1., 2., 3., np.nan],
        'B': [0, 1, 0, 1, 0],
        'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
        'D': date_range('1/1/2009', periods=5),
        'E': [0., 1, Timestamp('20100101'), 'foo', 2.]
    }

    scalars = dict(timestamp=Timestamp('20130101'), period=Period('2012', 'M'))

    # One representative of each index flavour.
    index = dict(int=Index(np.arange(10)),
                 date=date_range('20130101', periods=10),
                 period=period_range('2013-01-01', freq='M', periods=10),
                 float=Index(np.arange(10, dtype=np.float64)),
                 uint=Index(np.arange(10, dtype=np.uint64)),
                 timedelta=timedelta_range('00:00:00', freq='30T', periods=10))

    # Index types that only exist from a given pandas version onwards.
    if _loose_version >= LooseVersion('0.18'):
        from pandas import RangeIndex
        index['range'] = RangeIndex(10)

    if _loose_version >= LooseVersion('0.21'):
        from pandas import interval_range
        index['interval'] = interval_range(0, periods=10)

    mi = dict(reg2=MultiIndex.from_tuples(tuple(
        zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
              ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']])),
                                          names=['first', 'second']))

    # Series fixtures: plain dtypes plus datetime/tz/period/categorical and
    # duplicate-label cases.
    series = dict(float=Series(data['A']),
                  int=Series(data['B']),
                  mixed=Series(data['E']),
                  ts=Series(np.arange(10).astype(np.int64),
                            index=date_range('20130101', periods=10)),
                  mi=Series(np.arange(5).astype(np.float64),
                            index=MultiIndex.from_tuples(tuple(
                                zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])),
                                                         names=['one',
                                                                'two'])),
                  dup=Series(np.arange(5).astype(np.float64),
                             index=['A', 'B', 'C', 'D', 'A']),
                  cat=Series(Categorical(['foo', 'bar', 'baz'])),
                  dt=Series(date_range('20130101', periods=5)),
                  dt_tz=Series(
                      date_range('20130101', periods=5, tz='US/Eastern')),
                  period=Series([Period('2000Q1')] * 5))

    # Frame with duplicate column labels ('A' appears twice).
    mixed_dup_df = DataFrame(data)
    mixed_dup_df.columns = list("ABCDA")
    frame = dict(float=DataFrame({
        'A': series['float'],
        'B': series['float'] + 1
    }),
                 int=DataFrame({
                     'A': series['int'],
                     'B': series['int'] + 1
                 }),
                 mixed=DataFrame({k: data[k]
                                  for k in ['A', 'B', 'C', 'D']}),
                 mi=DataFrame(
                     {
                         'A': np.arange(5).astype(np.float64),
                         'B': np.arange(5).astype(np.int64)
                     },
                     index=MultiIndex.from_tuples(tuple(
                         zip(*[['bar', 'bar', 'baz', 'baz', 'baz'],
                               ['one', 'two', 'one', 'two', 'three']])),
                                                  names=['first', 'second'])),
                 dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
                               columns=['A', 'B', 'A']),
                 cat_onecol=DataFrame({'A': Categorical(['foo', 'bar'])}),
                 cat_and_float=DataFrame({
                     'A':
                     Categorical(['foo', 'bar', 'baz']),
                     'B':
                     np.arange(3).astype(np.int64)
                 }),
                 mixed_dup=mixed_dup_df,
                 dt_mixed_tzs=DataFrame(
                     {
                         'A': Timestamp('20130102', tz='US/Eastern'),
                         'B': Timestamp('20130603', tz='CET')
                     },
                     index=range(5)),
                 dt_mixed2_tzs=DataFrame(
                     {
                         'A': Timestamp('20130102', tz='US/Eastern'),
                         'B': Timestamp('20130603', tz='CET'),
                         'C': Timestamp('20130603', tz='UTC')
                     },
                     index=range(5)))

    # NOTE(review): key names (int8/int16/int32) presumably reflect the
    # expected category-code width, not the value dtype -- confirm.
    cat = dict(int8=Categorical(list('abcdefg')),
               int16=Categorical(np.arange(1000)),
               int32=Categorical(np.arange(10000)))

    timestamp = dict(normal=Timestamp('2011-01-01'),
                     nat=NaT,
                     tz=Timestamp('2011-01-01', tz='US/Eastern'))

    # Timestamp's frequency keyword was 'offset' before 0.19.2 and 'freq'
    # afterwards, hence the version gate.
    if _loose_version < LooseVersion('0.19.2'):
        timestamp['freq'] = Timestamp('2011-01-01', offset='D')
        timestamp['both'] = Timestamp('2011-01-01',
                                      tz='Asia/Tokyo',
                                      offset='M')
    else:
        timestamp['freq'] = Timestamp('2011-01-01', freq='D')
        timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo', freq='M')

    # One instance of each DateOffset subclass, with non-default arguments
    # where the constructor accepts them.
    off = {
        'DateOffset': DateOffset(years=1),
        'DateOffset_h_ns': DateOffset(hour=6, nanoseconds=5824),
        'BusinessDay': BusinessDay(offset=timedelta(seconds=9)),
        'BusinessHour': BusinessHour(normalize=True, n=6, end='15:14'),
        'CustomBusinessDay': CustomBusinessDay(weekmask='Mon Fri'),
        'SemiMonthBegin': SemiMonthBegin(day_of_month=9),
        'SemiMonthEnd': SemiMonthEnd(day_of_month=24),
        'MonthBegin': MonthBegin(1),
        'MonthEnd': MonthEnd(1),
        'QuarterBegin': QuarterBegin(1),
        'QuarterEnd': QuarterEnd(1),
        'Day': Day(1),
        'YearBegin': YearBegin(1),
        'YearEnd': YearEnd(1),
        'Week': Week(1),
        'Week_Tues': Week(2, normalize=False, weekday=1),
        'WeekOfMonth': WeekOfMonth(week=3, weekday=4),
        'LastWeekOfMonth': LastWeekOfMonth(n=1, weekday=3),
        'FY5253': FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
        'Easter': Easter(),
        'Hour': Hour(1),
        'Minute': Minute(1)
    }

    return dict(series=series,
                frame=frame,
                index=index,
                scalars=scalars,
                mi=mi,
                sp_series=dict(float=_create_sp_series(),
                               ts=_create_sp_tsseries()),
                sp_frame=dict(float=_create_sp_frame()),
                cat=cat,
                timestamp=timestamp,
                offsets=off)
コード例 #5
0
def process_start(verbose=True):
    """Build per-loan interval/outcome tables from the Bondora CSV exports.

    Reads datas/LoanData.csv and datas/RepaymentsData.csv, derives the
    monthly payment-interval grid for every loan, and writes:
      - datas/df_Y.csv: (LoanId, interval T, default indicator Y)
      - datas/df_M.csv: (LoanId, interval T, payment sum M, default flag)
      - datas/df_R.csv: (LoanId, T, recovery ratio Y) for EAD1 > 400
    """
    cols = [
        'LoanId', 'Status', 'LoanDate', 'FirstPaymentDate', 'ContractEndDate',
        'ReportAsOfEOD', 'DefaultDate', 'MonthlyPaymentDay', 'EAD1'
    ]
    loans = pd.read_csv('datas/LoanData.csv', sep=',', usecols=cols)
    # Drop inconsistent rows: marked Repaid but missing an end date.
    loans = loans[~((loans['Status'] == 'Repaid') &
                    (loans['ContractEndDate'].isnull()))]
    loans['LoanDate'] = pd.to_datetime(loans['LoanDate'])
    loans['FirstPaymentDate'] = pd.to_datetime(loans['FirstPaymentDate'])
    loans['ContractEndDate'] = pd.to_datetime(loans['ContractEndDate'])
    loans['ReportAsOfEOD'] = pd.to_datetime(loans['ReportAsOfEOD'])
    loans['DefaultDate'] = pd.to_datetime(loans['DefaultDate'])

    payments = pd.read_csv('datas/RepaymentsData.csv', sep=',')
    payments['Date'] = pd.to_datetime(payments['Date'])

    # Processing:
    #   with default date: take DefaultDate interval, status = 1
    #   no default date & repaid: take ContractEndDate interval, status = 0
    #   no default date & current: take ReportAsOfEOD interval, status = 0
    #   no default date & late: take ReportAsOfEOD interval, status = 0

    # For some, monthly payment date is the date of first payment, for some it is monthly payment day, first interval is approx uniformly 30-60 days
    df_T = []
    df_Y = []
    df_M = []
    i = 1
    for asdf, loan in loans.iterrows():
        loanid = loan['LoanId']

        # Progress indicator every 100 loans.
        if (i % 100 == 0) and verbose:
            print(i, "/", len(loans), "(", loanid, ")")

        P0 = loan['LoanDate']
        Ps = loan['FirstPaymentDate']
        Pe = loan['ReportAsOfEOD']
        m = loan['MonthlyPaymentDay']  #Ps.day

        # Month-start anchors spanning first payment through report date.
        s, e = pd.Timestamp(year=Ps.year, month=Ps.month,
                            day=1), pd.Timestamp(year=Pe.year,
                                                 month=Pe.month,
                                                 day=1)
        # Monthly schedule on payment day m; adding 0 * BusinessDay() rolls
        # non-business days forward to the next business day.
        payment_dates = pd.date_range(s, e, freq='MS') + pd.Timedelta(
            m - 1, 'D') + 0 * BusinessDay()
        payment_dates = payment_dates[(payment_dates > P0)
                                      & (payment_dates < Pe)]
        # Interval k runs from index[k-1] (start) to index[k] (end); the
        # last interval is capped at the report date.
        index = pd.Series(pd.DatetimeIndex([P0]).append(payment_dates))
        intervals = pd.DataFrame({
            'interval': np.arange(1,
                                  len(index) + 1),
            'start': index,
            'end': index.shift(-1).fillna(Pe)
        })

        # Choose the observation end date and a censoring shift per the
        # processing rules documented above.
        dt = loan['DefaultDate']
        if not pd.isnull(dt):
            shift = 3
            t = dt
        elif loan['Status'] == 'Repaid':
            shift = 1
            t = loan['ContractEndDate']
        else:
            shift = 3
            t = loan['ReportAsOfEOD']

        # Interval containing t, shifted back to avoid boundary intervals.
        T = intervals.loc[(t >= intervals['start']) & (t <= intervals['end']),
                          'interval'].iloc[0]
        T = T - shift
        if T >= 1:
            idx = np.arange(T) + 1
            seq = np.zeros_like(idx)
            # Mark the final interval as the default event if one occurred.
            if not pd.isnull(dt):
                seq[-1] = 1
            df_Y.extend([(loanid, t, y) for t, y in zip(idx, seq)])

        # NOTE(review): payments file uses 'loan_id' while loans uses
        # 'LoanId' -- presumably the same key; verify against the CSVs.
        df_payments = payments.loc[payments['loan_id'] == loanid, [
            'Date', 'PrincipalRepayment', 'InterestRepayment',
            'LateFeesRepayment'
        ]]
        if len(payment_dates) > 1:
            # Sum principal+interest+fees per payment, then bucket payments
            # into the loan's intervals and total per interval.
            df_payments = df_payments.set_index('Date').sort_index().sum(
                axis=1)
            df_payments.index = pd.cut(df_payments.index, bins=index)
            df_payments = df_payments.groupby(level=0).agg('sum')
            idx = np.arange(1, len(index))
            seq = df_payments.values
            # NOTE(review): when dt is NaT this comparison is all-False,
            # i.e. non-defaulted loans get Default=False throughout.
            dts = (dt <= index.iloc[1:]).values
            df_M.extend([(loanid, t, p, s) for t, p, s in zip(idx, seq, dts)])

        i += 1

    print("Samples:")
    dfs = pd.DataFrame(df_Y, columns=['LoanId', 'T', 'Y'])
    dfs.to_csv('datas/df_Y.csv', index=False)
    print(len(dfs['LoanId'].unique()), "/", len(loans['LoanId'].unique()))

    dfs = pd.DataFrame(df_M, columns=['LoanId', 'T', 'M', 'Default'])
    dfs.to_csv('datas/df_M.csv', index=False)
    print(len(dfs['LoanId'].unique()), "/", len(loans['LoanId'].unique()))

    # Recovery table: per-interval payment relative to exposure at default.
    dfs['T'] = dfs.groupby('LoanId')['Default'].agg('cumsum').astype(int)
    dfs = dfs[dfs['Default']].copy()
    dfs = loans.loc[loans['EAD1'] > 400, ['LoanId', 'EAD1']].merge(dfs)
    dfs['Y'] = dfs['M'] / dfs['EAD1']
    dfs = dfs[['LoanId', 'T', 'Y']]
    dfs.to_csv('datas/df_R.csv', index=False)
    print(len(dfs['LoanId'].unique()), "/", len(loans['LoanId'].unique()))
コード例 #6
0
def create_data():
    """ create the pickle/msgpack data

    Build a nested dict of representative pandas objects used as round-trip
    fixtures for pickle/msgpack compatibility testing.  Contents must stay
    stable across releases; do not change casually.
    """

    # Base column data reused by several series/frame fixtures below.
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, np.nan],
        "B": [0, 1, 0, 1, 0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": date_range("1/1/2009", periods=5),
        "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
    }

    scalars = dict(timestamp=Timestamp("20130101"), period=Period("2012", "M"))

    # One representative of each index flavour.
    index = dict(
        int=Index(np.arange(10)),
        date=date_range("20130101", periods=10),
        period=period_range("2013-01-01", freq="M", periods=10),
        float=Index(np.arange(10, dtype=np.float64)),
        uint=Index(np.arange(10, dtype=np.uint64)),
        timedelta=timedelta_range("00:00:00", freq="30T", periods=10),
    )

    index["range"] = RangeIndex(10)

    # interval_range only exists from pandas 0.21 onwards.
    if _loose_version >= LooseVersion("0.21"):
        from pandas import interval_range

        index["interval"] = interval_range(0, periods=10)

    mi = dict(reg2=MultiIndex.from_tuples(
        tuple(
            zip(*[
                ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
                ["one", "two", "one", "two", "one", "two", "one", "two"],
            ])),
        names=["first", "second"],
    ))

    # Series fixtures: plain dtypes plus datetime/tz/period/categorical and
    # duplicate-label cases.
    series = dict(
        float=Series(data["A"]),
        int=Series(data["B"]),
        mixed=Series(data["E"]),
        ts=Series(np.arange(10).astype(np.int64),
                  index=date_range("20130101", periods=10)),
        mi=Series(
            np.arange(5).astype(np.float64),
            index=MultiIndex.from_tuples(tuple(
                zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])),
                                         names=["one", "two"]),
        ),
        dup=Series(np.arange(5).astype(np.float64),
                   index=["A", "B", "C", "D", "A"]),
        cat=Series(Categorical(["foo", "bar", "baz"])),
        dt=Series(date_range("20130101", periods=5)),
        dt_tz=Series(date_range("20130101", periods=5, tz="US/Eastern")),
        period=Series([Period("2000Q1")] * 5),
    )

    # Frame with duplicate column labels ('A' appears twice).
    mixed_dup_df = DataFrame(data)
    mixed_dup_df.columns = list("ABCDA")
    frame = dict(
        float=DataFrame({
            "A": series["float"],
            "B": series["float"] + 1
        }),
        int=DataFrame({
            "A": series["int"],
            "B": series["int"] + 1
        }),
        mixed=DataFrame({k: data[k]
                         for k in ["A", "B", "C", "D"]}),
        mi=DataFrame(
            {
                "A": np.arange(5).astype(np.float64),
                "B": np.arange(5).astype(np.int64)
            },
            index=MultiIndex.from_tuples(
                tuple(
                    zip(*[
                        ["bar", "bar", "baz", "baz", "baz"],
                        ["one", "two", "one", "two", "three"],
                    ])),
                names=["first", "second"],
            ),
        ),
        dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
                      columns=["A", "B", "A"]),
        cat_onecol=DataFrame({"A": Categorical(["foo", "bar"])}),
        cat_and_float=DataFrame({
            "A": Categorical(["foo", "bar", "baz"]),
            "B": np.arange(3).astype(np.int64),
        }),
        mixed_dup=mixed_dup_df,
        dt_mixed_tzs=DataFrame(
            {
                "A": Timestamp("20130102", tz="US/Eastern"),
                "B": Timestamp("20130603", tz="CET"),
            },
            index=range(5),
        ),
        dt_mixed2_tzs=DataFrame(
            {
                "A": Timestamp("20130102", tz="US/Eastern"),
                "B": Timestamp("20130603", tz="CET"),
                "C": Timestamp("20130603", tz="UTC"),
            },
            index=range(5),
        ),
    )

    # NOTE(review): key names (int8/int16/int32) presumably reflect the
    # expected category-code width, not the value dtype -- confirm.
    cat = dict(
        int8=Categorical(list("abcdefg")),
        int16=Categorical(np.arange(1000)),
        int32=Categorical(np.arange(10000)),
    )

    timestamp = dict(
        normal=Timestamp("2011-01-01"),
        nat=NaT,
        tz=Timestamp("2011-01-01", tz="US/Eastern"),
    )

    timestamp["freq"] = Timestamp("2011-01-01", freq="D")
    timestamp["both"] = Timestamp("2011-01-01", tz="Asia/Tokyo", freq="M")

    # One instance of each DateOffset subclass, with non-default arguments
    # where the constructor accepts them.
    off = {
        "DateOffset": DateOffset(years=1),
        "DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824),
        "BusinessDay": BusinessDay(offset=timedelta(seconds=9)),
        "BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"),
        "CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"),
        "SemiMonthBegin": SemiMonthBegin(day_of_month=9),
        "SemiMonthEnd": SemiMonthEnd(day_of_month=24),
        "MonthBegin": MonthBegin(1),
        "MonthEnd": MonthEnd(1),
        "QuarterBegin": QuarterBegin(1),
        "QuarterEnd": QuarterEnd(1),
        "Day": Day(1),
        "YearBegin": YearBegin(1),
        "YearEnd": YearEnd(1),
        "Week": Week(1),
        "Week_Tues": Week(2, normalize=False, weekday=1),
        "WeekOfMonth": WeekOfMonth(week=3, weekday=4),
        "LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3),
        "FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
        "Easter": Easter(),
        "Hour": Hour(1),
        "Minute": Minute(1),
    }

    return dict(
        series=series,
        frame=frame,
        index=index,
        scalars=scalars,
        mi=mi,
        sp_series=dict(float=_create_sp_series(), ts=_create_sp_tsseries()),
        sp_frame=dict(float=_create_sp_frame()),
        cat=cat,
        timestamp=timestamp,
        offsets=off,
    )
コード例 #7
0
ファイル: sec_13_.py プロジェクト: etomasso1/algotrading
# Ad-hoc exploration script: pull SEC master-index and holdings data from
# the algotrading.ventures API.
dt = 'none'
url = f"https://algotrading.ventures/api/v1/sec/master_idx/date/most_recent"
get = requests.get(url)

df = pd.DataFrame(get.json())

# Pick up any local edits to the 'api' module without restarting.
importlib.reload(sys.modules['api'])

df = serverAPI('sec_master_mr', val='most_recent').df
df['Form Type'].value_counts()

df = serverAPI('sec_inst_holdings').df
df.shape
df.head(10)

# Previous business day.
dt = (date.today() - BusinessDay(n=1)).date()

# """
# Backfill: request the master index for business days 15-39 back,
# throttled to one request every half second.
url_base = "https://algotrading.ventures/api/v1/sec/master_idx/date/"
for n in list(range(15, 40)):
    dt = (date.today() - BusinessDay(n=n)).date()
    requests.get(f"{url_base}{dt.strftime('%Y%m%d')}")
    time.sleep(.5)
# """

# url = f"https://algotrading.ventures/api/v1/sec/master_idx/date/{dt.strftime('%Y%m%d')}"
# get = requests.get(url)
# overview_df = pd.DataFrame(tag_dict, index=range(1))
# print(CnM.from_bytes(get.content[0:10000]).best().first())

url = "https://algotrading.ventures/api/v1/sec/data/master_idx/all/false"
コード例 #8
0
# --- Main Program ---
if __name__ == "__main__":

    # Today's date, formatted the way the sheet tabs are named.
    day = Date.datetime.today().strftime("%d-%m-%Y")
    wks, wksInput = Gsheets.init(day)
    g_driver.init()

    # Read the user-supplied scrape parameters from the input worksheet.
    dateIn = Date.datetime.strptime(wksInput.acell("B2").value,
                                    "%d-%m-%Y").date()
    months = int(wksInput.acell("C2").value)
    numberRooms = str(wksInput.acell("D2").value)
    totalDays = int(wksInput.acell("E2").value)
    totalAdults = str(wksInput.acell("F2").value)
    cleaningFee = int(wksInput.acell("G2").value)
    # Find and Copy Values to Sheet
    print("\nSTART!")

    # Scrape one window per month, advancing the start date by 20 business
    # days on even iterations and 25 on odd ones.
    for cycle in range(months):
        print("---- ", dateIn, " ----")
        new_url.get(dateIn, totalDays, totalAdults, numberRooms)
        WebScrap.loop_pages(day, dateIn, totalDays, cleaningFee, totalAdults)
        step = BusinessDay(20) if cycle % 2 == 0 else BusinessDay(25)
        dateIn = (dateIn + step).date()

    g_driver.close()
コード例 #9
0
def create_data():
    """create the pickle data

    Build a nested dict of representative pandas objects used as round-trip
    fixtures for pickle compatibility testing.  Contents must stay stable
    across releases; do not change casually.
    """
    # Base column data reused by several series/frame fixtures below.
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, np.nan],
        "B": [0, 1, 0, 1, 0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": date_range("1/1/2009", periods=5),
        "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
    }

    scalars = {
        "timestamp": Timestamp("20130101"),
        "period": Period("2012", "M")
    }

    # One representative of each index flavour.
    index = {
        "int": Index(np.arange(10)),
        "date": date_range("20130101", periods=10),
        "period": period_range("2013-01-01", freq="M", periods=10),
        "float": Index(np.arange(10, dtype=np.float64)),
        "uint": Index(np.arange(10, dtype=np.uint64)),
        "timedelta": timedelta_range("00:00:00", freq="30T", periods=10),
    }

    index["range"] = RangeIndex(10)

    index["interval"] = interval_range(0, periods=10)

    mi = {
        "reg2":
        MultiIndex.from_tuples(
            tuple(
                zip(*[
                    ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
                    ["one", "two", "one", "two", "one", "two", "one", "two"],
                ])),
            names=["first", "second"],
        )
    }

    # Series fixtures: plain dtypes plus datetime/tz/period/categorical and
    # duplicate-label cases.
    series = {
        "float":
        Series(data["A"]),
        "int":
        Series(data["B"]),
        "mixed":
        Series(data["E"]),
        "ts":
        Series(np.arange(10).astype(np.int64),
               index=date_range("20130101", periods=10)),
        "mi":
        Series(
            np.arange(5).astype(np.float64),
            index=MultiIndex.from_tuples(tuple(
                zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])),
                                         names=["one", "two"]),
        ),
        "dup":
        Series(np.arange(5).astype(np.float64),
               index=["A", "B", "C", "D", "A"]),
        "cat":
        Series(Categorical(["foo", "bar", "baz"])),
        "dt":
        Series(date_range("20130101", periods=5)),
        "dt_tz":
        Series(date_range("20130101", periods=5, tz="US/Eastern")),
        "period":
        Series([Period("2000Q1")] * 5),
    }

    # Frame with duplicate column labels ('A' appears twice).
    mixed_dup_df = DataFrame(data)
    mixed_dup_df.columns = list("ABCDA")
    frame = {
        "float":
        DataFrame({
            "A": series["float"],
            "B": series["float"] + 1
        }),
        "int":
        DataFrame({
            "A": series["int"],
            "B": series["int"] + 1
        }),
        "mixed":
        DataFrame({k: data[k]
                   for k in ["A", "B", "C", "D"]}),
        "mi":
        DataFrame(
            {
                "A": np.arange(5).astype(np.float64),
                "B": np.arange(5).astype(np.int64)
            },
            index=MultiIndex.from_tuples(
                tuple(
                    zip(*[
                        ["bar", "bar", "baz", "baz", "baz"],
                        ["one", "two", "one", "two", "three"],
                    ])),
                names=["first", "second"],
            ),
        ),
        "dup":
        DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
                  columns=["A", "B", "A"]),
        "cat_onecol":
        DataFrame({"A": Categorical(["foo", "bar"])}),
        "cat_and_float":
        DataFrame({
            "A": Categorical(["foo", "bar", "baz"]),
            "B": np.arange(3).astype(np.int64),
        }),
        "mixed_dup":
        mixed_dup_df,
        "dt_mixed_tzs":
        DataFrame(
            {
                "A": Timestamp("20130102", tz="US/Eastern"),
                "B": Timestamp("20130603", tz="CET"),
            },
            index=range(5),
        ),
        "dt_mixed2_tzs":
        DataFrame(
            {
                "A": Timestamp("20130102", tz="US/Eastern"),
                "B": Timestamp("20130603", tz="CET"),
                "C": Timestamp("20130603", tz="UTC"),
            },
            index=range(5),
        ),
    }

    # NOTE(review): key names (int8/int16/int32) presumably reflect the
    # expected category-code width, not the value dtype -- confirm.
    cat = {
        "int8": Categorical(list("abcdefg")),
        "int16": Categorical(np.arange(1000)),
        "int32": Categorical(np.arange(10000)),
    }

    timestamp = {
        "normal": Timestamp("2011-01-01"),
        "nat": NaT,
        "tz": Timestamp("2011-01-01", tz="US/Eastern"),
    }

    timestamp["freq"] = Timestamp("2011-01-01", freq="D")
    timestamp["both"] = Timestamp("2011-01-01", tz="Asia/Tokyo", freq="M")

    # One instance of each DateOffset subclass, with non-default arguments
    # where the constructor accepts them.
    off = {
        "DateOffset": DateOffset(years=1),
        "DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824),
        "BusinessDay": BusinessDay(offset=timedelta(seconds=9)),
        "BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"),
        "CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"),
        "SemiMonthBegin": SemiMonthBegin(day_of_month=9),
        "SemiMonthEnd": SemiMonthEnd(day_of_month=24),
        "MonthBegin": MonthBegin(1),
        "MonthEnd": MonthEnd(1),
        "QuarterBegin": QuarterBegin(1),
        "QuarterEnd": QuarterEnd(1),
        "Day": Day(1),
        "YearBegin": YearBegin(1),
        "YearEnd": YearEnd(1),
        "Week": Week(1),
        "Week_Tues": Week(2, normalize=False, weekday=1),
        "WeekOfMonth": WeekOfMonth(week=3, weekday=4),
        "LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3),
        "FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
        "Easter": Easter(),
        "Hour": Hour(1),
        "Minute": Minute(1),
    }

    return {
        "series": series,
        "frame": frame,
        "index": index,
        "scalars": scalars,
        "mi": mi,
        "sp_series": {
            "float": _create_sp_series(),
            "ts": _create_sp_tsseries()
        },
        "sp_frame": {
            "float": _create_sp_frame()
        },
        "cat": cat,
        "timestamp": timestamp,
        "offsets": off,
    }
コード例 #10
0
ファイル: timemethods.py プロジェクト: mesalas/SimAnalysis
def sim_time_to_date_time(simTime, start_date):
    """Convert a fractional simulation time to a wall-clock timestamp.

    The integer part of ``simTime`` counts trading (business) days from
    ``start_date``; the fractional part is a fraction of a 23400-second
    (6.5 hour) trading session, added as nanoseconds.
    """
    session_fraction, trading_days = math.modf(simTime)
    intra_day_ns = session_fraction * 23400 * 1e9
    return start_date + BusinessDay(trading_days) + pd.Timedelta(intra_day_ns)
コード例 #11
0
ファイル: studies_exp.py プロジェクト: etomasso1/algotrading
    'ytdChange', 'iexOpen', 'iexClose', 'closeDate'
])
# Work on an independent copy so filters don't mutate the source frame.
iex_filter_df = iex_df[cols_to_filter].copy(deep=True)
# Get all the symbols that aren't warrants
iex_filter_df = iex_filter_df[~iex_filter_df['symbol'].isin(syms_wt)]

# Volume ratios: today vs. previous day and vs. average, and previous day
# vs. average, rounded to one decimal.
iex_filter_df['vol/prev'] = (iex_filter_df['volume'] /
                             iex_filter_df['previousVolume']).round(1)
iex_filter_df['vol/avg'] = (iex_filter_df['volume'] /
                            iex_filter_df['avgTotalVolume']).round(1)
iex_filter_df['prevVol/avg'] = (iex_filter_df['previousVolume'] /
                                iex_filter_df['avgTotalVolume']).round(1)

# The last four business days, used to keep only recently-closed rows.
date_list = []
for x in range(1, 5):
    date_list.append((date.today() - BusinessDay(n=x)).date())

# Screen: cheap (<15), below a modest 52-week high (<25), closed up on the
# day, and with a recent close date; rank by relative volume then YTD move.
(iex_filter_df[(iex_filter_df['latestPrice'] < 15)
               & (iex_filter_df['week52High'] < 25)
               & (iex_filter_df['iexClose'] > iex_filter_df['iexOpen'])
               & (iex_filter_df['closeDate'].isin(date_list))
               # (iex_filter_df['changePercent'] > .05)
               ].sort_values(by=['vol/avg', 'ytdChange'],
                             ascending=False).head(25).reset_index(drop=True))

# %% codecell
#############################################

# Fibonacci retracements and extensions - if none of the prices are within
# the levels or within 5% of the closest level, indidcate that levels
# are no longer providing anything useful
コード例 #12
0

# Fetch the daily price history for Fintual real asset 186.
res = requests.get('https://fintual.cl/api/real_assets/186/days') 

data = res.json()['data']

# Build a single-column frame indexed by date string, then parse the index.
df = pd.DataFrame(columns=['price'])

for attribute in data:
    df.loc[attribute['attributes']['date'] ] = attribute['attributes']['price']

df.index = pd.to_datetime(df.index, format="%Y/%m/%d")

# Prices: latest, and as of the previous business day / month end / year
# end relative to the first index entry.
# NOTE(review): assumes df.index[0] is the most recent date -- confirm the
# API returns newest-first.
p     = df['price'][0]
p_eod = df['price'].loc[df.index[0] + BusinessDay(-1)]
p_eom = df['price'].loc[df.index[0] + MonthEnd(-1)]
p_eoy = df['price'].loc[df.index[0] + YearEnd(-1)]


@app.route('/')
def hello():
    """Render the asset's P&L summary (DTD/MTD/YTD returns) as HTML."""
    # BUGFIX: the original subtracted 1 only for DTD, so MTD/YTD printed
    # raw price ratios (~1.0) instead of returns; all three now report
    # price/reference - 1.
    # NOTE(review): values are fractions, not percentages -- the '%' label
    # presumably expects a *100 scaling; confirm intended display.
    return "Risky Norris P&L {}  <br/> \
            <br/>\
            {:10.2f}% DTD  <br/>  \
            {:10.2f}% MTD  <br/> \
            {:10.2f}% YTD  <br/>".format(df.index[0].date(), p/p_eod-1, p/p_eom-1, p/p_eoy-1)

# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
コード例 #13
0
# NOTE: demonstration/notes snippet -- several bare expressions below
# ('WOM-3FRI', dt + offset1, index5.shift(1), ...) have no effect when run
# as a script; they illustrate pandas date-offset behaviour interactively.
# '2h30min', every two hours and thirty minutes
offset1 = Hour(2) + Minute(30)

# every day
offset2 = pd.DateOffset(days=1)

# every third Friday of each month
'WOM-3FRI'

# Friday of each week
'W-FRI'

dt = datetime(2017, 3, 3)
dt + offset1  # next 150 minutes
dt + 2 * BusinessDay(1)  # next two business days

# shift ######################################################
# from 2016-11-1 to 2016-11-30
index5 = pd.date_range('2016-11-1', '2016-11-30')

# from 2016-11-2 to 2016-12-01
index5.shift(1)

# from 2016-11-1 00:30:00 to 2016-12-01 00:30:00
index5.shift(1, freq='30min')

# deal with timezone #########################################
# By default, pandas objects that are time zone-aware do not
# utilize a timezone object for purposes of efficiency.
native_ts = pd.Timestamp('2017-02-23')
コード例 #14
0
def process_start(verbose=True):
    """Build a per-user loan event history from datas/LoanData.csv.

    For every user, emits one dated row per scheduled payment, loan start,
    default, repayment and reschedule event, and writes the sorted result
    to datas/UserHistories.csv.
    """

    cols = [
        'UserName', 'LoanId', 'Status', 'LoanDate', 'MonthlyPaymentDay',
        'FirstPaymentDate', 'MaturityDate_Last', 'ReportAsOfEOD',
        'ContractEndDate', 'DefaultDate', 'ReScheduledOn'
    ]

    loans = pd.read_csv('datas/LoanData.csv', sep=',', usecols=cols)
    # Parse every date-like column up front.
    loans['LoanDate'] = pd.to_datetime(loans['LoanDate'])
    loans['FirstPaymentDate'] = pd.to_datetime(loans['FirstPaymentDate'])
    loans['MaturityDate_Last'] = pd.to_datetime(loans['MaturityDate_Last'])
    loans['ReportAsOfEOD'] = pd.to_datetime(loans['ReportAsOfEOD'])
    loans['ContractEndDate'] = pd.to_datetime(loans['ContractEndDate'])
    loans['DefaultDate'] = pd.to_datetime(loans['DefaultDate'])
    loans['ReScheduledOn'] = pd.to_datetime(loans['ReScheduledOn'])

    dates = []
    i = 1
    n = len(loans['UserName'].unique())
    for username, userloans in loans.groupby('UserName'):
        # Progress indicator every 100 users.
        if (i % 100 == 0) and verbose:
            print(i, "/", n, "(", username, ")")
        payment_dates = []
        for asdf, loan in userloans.iterrows():
            m = loan['MonthlyPaymentDay']
            P0 = loan['LoanDate']
            Ps = loan['FirstPaymentDate']
            Pe = loan['MaturityDate_Last']
            # Observation end: default date if defaulted, contract end if
            # repaid, otherwise the report date.
            Pf = loan['DefaultDate'] if not pd.isnull(
                loan['DefaultDate']) else (
                    loan['ContractEndDate']
                    if loan['Status'] == 'Repaid' else loan['ReportAsOfEOD'])
            # Month-start anchors spanning first payment through maturity.
            s, e = pd.Timestamp(year=Ps.year, month=Ps.month,
                                day=1), pd.Timestamp(year=Pe.year,
                                                     month=Pe.month,
                                                     day=1)
            # Monthly schedule on payment day m; adding 0 * BusinessDay()
            # rolls non-business days forward to the next business day.
            dm = pd.date_range(s, e, freq='MS') + pd.Timedelta(
                m - 1, 'D') + 0 * BusinessDay()
            payment_dates.extend(dm[(dm > P0) & (dm <= Pf)])
        loan_dates = userloans['LoanDate']
        default_dates = userloans.loc[~userloans['DefaultDate'].isnull(),
                                      'DefaultDate']
        repaid_dates = userloans.loc[userloans['DefaultDate'].isnull() &
                                     (userloans['Status'] == 'Repaid'),
                                     'ContractEndDate']
        rescheduled_dates = userloans.loc[~userloans['ReScheduledOn'].isnull(),
                                          'ReScheduledOn']
        # Event tuple layout mirrors the DataFrame columns below:
        # (UserName, Date, inc_intervals, inc_current, inc_default,
        #  inc_repaid, inc_rescheduled); default/repaid decrement the
        # running count of current loans (-1).
        dates.extend([(username, t, 1, 0, 0, 0, 0) for t in payment_dates] +
                     [(username, t, 0, 1, 0, 0, 0)
                      for t in loan_dates] + [(username, t, 0, -1, 1, 0, 0)
                                              for t in default_dates] +
                     [(username, t, 0, -1, 0, 1, 0)
                      for t in repaid_dates] + [(username, t, 0, 0, 0, 0, 1)
                                                for t in rescheduled_dates])
        i += 1
    dates = pd.DataFrame(dates,
                         columns=[
                             'UserName', 'Date', 'inc_intervals',
                             'inc_current', 'inc_default', 'inc_repaid',
                             'inc_rescheduled'
                         ])
    dates.sort_values(['UserName', 'Date'], inplace=True)

    #print(dates.groupby('UserName').agg('sum'))

    dates.to_csv('datas/UserHistories.csv', index=False)