Example #1
def macro_cons_silver_change():
    """
    Holdings report for the world's largest silver ETF, iShares Silver Trust; data runs from 2006-04-29 to date
    :return: pandas.Series
    2006-04-29         0
    2006-05-02      0.00
    2006-05-03    342.11
    2006-05-04    202.15
    2006-05-05    108.86
                   ...
    2019-10-17    -58.16
    2019-10-18      0.00
    2019-10-21    -34.89
    2019-10-22    -61.06
    2019-10-23      0.00
    """
    # millisecond timestamp used as a cache-busting parameter in the URL
    t = time.time()
    res = requests.get(
        JS_CONS_SLIVER_ETF_URL.format(str(int(round(t * 1000))),
                                      str(int(round(t * 1000)) + 90)))
    # the endpoint returns JSONP; keep only the JSON object between the outer braces
    json_data = json.loads(res.text[res.text.find("{"):res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["白银"] for item in json_data["list"]]
    # one row per report date, column names taken from the "kinds" field of the payload
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    # "增持/减持(吨)" is the daily increase/decrease in holdings, in tonnes
    temp_df = value_df["增持/减持(吨)"]
    temp_df.name = "silver_change"
    return temp_df
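
These snippets rely on module-level pieces that are not shown here (the time, json, requests and pandas imports, and the JS_CONS_SLIVER_ETF_URL constant). Assuming those are in place, a minimal usage sketch might look like this:

change = macro_cons_silver_change()
print(change.tail())            # most recent daily changes, in tonnes
print(change.loc["2019-10"])    # partial-date slice over the DatetimeIndex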
Example #2
def macro_cons_silver_amount():
    """
    Holdings report for the world's largest silver ETF, iShares Silver Trust; data runs from 2006-04-29 to date
    :return: pandas.Series
    2006-04-29    263651152
    2006-05-02    263651152
    2006-05-03    445408550
    2006-05-04    555123947
    2006-05-05    574713264
                    ...
    2019-10-17     Show All
    2019-10-18     Show All
    2019-10-21     Show All
    2019-10-22     Show All
    2019-10-23     Show All
    """
    t = time.time()
    res = requests.get(
        JS_CONS_SLIVER_ETF_URL.format(str(int(round(t * 1000))),
                                      str(int(round(t * 1000)) + 90)))
    json_data = json.loads(res.text[res.text.find("{"):res.text.rfind("}") +
                                    1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["白银"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["总价值(美元)"]
    temp_df.name = "silver_amount"
    return temp_df
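
As the sample output above shows, the tail of this series contains the literal string "Show All", so the raw result comes back as object dtype. A small clean-up sketch (not part of the original function) that mirrors what the extended example further below does:

amount = macro_cons_silver_amount()
# drop the "Show All" placeholders, then cast the remaining values to float
amount = amount[amount != "Show All"].astype(float)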
Example #3
def macro_cons_silver_volume():
    """
    Holdings report for the world's largest silver ETF, iShares Silver Trust; data runs from 2006-04-29 to date
    :return: pandas.Series
    2006-04-29      653.17
    2006-05-02      653.17
    2006-05-03      995.28
    2006-05-04     1197.43
    2006-05-05     1306.29
                    ...
    2019-10-17    11847.91
    2019-10-18    11847.91
    2019-10-21    11813.02
    2019-10-22    11751.96
    2019-10-23    11751.96
    """
    t = time.time()
    res = requests.get(
        JS_CONS_SLIVER_ETF_URL.format(str(int(round(t * 1000))),
                                      str(int(round(t * 1000)) + 90)))
    json_data = json.loads(res.text[res.text.find("{"):res.text.rfind("}") +
                                    1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["白银"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["总库存(吨)"]
    temp_df.name = "silver_volume"
    return temp_df
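
The sample values suggest that silver_change is just the day-over-day difference of silver_volume (for example 995.28 - 653.17 = 342.11 on 2006-05-03), so the two series can be cross-checked against each other. A rough sketch, assuming both come back as numeric values:

volume = macro_cons_silver_volume()
change = macro_cons_silver_change()
# re-derive the daily change from total holdings and compare with the reported change
recomputed = volume.astype(float).diff()
print((recomputed - change.astype(float)).abs().max())  # expected to be near zero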
Example #4
def macro_cons_silver_amount():
    """
    Holdings report for the world's largest silver ETF, iShares Silver Trust; data runs from 2006-04-29 to date
    :return: pandas.Series
    2006-04-29    263651152
    2006-05-02    263651152
    2006-05-03    445408550
    2006-05-04    555123947
    2006-05-05    574713264
                    ...
    2019-10-17     Show All
    2019-10-18     Show All
    2019-10-21     Show All
    2019-10-22     Show All
    2019-10-23     Show All
    """
    t = time.time()
    res = requests.get(
        JS_CONS_SLIVER_ETF_URL.format(str(int(round(t * 1000))),
                                      str(int(round(t * 1000)) + 90)))
    json_data = json.loads(res.text[res.text.find("{"):res.text.rfind("}") +
                                    1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["白银"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["总价值(美元)"]
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "etf",
        "attr_id": "2",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer":
        "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent":
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    # keep the report-date column and the total-value column of the jin10 payload
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    # merge both sources and keep the most recent record for duplicated dates
    temp_df = temp_df.append(temp_se)
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "silver_amount"

    url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
    r = requests.get(url)
    data_json = r.json()
    append_temp_df = pd.DataFrame(data_json["values"]).T
    append_temp_df.columns = [item["name"] for item in data_json["keys"]]
    temp_append_df = append_temp_df["总价值"]
    temp_append_df.name = "silver_amount"

    temp_df = temp_df.reset_index()
    temp_df["index"] = temp_df["index"].astype(str)
    temp_df = temp_df.append(temp_append_df.reset_index())
    temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
    temp_df.index = pd.to_datetime(temp_df["index"])
    del temp_df["index"]
    temp_df = temp_df[temp_df != 'Show All']
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.astype(float)
    return temp_df
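
The core of this extended example is a merge-and-deduplicate step: date-indexed series from several endpoints are stacked, and for dates covered by more than one source only the last record is kept. The same pattern in isolation, on toy data (pd.concat is used here instead of Series.append, which newer pandas versions have removed):

import pandas as pd

old = pd.Series([1.0, 2.0], index=pd.to_datetime(["2019-10-21", "2019-10-22"]))
new = pd.Series([2.5, 3.0], index=pd.to_datetime(["2019-10-22", "2019-10-23"]))

merged = pd.concat([old, new]).reset_index()
merged.drop_duplicates(subset="index", keep="last", inplace=True)
merged = merged.set_index("index").squeeze()
print(merged)  # 2019-10-22 keeps the value from `new`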
Example #5
def macro_cons_silver_change():
    """
    Holdings report for the world's largest silver ETF, iShares Silver Trust; data runs from 2006-04-29 to date
    :return: pandas.Series
    2006-04-29         0
    2006-05-02      0.00
    2006-05-03    342.11
    2006-05-04    202.15
    2006-05-05    108.86
                   ...
    2019-10-17    -58.16
    2019-10-18      0.00
    2019-10-21    -34.89
    2019-10-22    -61.06
    2019-10-23      0.00
    """
    t = time.time()
    res = requests.get(
        JS_CONS_SLIVER_ETF_URL.format(str(int(round(t * 1000))),
                                      str(int(round(t * 1000)) + 90)))
    json_data = json.loads(res.text[res.text.find("{"):res.text.rfind("}") +
                                    1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["白银"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["增持/减持(吨)"]
    temp_df.name = "silver_change"
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "etf",
        "attr_id": "2",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer":
        "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent":
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    temp_df = temp_df.append(temp_se)
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    return temp_df