def fetchds_update(self):
    """Incrementally fetch new Quandl rows for each configured code and
    write them into DynamoDB (one item per row, keyed by column name).

    For 'tb' (datatable) codes the code is split on '_' into vendor/table
    and re-fetched from the table's last known end_date.  For dataset codes,
    DynamoDB is scanned to find the last stored date and only the
    [last, latest] window is downloaded.
    """
    quandl.ApiConfig.api_key = self.apiK
    for code in self.qdcodes:
        logger.info("Debug: code: " + code)
        if self.dtype == 'tb':
            # Datatable codes are stored as "<vendor>_<table>".
            ext = code.split("_")
            logger.info("Debug: ext: " + str(ext[1]))
            qdata = quandl.Datatable(str(ext[0]) + '/' + str(ext[1])).data()
            latest = str(qdata.meta['end_date'])
            logger.info("Debug: latest: " + latest)
            # Re-query starting from the last known end date.
            qdata = quandl.Datatable(str(ext[0]) + '/' + str(ext[1])).data(
                params={'start_date': latest})
        else:
            # DynamoDB table names need >= 3 chars; pad by repeating the
            # last character.
            dtable = self.tgtdata
            if len(dtable) < 3:
                dtable = dtable + str(dtable)[-1]
            table = self.dynamodb.Table(dtable)
            resp = table.scan(FilterExpression=Key('type').eq(code))
            qdata = quandl.Dataset(self.tgtdata + '/' + code).data()
            count = int(resp['Count'])
            if count < 1:
                # Nothing stored yet: start from the dataset's first date.
                last = str(qdata.meta['start_date'])
            else:
                # NOTE(review): assumes scan results are date-ordered so the
                # last item holds the most recent date -- confirm.
                last = resp['Items'][-1]['date']
            latest = str(qdata.meta['end_date'])
            qdata = quandl.Dataset(self.tgtdata + '/' + code).data(params={
                'start_date': last,
                'end_date': latest
            })
        headers = qdata.column_names
        logger.info("Debug: headers: " + str(headers))
        qdata_list = qdata.to_list()
        qlen = len(qdata_list)
        qidx = 0  # qdata index
        if self.dtype == 'ts':  # set the date type fields and update index
            headers[0] = 'date'
        for i in range(qidx, qlen):  # for each q data index
            item = {}
            if self.dtype == 'ts':
                item = {"type": code}
            hlen = len(headers)
            for h in range(0, hlen):
                item[headers[h]] = str(qdata_list[i][h])
            # Define dtable to table name meets 3 char minimum
            dtable = self.tgtdata
            if len(dtable) < 3:
                dtable = dtable + str(dtable)[-1]
            table = self.dynamodb.Table(dtable)
            # Round-trip through JSON to coerce values into plain types
            # accepted by put_item.
            item = json.dumps(item)
            resp = table.put_item(Item=json.loads(item))
            logger.info("put_item response...")
            logger.info(json.dumps(resp, indent=4, cls=DecimalEncoder))
def get(self, dataset_code, **params):
    """
    Return data and metadata for the specified dataset.

    Call data() on the returned object for data and data_fields() for
    metadata.

    @param dataset_code: Dataset short code
        (Format: Database code/Dataset code).
    @param params: Query parameters:
        See https://www.quandl.com/docs/api?python#customize-your-dataset.

        limit (int): Use limit=n to get the first n rows of the dataset.
            Use limit=1 to get just the latest row.
        column_index (int): Request a specific column. Column 0 is the date
            column and is always returned. Data begins at column 1.
        start_date (string): "yyyy-mm-dd" - Retrieve data rows on and after
            the specified start date.
        end_date (string): "yyyy-mm-dd" - Retrieve data rows up to and
            including the specified end date.
        order (string): "asc" or "desc" - Return data in ascending or
            descending order of date. Default is "desc".
        collapse (string): "none", "daily", "weekly", "monthly",
            "quarterly", "annual" - Change the sampling frequency of the
            returned data. Default is "none", i.e. data is returned in its
            original granularity.
        transform (string): "none", "diff", "rdiff", "rdiff_from", "cumul",
            "normalize" - Perform elementary calculations on the data prior
            to downloading. Default is "none".
    """
    return quandl.Dataset(dataset_code, **params)
def update_data(self):
    """Fetch rows from Quandl since self.last_updated and store each one as
    a BaseCommodityRecords entry linked to this commodity.

    Returns True when the Quandl branch ran (and advances self.last_updated
    to today); returns False for any other api_source.

    NOTE(review): if a row fails to save, the loop aborts but last_updated
    is still advanced and True is returned, silently dropping the remaining
    rows -- confirm this is intended.
    """
    if (self.api_source == 'Quandl'):
        quandl.ApiConfig.api_key = self.api_key
        dfset = quandl.Dataset(self.api_code,
                               start_date=str(self.last_updated),
                               end_date=str(date.today()))
        data = dfset.data()
        entries = data.to_pandas()
        print('INSIDE UPDATE DATA')
        # print ('ENTRIES>>>>>>>>>>')
        # print (entries)
        try:
            # One record per date-indexed row; 'Value' is the data column.
            for index, row in entries.iterrows():
                ts = BaseCommodityRecords()
                ts.base_commodity = self
                ts.date = index
                ts.value = row['Value']
                ts.save()
                # print (ts)
        except Exception as e:
            print("Error in making TimeSeries entry:")
            print(e)
        self.last_updated = date.today()
        return True  # Updated
    return False  # Not updated
def fetch_data(self, ticker, start_date, end_date, metric,
               fetch_remote=False, filename='ticker.csv'):
    """Load price data for *ticker* into self.data.

    When fetch_remote is True the data is downloaded from Quandl and saved
    to *filename*; either way the CSV file is read into self.data.  Missing
    values are forward-filled in place.
    """
    self.ticker = ticker
    self.start_date = start_date
    self.end_date = end_date
    self.metric = metric
    self.fetch_remote = fetch_remote
    self.df_metrics = pd.DataFrame(columns=[ticker])
    if fetch_remote:
        data = quandl.Dataset(ticker).data(params={
            'start_date': start_date,
            'end_date': end_date
        }).to_pandas()
        # save as CSV, then re-read it -- presumably so both branches yield
        # an identically-parsed DataFrame; confirm before simplifying.
        data.to_csv(filename)
        data = pd.read_csv(filename)
    else:
        data = pd.read_csv(filename)
    data.fillna(method='ffill', inplace=True)
    self.data = data
def get_quandl_dataset(market, code, columns=None, buffer_days=10):
    '''Queries a quandl database, with some buffer in order to introduce
    lag.

    The result is aligned onto *market*'s index, column names are
    normalized to "<db>_<column>", and (for non-daily series) a
    "<db>_is_new" indicator marks rows where fresh data arrived.
    '''
    start_date = market.index.min()
    end_date = market.index.max()
    if buffer_days is not None:
        # Pull some history before the market window so forward-filled
        # values exist at the start of the range.
        start_date += dt.timedelta(days=-buffer_days)
    ds = quandl.get(code, start_date=start_date, end_date=end_date)
    # Everything after the '/' in the code, lower-cased (used as a prefix).
    db_name = re.search('(?<=/).*', code).group(0).lower()
    ds.columns = [col.lower().replace(' ', '_') for col in ds.columns]
    columns = ds.columns if columns is None else columns
    # Align onto the market calendar; dates with no data become NaN.
    ds = market[[]].join(ds, how='outer')[columns]
    # ds = market[[]].join(ds,how='outer')
    ds.columns = [db_name + '_' + col_name for col_name in ds.columns]
    ds = ds.sort_index(ascending=True)
    # Add new column to indicate if the feature is 'old' news or not.
    # Not necessary if updated daily.
    is_new_trivial = quandl.Dataset(code)['frequency'] == 'daily'
    if not is_new_trivial:
        # 1 where at least one value is present on that date, else 0;
        # computed before the pad-fill below erases the gaps.
        is_new = ds.apply(lambda row: 0 if all(isnan(c) for c in row) else 1,
                          axis=1)
    ds = ds.fillna(method='pad')
    if not is_new_trivial:
        ds[db_name + '_is_new'] = is_new
    return ds
def call_quandl(symbol='LBMA/GOLD', column='ALL', start_date='2017-01-01',
                end_date=None, key='1qgxtyhysHxVA3XamT_-'):
    """Download a Quandl dataset as a daily pandas DataFrame.

    @param symbol: Quandl code ("Database/Dataset").
    @param column: a single column name to keep, or 'ALL' for every column.
    @param start_date / end_date: "yyyy-mm-dd" window; end_date defaults to
        today.
    @param key: Quandl API key.
        NOTE(review): a real API key is committed as the default -- move it
        to config/env and rotate it.
    """
    if end_date is None:
        end_date = dt.datetime.now().strftime(
            date_format)  # use today, if no end_date passed in
    quandl.ApiConfig.api_key = key
    # note: although not relevant in current example, 'collapse':'daily'
    # below will prevent data more frequent than once/day from being
    # returned
    data_params = {
        'start_date': start_date,
        'end_date': end_date,
        'collapse': 'daily'
    }
    data = quandl.Dataset(symbol).data(
        params=data_params).to_pandas()  # import the data
    # The step above converts the Date column into a DateTimeIndex.
    # if desired, choose a single column:
    if column != 'ALL':
        data = data[[column]]  # select a single column
    data = data.dropna(
    )  # drop rows with NA (missing) values. Yes, Quandl does have some.
    assert isinstance(data, pd.DataFrame)  # sanity check
    return data
def init_quandl():
    """Configure the Quandl API key and fetch the WIKI/KO dataset for
    2001-12-01 .. 2010-12-30.

    Returns the fetched data object.  (The original fetched the data and
    discarded it, making the call useless to callers.)
    """
    quandl.ApiConfig.api_key = quandl_api_key
    data = quandl.Dataset("WIKI/KO").data(params={
        'start_date': '2001-12-01',
        'end_date': '2010-12-30'
    })
    return data
def SMA(ticker, startdate, enddate, window):
    """Return a DataFrame of daily adjusted closes for *ticker* plus a
    simple moving average column.

    @param ticker: WIKI ticker symbol (without the "WIKI/" prefix).
    @param startdate / enddate: "yyyy-mm-dd" date window.
    @param window: SMA window length in trading days.
    @return: DataFrame with columns 'Date', 'Adj. Close' and
        '<window>-days SMA' (NaN for the first window-1 rows).
    """
    data = quandl.Dataset('WIKI/' + ticker).data(params={
        'start_date': startdate,
        'end_date': enddate
    })
    data = data.to_pandas().reset_index()
    data = data[['Date', 'Adj. Close']]
    # pandas' rolling mean replaces the hand-rolled window loop; it yields
    # NaN for the first window-1 rows, exactly as the original did.
    data[str(window) + '-days SMA'] = data['Adj. Close'].rolling(window).mean()
    return data
def insert_data(startdate):
    """Fetch rows for the module-level `dbname` dataset from *startdate*
    through today and insert them into the `quantldata` table, then record
    the fetch time in `fetchhistory`.

    One row is inserted per (date, column) pair; NaN values are stored as
    the sentinel -1.
    """
    db = quandl.Dataset(dbname)
    colnames = db.column_names
    ret = quandl.get(dbname, returns="numpy", start_date=startdate,
                     end_date=datetime.date.today())
    if (len(ret) > 0):
        query = ("INSERT INTO quantldata (`date`, `code`, `columnname`, "
                 "`value`) VALUES (%s, %s, %s, %s)")
        cols = len(colnames)
        data = []
        for rows in ret:
            t = rows[0]  # first field of each record is the date
            for i in range(1, cols):
                # NaN values are stored as a -1 sentinel.
                val = float(rows[i]) if not np.isnan(rows[i]) else -1
                data.append((t, dbname, colnames[i], val))
        with con.cursor() as cursor:
            cursor.executemany(query, data)
        con.commit()
        currenttime = time.strftime('%Y-%m-%d %H:%M:%S')
        # Parameterized query: the original interpolated currenttime with
        # %-formatting, which is injection-prone and breaks on quotes.
        con.cursor().execute("insert into fetchhistory values (%s, NULL)",
                             (currenttime,))
        con.commit()
def get_metadata(self, dataset_code, **params):
    """
    Return metadata for the specified dataset.

    @param dataset_code: Dataset short code
        (Format: Database code/Dataset code).
    @param params: accepted but ignored here.
        NOTE(review): confirm whether params should be forwarded, as in
        get().
    """
    return quandl.Dataset(dataset_code).data_fields()
def __init__(self, scode, startDate, endDate, sipAmount):
    """Set up a SIP computation for AMFI scheme *scode* over
    [startDate, endDate] with a fixed installment of *sipAmount*.
    """
    self.qcode = 'AMFI/' + scode  # Quandl code for the fund
    self.startDate = startDate
    self.endDate = endDate
    self.sipAmount = sipAmount
    # Human-readable fund name from the Quandl dataset metadata.
    self.fundName = quandl.Dataset(self.qcode).name
    # Helpers defined elsewhere on the class: presumably build the
    # date/NAV DataFrame and add the purchased-quantity column -- confirm.
    self.create_date_nav_df()
    self.add_quantity_to_df()
def getStockData(stockList, start_date):
    # Writes price history for every ticker in stockList into one CSV at the
    # module-level path `dest`; tickers Quandl does not know are collected
    # in the module-level `notFoundSymbols` list.
    # NOTE(review): Python 2 syntax (print statement); `dest`, `end_date`,
    # `notFoundSymbols` and `writeToFile` come from module scope -- confirm.
    with open(dest, 'wb') as destFile:
        writer = csv.writer(destFile, delimiter=',')
        for i, ticker in enumerate(stockList):
            print "Parsing stock # " + str(i) + ", " + str(ticker);
            quandlSrc = "WIKI/" + ticker.upper().strip();
            try:
                data = quandl.Dataset(quandlSrc).data(params={'start_date': str(start_date),'end_date': end_date})
                writeToFile(ticker, data, writer);
            except quandl.errors.quandl_error.NotFoundError:
                notFoundSymbols.append(ticker);
def make_symbol_dic(stocks, **kwargs):
    """Map ticker symbols to company names looked up on Quandl (WIKI/<sym>).

    With no keyword arguments, *stocks* is a path to a file with one symbol
    per line; with any keyword argument present it is treated directly as an
    iterable of symbols.  Symbols that fail to resolve are skipped
    (best-effort), so the result may be empty.
    """
    symbol_dict = {}
    if not kwargs:
        # Fixed: the original opened the file without ever closing it.
        with open(stocks, 'r') as f:
            stocks = set(f.readlines())
    for symbol in stocks:
        try:
            qnd_symbol = 'WIKI/' + symbol.strip()
            stock = quandl.Dataset(qnd_symbol).name
            # Company name is everything before the first " (".
            symbol_dict.setdefault(symbol.strip(), stock.split(' (', 1)[0])
        except Exception:  # narrowed from a bare except; lookup is best-effort
            pass
    return symbol_dict
def get_data(ticker, dataset='WIKI', start_date='', end_date=''):
    """Return (timestamps, prices) for *ticker* from Quandl, or None on any
    failure.

    For each trading day two timestamps are produced (09:30 open, 16:00
    close) and `prices` interleaves the Open and Close values to match.
    """
    ticker = ticker.upper()
    try:
        ds = quandl.Dataset('{}/{}'.format(dataset, ticker))
        df = ds.data(params={
            'start_date': start_date,
            'end_date': end_date
        }).to_pandas()
        timestamps = []
        for time in df.index:
            # 09:30 market open and 16:00 close for each trading day.
            timestamps.append(time.to_pydatetime() + timedelta(hours=9.5))
            timestamps.append(time.to_pydatetime() + timedelta(hours=16))
        # .as_matrix() was removed in pandas 1.0; .to_numpy() is the
        # supported replacement.
        prices = df[['Open', 'Close']].to_numpy().flatten()
        return timestamps, prices
    except Exception:
        return None
def get_data(symbol, n_samples=500, save_to_csv=False):
    """Return the most recent *n_samples* rows of price data for *symbol*.

    A cached CSV named "<symbol>.csv" in the current directory is preferred;
    otherwise the data is requested from Quandl (WIKI/<symbol>) and
    optionally cached with save_to_csv=True.  Returns None on a network
    error.
    """
    request_stock = 'WIKI/' + symbol
    local_name = "{0}.csv".format(symbol)
    # BUG FIX: the original put if/else inside the os.listdir() loop, so
    # only the FIRST directory entry decided whether the cache was used --
    # behavior depended on directory listing order.  Scan the whole listing
    # for a cached file instead.
    if any(entry.startswith(symbol) for entry in os.listdir(os.curdir)):
        data = pd.read_csv(local_name)
        return data[-n_samples:]
    try:
        data = quandl.Dataset(request_stock).data()
        # Convert to a pandas DataFrame.
        df = data.to_pandas()
        # Keep the date as a regular column as well, for plotting.
        df['Date'] = df.index
        if save_to_csv:
            data.to_csv(local_name, index=True)
        # Very old rows have little predictive value; keep only the newest
        # n_samples rows.
        return df[-n_samples:]
    except IOError as e:
        print(e)
        return None
def gather(self):
    """
    Gathers the data from the Quandl api and returns a pandas DataFrame

    Fetches the last five years of WIKI/PRICES rows for self.ticker,
    anchored to the dataset's newest available date.

    :return: pandas DataFrame
    """
    quandl.ApiConfig.api_key = self.apikey
    # this would be where I would construct it's own api call, using
    # quandl's get_table method instead
    #base = quandl.ApiConfig.api_base
    #base += '/datatables/' + querypattern + '&api_key=' + apikey
    #data = requests.get(base)
    metadata = quandl.Dataset('WIKI/' + self.ticker)
    # Anchor the 5-year window to the newest date the dataset actually has.
    date = metadata['newest_available_date']
    data = quandl.get_table('WIKI/PRICES',
                            ticker=self.ticker,
                            date={'gte': (date - relativedelta(years=5))})
    return (data)
def fetch_data_from_quandl(symbol, start_date, end_date):
    """Download NSE/<symbol> price data from Quandl, normalize columns, and
    cache it to data/<symbol>.csv.

    @return: the cleaned DataFrame, or None (implicitly) when the fetch
        fails -- the symbol is skipped with a message.
    """
    stock_code = 'NSE/' + symbol
    try:
        print('getting data for %s' % (symbol))
        stock_data = quandl.Dataset(stock_code).data(params={
            'start_date': start_date,
            'end_date': end_date
        }).to_pandas()
        # drop(label, 1) (positional axis) and dict-style rename_axis were
        # both removed from pandas; use the keyword forms.
        stock_data = stock_data.drop(columns=['Turnover (Lacs)', 'Last'])
        stock_data = stock_data.rename(
            columns={"Total Trade Quantity": "Volume"})
        stock_data['Symbol'] = symbol
        stock_data.to_csv('data/%s.csv' % (symbol))
        return stock_data
    except Exception:  # narrowed from a bare except
        print('Skipping %s' % (symbol))
def fetchds(self):
    """Fetch full Quandl datatables/datasets for each configured code and
    write every row into DynamoDB (one item per row, keyed by column name).
    """
    quandl.ApiConfig.api_key = self.apiK
    for code in self.qdcodes:
        logger.info("Debug: code: " + code)
        if self.dtype == 'tb':
            # Datatable codes are stored as "<vendor>_<table>".
            ext = code.split("_")
            logger.info("Debug: ext: " + str(ext[1]))
            qdata = quandl.Datatable(str(ext[0]) + '/' + str(ext[1])).data()
        else:
            qdata = quandl.Dataset(self.tgtdata + '/' + code).data()
        headers = qdata.column_names
        logger.info("Debug: headers: " + str(headers))
        qdata_list = qdata.to_list()
        qlen = len(qdata_list)
        qidx = 0  #qdata index
        if self.dtype == 'ts':  #set the date type fields and update index
            headers[0] = 'date'
        for i in range(qidx, qlen):  #for each q data index
            item = {}
            if self.dtype == 'ts':
                item = {"type": code}
            hlen = len(headers)
            for h in range(0, hlen):
                item[headers[h]] = str(qdata_list[i][h])
            # Define dtable to table name meets 3 char minimum
            dtable = self.tgtdata
            if len(dtable) < 3:
                dtable = dtable + str(dtable)[-1]
            table = self.dynamodb.Table(dtable)
            # Round-trip through JSON to coerce values into plain types
            # accepted by put_item.
            item = json.dumps(item)
            resp = table.put_item(Item=json.loads(item))
            logger.info("put_item response...")
            logger.info(json.dumps(resp, indent=4, cls=DecimalEncoder))
def do_testing(self):
    """
    To test actual and predicted accuracy

    Loads (or re-downloads) price history for self.ticker and returns the
    pair (price at self.query_date, price at self.end_date) for the column
    named by self.metric.
    """
    # Sanitize the ticker for use in a filename (punctuation stripped).
    filename = '{}_{}.csv'.format("testing",
                                  re.sub(r'([^\s\w]|_)+', '', self.ticker))
    if self.fetch_remote:
        print("Reloading")
        data = quandl.Dataset(self.ticker).data(params={
            'start_date': self.start_date,
            'end_date': self.query_date
        }).to_pandas()
        # Save as CSV to avoid re-query
        data.to_csv(filename)
        data = pd.read_csv(filename, parse_dates=[0], index_col='Date')
    else:
        print("Reading CSV...")
        data = pd.read_csv(filename, parse_dates=[0], index_col='Date')
    actual_price = data[self.metric][self.query_date]
    print("Actual price at query date: {:.4f}".format(actual_price))
    end_date_price = data[self.metric][self.end_date]
    return actual_price, end_date_price
def getChartsData(*args):
    """Build chart series plus per-series comments for each chart
    description passed in.

    Each positional arg is a dict with keys "chart_title", "quandl_codes",
    "legends" and "units".  Returns (charts, charts_comments).
    """
    charts_info = args
    charts = []
    chart_titles = []  # NOTE(review): never used -- confirm before removing
    charts_comments = []
    for chart_info in charts_info:
        quandl_codes = chart_info["quandl_codes"]
        legends = chart_info["legends"]
        units = chart_info["units"]
        dataset = []
        comments = []
        for quandl_code, legend, unit in zip(quandl_codes, legends, units):
            quandl_dataset = quandl.Dataset(quandl_code)
            quandl_data = quandl_dataset.data().to_pandas()
            # First data column (column 0 is the date).
            values = quandl_data[quandl_dataset.column_names[1]].tolist()
            # Month-start timestamps converted to epoch milliseconds for
            # the charting front-end.
            labels = (
                quandl_data.index.to_period("M").to_timestamp('S').astype(
                    np.int64) // 10**6).tolist()
            data = []
            for label, value in zip(labels, values):
                data.append([label, value])
            dataset.append({"legend": legend, "data": data})
            comment = make_comment(quandl_data, quandl_dataset, legend, unit)
            comments.append(comment)
        charts.append({
            "chart_title": chart_info["chart_title"],
            "dataset": dataset
        })
        charts_comments.append(comments)
    return charts, charts_comments
def Stock_Prices():
    """Build stock_prices.csv with adjusted closes (2001-12 .. 2010-12) for
    a thinned sample of tickers found under <path>/_KeyStats.
    """
    df = pd.DataFrame()
    statspath = path + '/_KeyStats'
    stock_list = [x[0] for x in os.walk(statspath)]
    # Thin the directory list: skip the root, take every 25th entry.
    stock_list = stock_list[1:550:25]
    print(stock_list[:20])
    for each_dir in stock_list[1:]:
        try:
            # NOTE(review): '/'-split assumes POSIX paths;
            # os.path.basename would be portable.
            ticker = each_dir.split('/')[-1]
            name = "WIKI/" + ticker.upper()
            data = quandl.Dataset(name) \
                .data(params={'start_date': '2001-12-01', 'end_date': '2010-12-30'})
            data = data.to_pandas()
            data[ticker.upper()] = data["Adj. Close"]
            df = pd.concat([df, data[ticker.upper()]], axis=1)
        except Exception as e:
            print(str(e))
            # Back off after a failure -- presumably to respect the API
            # rate limit; confirm.
            time.sleep(10)
    print("df[:10] ", df[:10])
    df.to_csv("stock_prices.csv")
def download_data_quandl(cls, stock_names, start_date, end_date): """ A function that requests stock information to the quandl API. You need to have a token for it, which is set in the __init__.py file. The retrieved data is persisted using the DataIngestionAPI. Parameters ---------- stock_names: list A list with the stock names you want information about start_date: str in YYYY-MM-DD format Beginning day from which you want data end_date: str in YYYY-MM-DD format Date until which you want data """ # We set the paths to persist the data data_filename = "data_quandl_" + start_date +"_to_" + end_date + ".joblib" full_data_path = (cls.save_dir / "../data/quandl" / data_filename).resolve() # We check if we already have the data persisted for the specified dates if not full_data_path.is_file(): quandl_stock_data = pd.DataFrame(columns=['Open', 'High', 'Low', 'Close', 'Volume', 'Ticker']) for name in stock_names: query_name = "WIKI/" + name try: data = quandl.Dataset(query_name).data(params={ 'start_date':start_date, 'end_date':end_date}).to_pandas() data = DataProcessing.clean_data(data) data['Ticker'] = name quandl_stock_data = pd.concat([quandl_stock_data, data], ignore_index=True) except: cls.persistenceApi.write_to_error_file(name, "quandl") # We persist the data PersistenceAPI.persist_stock_data(quandl_stock_data, full_data_path)
warehouse_location = abspath('spark-warehouse') #- hive database spark = SparkSession \ .builder.master("local") \ .appName("Python Spark SQL Hive integration example") \ .config("spark.sql.warehouse.dir", warehouse_location) \ .enableHiveSupport() \ .getOrCreate() # Enable Arrow-based columnar data transfers spark.conf.set("spark.sql.execution.arrow.enabled", "true") # Streaming Quandl & store in pandas quandl.ApiConfig.api_key = 'P6LZzSkdVN6zTXQDE6Pd' qGET = quandl.Dataset('NSE/OIL').data() toPD = qGET.to_pandas() # Generate a pandas DataFrame pdDF = pd.DataFrame(toPD) # Create a Spark DataFrame from a pandas DataFrame using Arrow base sparkDF = spark.createDataFrame(pdDF) # Rename the Column dataDF = (sparkDF.withColumnRenamed("Total Trade Quantity", "TotalTradeQuantity").withColumnRenamed( "Turnover (Lacs)", "TurnoverLacs")) # Create a Spark DataFrame Create Temp view dataDF.createOrReplaceTempView("DimNSEOIL")
def get_quandl_metadata():
    """Fetch, print, and return metadata for the Quandl "EOD" dataset.

    NOTE(review): the API key is hard-coded here -- move it to config/env
    and rotate it.
    """
    quandl.ApiConfig.api_key = "G4L36mzXzxcKR5c_vyFr"
    metadata = quandl.Dataset("EOD").data().meta
    print(metadata)
    # Was a bare `return`; returning the metadata makes the function useful
    # to callers while remaining backward-compatible.
    return metadata
def PB_C(self,):
    """Calculate-button handler: read the UI inputs, run the DCF valuation
    pipeline (via the `d` dcffunctions module and the Company class), and
    write every intermediate figure back to the output widgets.
    """
    # clear all outputs
    self.clearOutputs()

    # getting the inputs and exception handling
    # FIX: the original compared Qt text with `is not ''` (identity
    # comparison against a literal -- implementation-dependent and a
    # SyntaxWarning on Python 3.8+); use value comparison instead.
    if self.input_stockticker.text() != '':
        stockticker = self.input_stockticker.text()
    else:
        self.output_rec.setText('Error: Please input a stockticker.')
        # NOTE(review): `raise exit` aborts via a TypeError rather than a
        # clean return -- kept as-is to preserve behavior; confirm intent.
        raise exit
    if self.input_forecastedGrowthRate.text() != '':
        forecasted_growth_rate = float(
            self.input_forecastedGrowthRate.text())
    else:
        forecasted_growth_rate = None
    if self.input_perpetualGrowthRate.text() != '':
        perpetual_growth_rate = float(
            self.input_perpetualGrowthRate.text())
    else:
        perpetual_growth_rate = None
    if self.input_quandl_API_key.text() != '':
        quandl.ApiConfig.api_key = self.input_quandl_API_key.text()
    else:
        self.output_rec.setText('Error: Please input an Quandl API Key.' +
                                ' Go to https://www.quandl.com/')
        raise exit

    # test if quandl apikey is valid (retry up to 9 attempts)
    count = 0
    while True:
        try:
            quandl.Dataset('WIKI/AAPL').data()
            break
        except Exception:  # narrowed from a bare except
            count += 1
            if count > 8:
                self.output_rec.setText('Error: Invalid Quandl API key. ' +
                                        'Please check your API key ' +
                                        'and try again')
                raise exit

    # test if stockticker is valid (retry up to 9 attempts)
    count = 0
    while True:
        try:
            ignore = quandl.Dataset('WIKI/' + stockticker).data()
            break
        except Exception:  # narrowed from a bare except
            count += 1
            if count > 8:
                self.output_rec.setText('Error: Invalid stockticker.')
                raise exit

    # initialise Company object and store 'data' attribute [dict]
    company = Company(stockticker, quandl.ApiConfig.api_key)
    data = company.data

    # Calculations using Company data and dcffunctions
    # tax rate
    atr = d.avg_tax_rate(data['taxexp'], data['ebt'])
    # change in networking capital
    cwc = d.cwc(data['ybca'], data['yeca'], data['ybcl'], data['yecl'])
    # fcf valuation
    fcf = d.fcf(data['lastebit'], atr, data['dep'], data['amor'], cwc,
                data['capex'])
    # 5 years free cash flow (all five years as one collection)
    fiveyearfcf = {}
    if forecasted_growth_rate is None:
        fiveyearfcf = d.five_year_fcf(fcf)
    else:
        fiveyearfcf = d.five_year_fcf(fcf, forecasted_growth_rate)
    # cost of equity
    re = d.re(data['rm'], data['rf'], data['beta'])
    # cost of debt
    rd = d.rd(data['intexp'], data['LTD'])
    # weighted average cost of capital
    wacc = d.wacc(data['marketcap'], data['lastD'], atr, re, rd)
    # terminal value - not part of ui, but included in calculation
    tv = 0
    if perpetual_growth_rate is None:
        tv = d.tv(fiveyearfcf, wacc)
    else:
        tv = d.tv(fiveyearfcf, wacc, perpetual_growth_rate)
    # discounted cash flow for the current year
    dcf = d.dcf(fiveyearfcf, wacc, tv)
    # target price - not part of ui, but included in calculation
    target_price = d.target_price(dcf, data['lastD'], data['numshares'])
    # recommendation
    rec = d.recommendation(
        data['current_price'], data['stdev'], target_price)

    # generate the outputs
    self.output_ebit.setText(d.convert(data['lastebit']))
    self.output_tax_rate.setText(str(round(atr, 4)))
    # depreciation and amortization
    self.output_depr.setText(d.convert(data['dep']))
    self.output_amor.setText(d.convert(data['amor']))
    # capital expenditure
    self.output_capex.setText(d.convert(data['capex']))
    self.output_cwc.setText(d.convert(cwc))
    self.output_fcf.setText(d.convert(fcf))
    self.output_y0.setText(d.convert(fiveyearfcf[0]))
    self.output_y1.setText(d.convert(fiveyearfcf[1]))
    self.output_y2.setText(d.convert(fiveyearfcf[2]))
    self.output_y3.setText(d.convert(fiveyearfcf[3]))
    self.output_y4.setText(d.convert(fiveyearfcf[4]))
    self.output_y5.setText(d.convert(fiveyearfcf[5]))
    # market capitalization
    self.output_marketcap.setText(d.convert(data['marketcap']))
    # beta
    self.output_beta.setText(str(data['beta']))
    # expected market returns
    self.output_emr.setText(str(round(data['rm'], 4)))
    self.output_coe.setText(str(round(re, 4)))
    # total long term debt
    self.output_ltd.setText(d.convert(data['lastD']))
    self.output_cod.setText(str(round(rd, 4)))
    self.output_wacc.setText(str(round(wacc, 4)))
    self.output_dcf.setText(d.convert(dcf))
    self.output_rec.setText(rec)
    # insertPlainText
import quandl
from pprint import pprint

# NOTE(review): a real API key is committed in source -- move it to
# config/env and rotate it.
quandl.ApiConfig.api_key = "tRqZ1bqsD-mcjEqaLCGR"

config = {"start_date": "2019-06-28", "end_date": "2019-10-28"}
stocks = quandl.Dataset('EOD/TSLA').data(params=config)


def find_max(collection):
    # Linear scan for the largest item.
    # NOTE(review): on an empty collection `high_value` is never bound and
    # the return raises UnboundLocalError -- confirm callers never pass [].
    for index, item in enumerate(collection, start = 0):
        if index == 0:
            high_value = item
        if item > high_value:
            high_value = item
    return high_value


#create a function that creates stocklist for us
#function should allow us to specify what attribute list
def pluck(collection, attr):
    # Collect the named attribute from every item in the collection.
    result = []
    for item in collection:
        val = getattr(item, attr)
        result.append(val)
    return result


def att_avg_ratio(collection, attr1, attr2):
    # NOTE(review): this definition appears truncated in this chunk.
    listuno = []
import quandl
import matplotlib.pyplot as plt
import numpy as np
import numpy as np_data
from io import StringIO
from scipy.interpolate import interp1d
from scipy.optimize import leastsq
import numpy.polynomial.polynomial as poly

# NOTE(review): hard-coded API key; also `csv` is used below but is not
# imported in this chunk -- confirm it is imported elsewhere.
quandl.ApiConfig.api_key ='RqpDsLqAEJyHa7iAADNQ'

try:
    stockSymbol = input('Enter a quandl stock symbol: ')
    data = quandl.Dataset('WIKI/'+stockSymbol).data(params={
        'start_date':'2018-01-01',
        'end_date':'2018-01-31'})
    data_array= []
    csv_data = data.to_csv()
except Exception as e:
    # NOTE(review): concatenating str + Exception raises TypeError; the
    # bare `exit` name on the next line does not actually exit either --
    # left unchanged in this documentation pass.
    print('please enter a valid symbol' + e)
    exit

try:
    i=0
    reader = csv.reader(csv_data.split('\n'), delimiter=',')
    for row in reader:
        #skip the row header as it is string not float
        if i>0 and row[4] :
            data_array.append(float(row[4]))
# ejemplo de regresion con datos de quandl para oracle # API KEY quandl -> oKwYeLxg3fyRASCagzVs import pandas as pd import quandl import matplotlib.pyplot as plt df = quandl.get("GOOG/NASDAQ_ORCL") data = quandl.Dataset('GOOG/NASDAQ_ORCL').data() prom_hist = 0 df = df[['High', 'Low', 'Open', 'Close']] df['Volat'] = (df['High'] - df['Close']) / df['Close'] * 100.0 df['Porc_Cambio'] = (df['Close'] - df['Open']) / df['Open'] * 100.0 df = df[['Close', 'Volat', 'Porc_Cambio']] print(df) print() dat = [] tot = 0 for e in data: v = float(e.close or 0) prom_hist += v # los valores que no se tengan se vuelven cero dat.append(v) if v != 0 else v tot += 1 if v != 0 else v prom_hist = prom_hist / tot
def get_ts_metadata(self):
    '''Make a metadata request for a specific dataset.

    NOTE(review): this downloads the full dataset just to read .meta;
    a limited request (params={'limit': 1}) may be cheaper -- confirm the
    meta payload is identical before changing.
    '''
    metadata = quandl.Dataset(self.datatable_code).data().meta
    return metadata
def data(self, dataset='AAPL', args=None):
    """Return Quandl data for WIKI/<dataset>.

    @param dataset: ticker symbol appended to the 'WIKI/' database code.
    @param args: optional dict of Quandl query parameters (start_date,
        end_date, limit, ...).
    """
    # Mutable default argument ({}) replaced with a None sentinel -- the
    # classic shared-default pitfall; callers see identical behavior.
    if args is None:
        args = {}
    logging.info('Quandl Query for WIKI/%s with args %s' % (dataset, args))
    # print('Quandl Query for: ','WIKI/'+dataset,' with args ', args)
    return quandl.Dataset('WIKI/' + dataset).data(params=args)