def predict_last_values(self, symbol, last=0):
    # Fetch raw OHLCV data for the symbol and scale every feature into [0, 1].
    data = get_raw_data(symbol, False)
    data_normaliser = preprocessing.MinMaxScaler()
    data_normalised = data_normaliser.fit_transform(data)
    # Fit a separate scaler on the next-day open prices so that model
    # output can later be inverse-transformed back to price units.
    next_day_open_values = get_next_day_open_values(data)
    y_normaliser = preprocessing.MinMaxScaler()
    y_normaliser.fit(next_day_open_values)
    # Build the windowed OHLCV histories and their technical indicators.
    ohlcv_histories_normalised = get_ohlcv_histories_normalised(data_normalised, last)
    technical_indicators_normalised = get_technical_indicators(ohlcv_histories_normalised)
    return (ohlcv_histories_normalised, technical_indicators_normalised,
            data_normaliser, y_normaliser)

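# Hedged usage sketch for predict_last_values: the separate y_normaliser
# exists so model output (in [0, 1]) can be mapped back to price units.
# `predictor` and `model` are hypothetical stand-ins, not defined above.
def predict_prices(predictor, model, symbol, last=50):
    histories, indicators, x_scaler, y_scaler = predictor.predict_last_values(symbol, last)
    y_pred_normalised = model.predict([histories, indicators])
    # invert the MinMax scaling fitted on the next-day open values
    return y_scaler.inverse_transform(y_pred_normalised)
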
def load_data():
    dfraw = util.get_raw_data()
    # dead-time correction - N = Nm / (1 - Nm * tau / T)
    T = 60.0         # 60 second counting interval
    tau = 1 / 250e3  # upper limit estimate
    # tau = 2 * tau
    dtc = 1 - dfraw.lld * tau / T
    dfraw['lld_obs'] = dfraw.lld.copy()
    dfraw.lld = np.round(dfraw.lld / dtc)
    print('dead time correction, maximum:',
          ((1 / dtc).max() - 1) * 100, 'percent')
    return dfraw

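# Worked example of the dead-time formula N = Nm / (1 - Nm * tau / T),
# with illustrative numbers (not from the data above): 120000 observed
# counts in a 60 s interval and the same tau = 4 microseconds.
Nm, T_s, tau_s = 120_000.0, 60.0, 1 / 250e3
live_fraction = 1 - Nm * tau_s / T_s       # 0.992
N_corrected = Nm / live_fraction           # ~120968 true counts
print(f'{(1 / live_fraction - 1) * 100:.2f}% correction')  # 0.81%
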
def api_ipo_funded_detail(self, query_UUID, query_key=User_key):
    '''
    get IPO info from crunchbase

    input argument:
        query_UUID: UUID (or permalink) of the IPO to look up
        query_key: provide the user key

    return variables:
        flag indicating the current process 0->fail 1->success
        df_IPO_funded information about the IPO and the funded company
    '''
    # get the raw data
    payload = {'user_key': query_key}
    flag, raw_json = util.get_raw_data(self.api_url + "/" + query_UUID, payload)
    if flag == 0:
        return 0, ""
    property_info, relation_ship = util.detail_info(raw_json)
    # the relationship item connects the IPO to the funded company by uuid
    funded_company = relation_ship['item']
    IPO_funded_dict = {
        'ipo_id': query_UUID,
        'stock_exchange_symbol': property_info['stock_exchange_symbol'],
        'stock_symbol': property_info['stock_symbol'],
        'money_raised_usd': property_info['money_raised_usd'],
        'funded_company_id': funded_company['uuid'],
        'funded_company_permalink': funded_company['properties']['permalink'],
    }
    # wrap the dict in a list: all values are scalars, and a bare dict
    # would raise "If using all scalar values, you must pass an index"
    df_IPO_funded = pd.DataFrame([IPO_funded_dict])
    return 1, df_IPO_funded

def api_acquisition_relation_detail(self, query_UUID, query_key=User_key):
    '''
    get acquisition info from crunchbase

    input argument:
        query_UUID: UUID (or permalink) of the acquisition to look up
        query_key: provide the user key

    return variables:
        flag indicating the current process 0->fail 1->success
        df_acquisition acquirer/acquiree pair for the acquisition
    '''
    # get the raw data
    payload = {'user_key': query_key}
    flag, raw_json = util.get_raw_data(self.api_url + "/" + query_UUID, payload)
    if flag == 0:
        return 0, ""
    property_info, relation_ship = util.detail_info(raw_json)
    acquiree_data = relation_ship['acquiree']
    acquirer_data = relation_ship['acquirer']
    acquisition_dict = {
        'acquisition_id': query_UUID,
        'acquiree_id': acquiree_data['item']['uuid'],
        'acquiree_permalink': acquiree_data['item']['permalink'],
        'acquirer_id': acquirer_data['item']['uuid'],
        'acquirer_permalink': acquirer_data['item']['permalink'],
    }
    # single-row DataFrame: wrap the scalar dict in a list
    df_acquisition = pd.DataFrame([acquisition_dict])
    return 1, df_acquisition

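# The detail endpoints above share a (flag, DataFrame) return contract.
# Hypothetical caller sketch: `cb` is an instance of the API client
# class and `uuids` a list of acquisition UUIDs gathered elsewhere.
def collect_acquisition_details(cb, uuids):
    frames = []
    for uuid in uuids:
        flag, df = cb.api_acquisition_relation_detail(uuid)
        if flag == 1:            # keep only successful lookups
            frames.append(df)
    return pd.concat(frames, ignore_index=True)
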
def api_acquisition_summary(self, query_para, flag_status):
    '''
    get summarized info of acquisitions from crunchbase

    input argument:
        query_para:
            updated_since: When provided, restricts the result set to
                records where updated_at >= the passed value
            sort_order: The sort order of the collection. Options are
                "created_at ASC", "created_at DESC", "updated_at ASC",
                and "updated_at DESC"
            page: Page number of the results to retrieve.
        flag_status: passed through to util.meta_info

    return variables:
        flag indicating the current process 0-> fail 1-> success
        page_info indicating the page information we need in the next run
        df_temp dataframe we collect
    '''
    # get the raw data
    # start_url = self.api_prefix + API_ENDPOINT['ipo']
    query_para["user_key"] = User_key
    key_word = "properties"
    flag, raw_json = util.get_raw_data(self.api_url, query_para)
    if flag == 0:
        return 0, "", ""
    page_info, data_info = util.meta_info(raw_json, flag_status)
    df_temp = pd.DataFrame(data_info)
    # expand the nested 'properties' dict into top-level columns
    df_temp = pd.concat(
        [df_temp.drop([key_word], axis=1),
         df_temp[key_word].apply(pd.Series)],
        axis=1)
    df_temp = df_temp[acquisition_summary_col]
    # advance the cursor so the next call fetches the following page
    if page_info["next_page_url"]:
        self.api_url = page_info["next_page_url"]
    return 1, page_info, df_temp

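# Because api_acquisition_summary advances self.api_url to
# next_page_url, a caller can drain every page by looping until the
# cursor is empty. `cb` is a hypothetical client instance.
def collect_all_acquisitions(cb, query_para, flag_status=1):
    pages = []
    while True:
        flag, page_info, df_page = cb.api_acquisition_summary(query_para, flag_status)
        if flag == 0:
            break                # request failed; keep what we have
        pages.append(df_page)
        if not page_info["next_page_url"]:
            break                # last page reached
    return pd.concat(pages, ignore_index=True)
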
def api_investment_detail(self, query_UUID, query_key=User_key):
    '''
    get info of a funding round and its investments from crunchbase

    input argument:
        query_UUID: UUID of the funding round to look up
        query_key: provide the user key

    return variables:
        flag indicating the current process 0->fail 1->success
        df_investment one row per investment in the funding round
    '''
    # get the raw data
    payload = {'user_key': query_key}
    flag, raw_json = util.get_raw_data(self.api_url + "/" + query_UUID, payload)
    if flag == 0:
        return 0, ""
    items_info = util.relation_info(raw_json)
    investment_data_list = []
    for ele_item in items_info:
        investment_temp = {}
        property_info = ele_item['properties']
        relationship_info = ele_item['relationships']
        ## investment part
        investment_dict = {
            'funding_round_id': query_UUID,
            'investment_id': ele_item['uuid'],
            'money_invested': property_info['money_invested'],
            'money_invested_currency_code': property_info['money_invested_currency_code'],
            'money_invested_usd': property_info['money_invested_usd'],
            'is_lead_investor': property_info['is_lead_investor'],
            'announced_on': property_info['announced_on'],
        }
        ## related person and funded organization
        investor_info = relationship_info["investors"]
        investor_property = investor_info['properties']
        investor_dict = {
            'investor_id': investor_info['uuid'],
            # permalink lives under 'properties', as in the other endpoints
            'investor_permalink': investor_property['permalink'],
        }
        firm_info = relationship_info['invested_in']
        funded_firm_dict = {
            'target_id': firm_info['uuid'],
            'target_permalink': firm_info['properties']['permalink'],
        }
        investment_temp.update(investment_dict)
        investment_temp.update(investor_dict)
        investment_temp.update(funded_firm_dict)
        investment_data_list.append(investment_temp)
    df_investment = pd.DataFrame(investment_data_list)
    return 1, df_investment

def api_degree_jobs_detail(self, query_UUID, query_key=User_key):
    '''
    get info of people from crunchbase
        people degree info
        people jobs info

    input argument:
        query_UUID: UUID of the person to look up
        query_key: provide the user key

    return variables:
        flag indicating the current process 0->fail 1->success
        people_df extra person-level fields
        degree_df one row per degree
        jobs_df one row per job
    '''
    # get the raw data
    payload = {'user_key': query_key}
    flag, raw_json = util.get_raw_data(self.api_url + "/" + query_UUID, payload)
    if flag == 0:
        return 0, "", "", ""
    property_info, relation_ship = util.detail_info(raw_json)

    ## people extra
    people_dict = {
        'uuid': query_UUID,
        'born_on': property_info['born_on'],
        'rank': property_info['rank'],
    }
    people_df = pd.DataFrame([people_dict])  # wrap the scalar dict in a list

    ## degree
    degree_template = {
        'people_id': "",
        ## school info
        'degree_type': "",
        'subject': "",
        'institution': "",
        'graduated_at': "",
        'started_at': "",
    }
    degree_df_temp_list = []
    degree_list = relation_ship['degrees']['items']
    for ele in degree_list:
        # copy the template: appending one shared dict would make every
        # row alias the same object and repeat the last degree
        degree_df_temp = degree_template.copy()
        degree_df_temp['people_id'] = query_UUID
        for key1, key2 in degree_dict.items():
            degree_df_temp[key1] = ele[key2]
        ## institution
        temp_relation = ele['relationships']['school']['properties']
        degree_df_temp['institution'] = temp_relation['name']
        degree_df_temp_list.append(degree_df_temp)
    degree_df = pd.DataFrame(degree_df_temp_list)

    ## jobs
    jobs_df_temp_list = []
    jobs_list = relation_ship['jobs']['items']
    for ele in jobs_list:
        jobs_df_temp = {}
        jobs_df_temp['people_id'] = query_UUID
        jobs_df_temp['title'] = ele['properties']['title']
        jobs_df_temp['started_on'] = ele['properties']['started_on']
        jobs_df_temp['ended_on'] = ele['properties']['ended_on']
        jobs_df_temp['is_current'] = ele['properties']['is_current']
        jobs_df_temp['job_type'] = ele['properties']['job_type']
        jobs_df_temp['affiliation'] = ele['relationships']['name']
        jobs_df_temp_list.append(jobs_df_temp)  # append each row (was overwriting the list)
    jobs_df = pd.DataFrame(jobs_df_temp_list)

    return 1, people_df, degree_df, jobs_df

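# Demonstration of the shared-dict pitfall fixed in the degree loop
# above: appending one mutated dict stores N references to the same
# object, so every row ends up with the last iteration's values.
rows = []
shared = {}
for degree in ['BSc', 'MSc']:
    shared['degree_type'] = degree
    rows.append(shared)                # reference, not a copy
print(rows)  # [{'degree_type': 'MSc'}, {'degree_type': 'MSc'}]
rows = [{'degree_type': d} for d in ['BSc', 'MSc']]  # fresh dict per row
print(rows)  # [{'degree_type': 'BSc'}, {'degree_type': 'MSc'}]
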
def api_organization_detail(self, query_UUID, query_key=User_key):
    '''
    get info from crunchbase

    input argument:
        query_UUID: The permalink of the organization or the UUID of the organization
        query_key: provide the user key

    return variables:
        flag indicating the current process 0->fail 1->success
        df_property information about organization
    '''
    ### get the raw data
    payload = {'user_key': query_key}
    flag, raw_json = util.get_raw_data(self.api_url + "/" + query_UUID, payload)
    if flag == 0:
        return 0, ""
    property_info, relation_ship = util.detail_info(raw_json)
    # property_info is already a flat dict, so build a single-row frame
    df_property = pd.DataFrame([property_info])
    df_property = df_property[organization_property_col1]
    df_relation = pd.DataFrame(relation_ship)

    ### funding round
    if relation_ship['funding_rounds']['paging']['total_items'] > 0:
        df_property['num_founding_round'] = int(
            relation_ship['funding_rounds']['paging']['total_items'])
        # 'items' is a list; take the first (most recent) round's properties
        funding_info = relation_ship['funding_rounds']['items'][0]['properties']
        df_property['current_funding_type'] = funding_info['funding_type']
        df_property['current_funding_series'] = funding_info['series']

    ### operating status
    acquired = relation_ship['acquired_by']['paging']['total_items'] > 0
    # closed_on holds a date when the company has shut down, else None
    closed = property_info.get('closed_on') is not None
    if not acquired and not closed:
        df_property['status'] = "operating"
    elif acquired:
        df_property['status'] = "acquired"
    if closed:
        df_property['status'] = "closed"

    ### category
    if relation_ship['categories']['paging']['total_items'] > 0:
        category_list = relation_ship['categories']['items']
        cat_info_list = []
        cat_groups_list = []
        for ele in category_list:
            cat_info_list.append(ele['properties']['name'])
            cat_groups_list.append(ele['properties']['category_groups'])

    return 1, df_property

def main():
    # df = data[['Date','Settle', 'Volume']]
    data = get_raw_data()
    df = data
    window_sma = [5, 10, 15, 20, 50, 100, 200]
    window_ema = [10, 12, 20, 26, 50, 100, 200]
    price_val = np.array(df['average'])
    time_val = np.array(df['date'])
    daily_return = create_class(price_val)

    sma_map = {}
    ema_map = {}
    mom_map = {}
    sma_cross_map = {}
    ema_cross_map = {}
    up_down_map = {}
    # the two window lists have equal length, so walk them in lockstep:
    # k is the current SMA window, l the current EMA window
    for k, l in zip(window_sma, window_ema):
        sma_map["SMA" + str(k)] = create_sma(price_val, k)
        sma_map["SMA" + str(l)] = create_sma(price_val, l)
        ema_map["EMA" + str(l)] = create_ema(price_val, sma_map["SMA" + str(l)], l)
        mom_map["MOM" + str(k)] = create_mom(price_val, k)  # computed but not exported below
        sma_cross_map["SMA_CROSS" + str(k)] = create_ma_cross(sma_map["SMA" + str(k)], price_val)
        ema_cross_map["EMA_CROSS" + str(l)] = create_ma_cross(ema_map["EMA" + str(l)], price_val)
        # the window must match the key: use k (the SMA window), not l
        up_down_map["Up-Down" + str(k)] = create_up_down(price_val, k)

    macd_val = create_macd(price_val)
    macd_cross = create_macd_cross(macd_val)

    day_since_cross_map = {}
    for m, l in zip(sma_cross_map.keys(), ema_cross_map.keys()):
        day_since_cross_map["Day_Since_" + str(m)] = create_day_since_cross(sma_cross_map[m])
        day_since_cross_map["Day_Since_" + str(l)] = create_day_since_cross(ema_cross_map[l])

    raw_data = {
        'Date': time_val, 'Price': price_val, 'Minute': np.array(df['minute']),
        'Class': daily_return, 'Volume': np.array(df['volume']),
        'SMA5': sma_map["SMA5"], 'SMA10': sma_map["SMA10"], 'SMA15': sma_map["SMA15"],
        'SMA20': sma_map["SMA20"], 'SMA50': sma_map["SMA50"],
        'SMA100': sma_map["SMA100"], 'SMA200': sma_map["SMA200"],
        'EMA10': ema_map["EMA10"], 'EMA12': ema_map["EMA12"], 'EMA20': ema_map["EMA20"],
        'EMA26': ema_map["EMA26"], 'EMA50': ema_map["EMA50"],
        'EMA100': ema_map["EMA100"], 'EMA200': ema_map["EMA200"],
        'MACD': macd_val, 'MACD_Cross': macd_cross,
        'SMA5Cross': sma_cross_map["SMA_CROSS5"], 'SMA10Cross': sma_cross_map["SMA_CROSS10"],
        'SMA15Cross': sma_cross_map["SMA_CROSS15"], 'SMA20Cross': sma_cross_map["SMA_CROSS20"],
        'SMA50Cross': sma_cross_map["SMA_CROSS50"], 'SMA100Cross': sma_cross_map["SMA_CROSS100"],
        'EMA12Cross': ema_cross_map["EMA_CROSS12"], 'EMA10Cross': ema_cross_map["EMA_CROSS10"],
        'EMA20Cross': ema_cross_map["EMA_CROSS20"], 'EMA26Cross': ema_cross_map["EMA_CROSS26"],
        'EMA50Cross': ema_cross_map["EMA_CROSS50"], 'EMA100Cross': ema_cross_map["EMA_CROSS100"],
        'SMA200Cross': sma_cross_map["SMA_CROSS200"], 'EMA200Cross': ema_cross_map["EMA_CROSS200"],
        'Up-Down5': up_down_map["Up-Down5"], 'Up-Down10': up_down_map["Up-Down10"],
        'Up-Down15': up_down_map["Up-Down15"], 'Up-Down20': up_down_map["Up-Down20"],
        'Up-Down50': up_down_map["Up-Down50"], 'Up-Down100': up_down_map["Up-Down100"],
        'Day_Since_SMA5Cross': day_since_cross_map["Day_Since_SMA_CROSS5"],
        'Day_Since_SMA10Cross': day_since_cross_map["Day_Since_SMA_CROSS10"],
        'Day_Since_SMA15Cross': day_since_cross_map["Day_Since_SMA_CROSS15"],
        'Day_Since_SMA20Cross': day_since_cross_map["Day_Since_SMA_CROSS20"],
        'Day_Since_SMA50Cross': day_since_cross_map["Day_Since_SMA_CROSS50"],
        'Day_Since_SMA100Cross': day_since_cross_map["Day_Since_SMA_CROSS100"],
        'Day_Since_EMA12Cross': day_since_cross_map["Day_Since_EMA_CROSS12"],
        'Day_Since_EMA10Cross': day_since_cross_map["Day_Since_EMA_CROSS10"],
        'Day_Since_EMA20Cross': day_since_cross_map["Day_Since_EMA_CROSS20"],
        'Day_Since_EMA26Cross': day_since_cross_map["Day_Since_EMA_CROSS26"],
        'Day_Since_EMA50Cross': day_since_cross_map["Day_Since_EMA_CROSS50"],
        'Day_Since_EMA100Cross': day_since_cross_map["Day_Since_EMA_CROSS100"],
    }
    data = pd.DataFrame(raw_data)
    # drop the first 200 rows so the longest (200-period) window is fully populated
    data[200:len(price_val)].to_csv("spy1min.csv")

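# Cross-check sketch for the moving-average helpers (create_sma and
# create_ema are project code not shown here; assuming they follow the
# textbook definitions, these pandas equivalents should agree).
def sma_check(prices, n):
    # simple moving average: mean of the trailing n prices
    return pd.Series(prices).rolling(window=n).mean().to_numpy()

def ema_check(prices, n):
    # exponential moving average with smoothing alpha = 2 / (n + 1)
    return pd.Series(prices).ewm(span=n, adjust=False).mean().to_numpy()
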
import os

import mne
import pickle as pkl
import numpy as np

# Fetch (or locate) the fsaverage template subject that ships with MNE.
fs_dir = mne.datasets.fetch_fsaverage(verbose=True)
subjects_dir = os.path.dirname(fs_dir)

# The files live in:
subject = 'fsaverage'
trans = os.path.join(fs_dir, 'bem', 'fsaverage-trans.fif')
src = os.path.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = os.path.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')

pth_res = 'assets/'
pth = 'assets/raw_data/CL_KA_01.vhdr'
raw, epochs, evoked = get_raw_data(pth)
raw.set_eeg_reference(projection=True)  # needed for inverse modeling
res = 'low'

# Source Model (replaces the template src path above with a custom one)
src = create_source_model(subject, subjects_dir, pth_res, res=res)

# Forward Model
fwd = mne.make_forward_solution(raw.info, trans=trans, src=src, bem=bem,
                                eeg=True, mindist=5.0, n_jobs=-1)
mne.write_forward_solution(os.path.join(pth_res, '{}-fwd.fif'.format(subject)),
                           fwd, overwrite=True)

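# In a later session the saved forward model can be reloaded instead of
# recomputed (the BEM step is expensive); read_forward_solution is the
# standard MNE counterpart of write_forward_solution.
fwd = mne.read_forward_solution(os.path.join(pth_res, '{}-fwd.fif'.format(subject)))
print(fwd)  # summary: source space, channels, orientations
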