def read_from_rds(event, context):
    """AWS Lambda handler: query dbo.vwLocalAreaDistrictLookup over ODBC and
    return one result row parsed from JSON.

    Connection details (DSN, username, password) are fetched from SSM
    Parameter Store under /lambda-https-request/<env>/... where <env> comes
    from the `env` environment variable.

    :param event: Lambda event payload (unused here).
    :param context: Lambda context object (unused here).
    :return: dict parsed from the JSON text of a fetched row.
    """
    env = os.environ['env']
    print('Setting environment to ' + env + '...')
    # ODBC driver name passed to the connection helper.
    dr = 'ODBC Driver 17 for SQL Server'
    print('Getting parameters from parameter store...')
    param = '/lambda-https-request/' + env + '/read-from-rds-ds-param'
    ds = Functions.get_parameter(param, False)
    param = '/lambda-https-request/' + env + '/read-from-rds-un-param'
    un = Functions.get_parameter(param, False)
    # Second argument True presumably requests SecureString decryption —
    # TODO confirm against Functions.get_parameter.
    param = '/lambda-https-request/' + env + '/read-from-rds-pw-param'
    pw = Functions.get_parameter(param, True)
    conn = Functions.sql_server_conn(dr, ds, un, pw)
    with conn:
        with conn.cursor() as cur:
            print('Building sql query...')
            # FOR JSON AUTO makes SQL Server emit the result set as JSON text.
            sql = 'SELECT element FROM dbo.vwLocalAreaDistrictLookup FOR JSON AUTO ;'
            print('Attempting to read rows from sql query...')
            cur.execute(sql)
            result = cur.fetchall()
            for row in result:
                print('Building dictionary...')
                row_dict = json.loads((row[0]))
                print('Input json...' + row[0])
    # NOTE(review): only the dict built from the LAST fetched row is returned;
    # with FOR JSON AUTO the result is typically a single row, but confirm —
    # if multiple rows are possible, earlier rows are silently discarded.
    return row_dict
def run(self):
    """Print the user's trip summary, then the observation comparing the
    user's allocated time against the normal trip duration."""
    summary = (
        self.set_current_place,
        self.destination_place,
        self.transport,
        self.duration_user,
        self.distance,
        'Your',
    )
    Functions.print_result(*summary)
    Functions.print_observation(self.duration_normal, self.duration_user, self.distance)
def set_suggestion(self):
    """Pick the first autocomplete suggestion, remember its text, and click it.

    :raises RepeatLoopException: when the suggestion list cannot be read
        (missing element, stale element, empty list, ...).
    """
    time.sleep(Functions.get_time_step())
    try:
        elements = self.search_elements(Locators.SUGGESTIONS_PLACES)
        self.suggestion_place = elements[0].text
        elements[0].click()
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrowing to Exception lets those propagate.
        time.sleep(Functions.get_time_step())
        raise RepeatLoopException(
            'Problem to get the suggestions for this place!')
def get_trip_description(self):
    """Return the trip-description element.

    Falls back to the "no possible trip" element when the description is
    absent, prints its text, and raises NoPossibleTripException.
    """
    print(f'Getting the trip description ...')
    try:
        time.sleep(Functions.get_time_step())
        return self.search_element(Locators.TRIP_DESCRIPTION)
    except NoSuchElementException:
        time.sleep(Functions.get_time_step())
        no_trip = self.search_element(Locators.NO_POSSIBLE_TRIP)
        print(no_trip.text)
        raise NoPossibleTripException
def is_timestamp(line):
    """Return True when `line` is a subtitle timestamp line, i.e. it contains
    the arrow separator and both sides of it parse as times.

    :param line: one raw line from a subtitle file.
    :return: bool
    """
    if not Functions.is_included(Constants.ARROW, line):
        return False
    # FIX: split once instead of twice, and return the boolean expression
    # directly instead of `if cond: return True else: return False`.
    start, end = line.split(Constants.ARROW)[0], line.split(Constants.ARROW)[1]
    return TimeStamp.is_time(start) and TimeStamp.is_time(end)
def separate_timestamp(self):
    """Load the subtitle file and collect every timestamp tuple found in it
    into self.timestamps."""
    self.raw_subtitle = HandleFiles.read_lines_from_file(self.name_file)
    self.raw_subtitle = Functions.remove_of_list(self.raw_subtitle,
                                                 Constants.NEWLINE, False)
    self.timestamps.extend(
        self.timestamp.pick_timestamp(line)
        for line in self.raw_subtitle
        if self.timestamp.is_timestamp(line)
    )
def pass_pop_up(self):
    """Dismiss the pop-up hosted in the page's first iframe; do nothing when
    that frame is not present."""
    time.sleep(Functions.get_time_step())
    try:
        switcher = self.driver.switch_to
        switcher.frame(0)
        self.click(Locators.POPUP_BUTTON)
        switcher.default_content()
    except NoSuchFrameException:
        # No pop-up frame this session — nothing to dismiss.
        pass
def check_params(self):
    """Validate the parsed CLI arguments.

    All three given: convert the time string to hours (raising
    ParametersFormatException on an unparsable value). None given: prompt
    interactively. Anything in between: ParametersIncompletedException.
    """
    if self.is_all_params_filled():
        self.args.time = Functions.get_string_to_hours(self.args.time)
        if self.args.time is None:
            raise ParametersFormatException(self.args.time)
        return
    if self.is_all_params_empty():
        self.define_args()
        return
    raise ParametersIncompletedException(self.args)
def get_stock_data():
    """Load every stock's daily data, compute adjusted prices and an
    "opened limit-up" flag, and return one combined DataFrame.

    :return: DataFrame with columns 股票代码 / 交易日期 / 涨跌幅 / 涨停,
        where 涨停 is 1 when the day's open exceeded the previous close
        by more than 9.7%, else 0.
    """
    # Walk the data directory to get the list of stock codes.
    stock_code_list = Functions.get_stock_code_list()
    frames = []
    for code in stock_code_list:
        # Local path of the stock data files — adjust as needed.
        stock_data = Functions.import_stock_data(
            code, columns=['股票代码', '交易日期', '开盘价', '收盘价', '涨跌幅'])
        stock_data = stock_data.sort_values(by='交易日期')
        stock_data.reset_index(drop=True, inplace=True)
        # Backward-adjusted (后复权) prices.
        stock_data[['开盘价', '收盘价']] = Functions.cal_right_price(
            stock_data, right_type='后复权', price_columns=['开盘价', '收盘价'])
        # Flag days that opened limit-up.
        stock_data.loc[stock_data['开盘价'] > stock_data['收盘价'].shift(1) * 1.097, '涨停'] = 1
        # FIX: chained in-place fillna on a column is deprecated in pandas;
        # assign the filled column back instead.
        stock_data['涨停'] = stock_data['涨停'].fillna(0)
        frames.append(stock_data)
    # FIX: DataFrame.append was removed in pandas 2.0 and appending inside
    # the loop was O(n^2); collect frames and concat once.
    if frames:
        all_stock = pd.concat(frames, ignore_index=True)
    else:
        all_stock = pd.DataFrame(columns=['股票代码', '交易日期', '涨跌幅', '涨停'])
    return all_stock[['股票代码', '交易日期', '涨跌幅', '涨停']]
def run(self):
    """Drive the whole Maps flow: configure the page, enter origin and
    destination, then read duration/distance from the trip description."""
    # Page setup.
    self.load_maps()
    self.set_language(Locators.ENGLISH_LANGUAGE)
    self.set_navigational()
    self.set_transport(Locators.TRANSPORT)
    # Origin.
    self.send_place(Locators.BOX_CURRENT_PLACE, self.current_place_name)
    self.set_suggestion()
    self.suggestion_place_current = self.get_suggestion()
    # Destination.
    self.send_place(Locators.BOX_DESTINATION_PLACE, self.destination_place_name)
    self.set_suggestion()
    self.suggestion_place_destination = self.get_suggestion()
    # Result.
    description = self.get_trip_description()
    self.duration = Functions.get_duration_hours(description)
    self.distance = Functions.get_distance_km(description)
    Functions.print_result(self.suggestion_place_current,
                           self.suggestion_place_destination,
                           self.transport, self.duration, self.distance,
                           'Normal')
def get_pd_from_path(path, max_steps, compare_same=False, function_name=None):
    """Load a benchmark result TSV and aggregate it per algorithm and step.

    :param path: path to a tab-separated file with columns
        Name / Iteration / Step / Best value.
    :param max_steps: if truthy, keep only rows with Step < max_steps.
    :param compare_same: when True (with function_name), append the tuned
        hyper-parameter values to the algorithm label.
    :param function_name: benchmark function name used to look up the
        hyper-parameter search space.
    :return: (df, iteration_count, step_count) where df holds per-step
        mean/std plus std-band 'min'/'max' columns (min clamped at 0).
    """
    df = pd.read_csv(path, sep='\t')
    if max_steps:
        df = df[df['Step'] < max_steps]
    df = df.rename(columns={'Name': 'Algorithm'})
    df = df.astype({'Iteration': 'int32', 'Step': 'int32', 'Best value': 'float32'})
    # FIX: nunique() instead of unique().shape[0].
    iteration_count = df['Iteration'].nunique()
    step_count = int(df['Step'].max())
    df = df.groupby(['Algorithm', 'Step'])['Best value'].agg(['mean', 'std']).reset_index()
    # One-std band around the mean; lower bound clamped at 0.
    df['min'] = df['mean'] - df['std']
    df['min'] = df['min'].where(df['min'] > 0, 0)
    df['max'] = df['mean'] + df['std']
    df['Filename'] = path.split('/')[-1]
    if compare_same and function_name:
        algorithm = df['Algorithm'].iloc[0]
        space = get_opt_space(Algorithms(algorithm), Functions(function_name.upper()))
        # FIX: isinstance instead of `type(...) == tuple` (works for subclasses,
        # idiomatic per PEP 8).
        if isinstance(space, tuple):
            space, _ = space
        hyper_parameters = [parameter[2:].replace('-', '_') for parameter in space.keys()]
        df['Hyperparameters'] = ", ".join(hyper_parameters)
        # The tuned values live in a JSON file next to the CSV.
        json_path = path.replace('.csv', '.json')
        algorithm_append = ''
        with open(json_path, 'r') as f:
            data = json.load(f)
            parameters_values = [f"{data[parameter]:.3f}"
                                 if isinstance(data[parameter], float)
                                 else str(data[parameter])
                                 for parameter in hyper_parameters]
            algorithm_append += ' (' + '/'.join(parameters_values) + ')'
        df['Algorithm'] = df['Algorithm'] + algorithm_append
    return df, iteration_count, step_count
def define_args(self):
    """Prompt the user for each missing argument, in a fixed order."""
    prompts = (
        ('current', 'place', 'Define current place'),
        ('destination', 'place', 'Define destination place'),
        ('time', 'duration',
         'Define your allocated time (ex. 1 hr 10 min, 2.5 hr, 45 min)'),
    )
    for attribute, kind, message in prompts:
        setattr(self.args, attribute, Functions.get_valid_arg(kind, message))
def momentum_and_contrarian(all_stock, start_date, end_date, window=3):
    """Backtest monthly momentum and contrarian portfolios.

    :param all_stock: all stocks' daily data (columns 股票代码 / 交易日期 /
        涨跌幅 / 涨停).
    :param start_date: start date (includes the ranking period).
    :param end_date: end date.
    :param window: length of the ranking period in months (default 3).
    :return: (momentum, contrarian) DataFrames, each with columns
        交易日期 / pf_rtn / 资金曲线 (equity curve).
    """
    # Use index data as the reference for trading-day counts.
    # Local path of the index data file — adjust as needed.
    index_data = Functions.import_index_data('sh000001')
    index_data.set_index('交易日期', inplace=True)
    index_data.sort_index(inplace=True)
    index_data = index_data[start_date:end_date]
    # Resample to month-end frequency.
    by_month = index_data[['收盘价']].resample('M').last()
    by_month.reset_index(inplace=True)
    momentum_portfolio_all = pd.DataFrame()
    contrarian_portfolio_all = pd.DataFrame()
    for i in range(window, len(by_month) - 1):
        start_month = by_month['交易日期'].iloc[i - window]  # first month of the ranking period
        end_month = by_month['交易日期'].iloc[i]             # last month of the ranking period
        next_month = by_month['交易日期'].iloc[i + 1]        # end of the holding month
        # Data inside the ranking period.
        stock_temp = all_stock[(all_stock['交易日期'] > start_month) &
                               (all_stock['交易日期'] <= end_month)]
        # Index data over the same window gives the trading-day benchmark.
        index_temp = index_data[start_month:end_month]
        # Count each stock's trading days in the ranking period and drop any
        # stock suspended more than 5 days per ranking month (e.g. >15 days
        # for a 3-month window).
        trading_days = stock_temp['股票代码'].value_counts()
        keep_list = trading_days[trading_days >= (len(index_temp) - 5 * window)].index
        stock_temp = stock_temp[stock_temp['股票代码'].isin(keep_list)]
        # Cumulative return of each stock over the ranking period.
        grouped = stock_temp.groupby('股票代码')['涨跌幅'].agg(rtn=lambda x: (x + 1).prod() - 1)
        grouped.sort_values(by='rtn', inplace=True)
        # Top 5% of cumulative return -> momentum portfolio;
        # bottom 5% -> contrarian portfolio.
        num = floor(len(grouped) * 0.05)
        momentum_code_list = grouped.index[-num:]
        contrarian_code_list = grouped.index[0:num]
        # ======================== momentum portfolio ========================
        momentum_portfolio = _holding_period_returns(
            all_stock, momentum_code_list, end_month, next_month)
        # FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat.
        momentum_portfolio_all = pd.concat(
            [momentum_portfolio_all, momentum_portfolio], ignore_index=True)
        momentum_portfolio_all['资金曲线'] = (1 + momentum_portfolio_all['pf_rtn']).cumprod()
        # ======================== contrarian portfolio ======================
        contrarian_portfolio = _holding_period_returns(
            all_stock, contrarian_code_list, end_month, next_month)
        contrarian_portfolio_all = pd.concat(
            [contrarian_portfolio_all, contrarian_portfolio], ignore_index=True)
        contrarian_portfolio_all['资金曲线'] = (1 + contrarian_portfolio_all['pf_rtn']).cumprod()
    return momentum_portfolio_all, contrarian_portfolio_all


def _holding_period_returns(all_stock, code_list, end_month, next_month):
    """Equal-weight daily returns of one portfolio over the holding month.

    Stocks that opened limit-up on the first trading day of the month are
    dropped (they could not have been bought).

    :return: DataFrame with columns 交易日期 / pf_rtn.
    """
    held = all_stock[(all_stock['股票代码'].isin(code_list)) &
                     (all_stock['交易日期'] > end_month) &
                     (all_stock['交易日期'] <= next_month)]
    first_day_limit = held.groupby('股票代码')['涨停'].first()
    hold_list = first_day_limit[first_day_limit == 0].index
    held = held[held['股票代码'].isin(hold_list)].reset_index(drop=True)
    # FIX: positional DataFrame.pivot arguments were removed in pandas 2.0 —
    # use keyword arguments.
    portfolio = held.pivot(index='交易日期', columns='股票代码', values='涨跌幅').fillna(0)
    num = portfolio.shape[1]
    weights = num * [1. / num]
    portfolio['pf_rtn'] = np.dot(np.array(portfolio), np.array(weights))
    portfolio.reset_index(inplace=True)
    return portfolio[['交易日期', 'pf_rtn']]
def set_transport(self, locator):
    """Select the transport mode identified by `locator`, then pause."""
    message = f'Defining transport: {self.transport} ...'
    print(message)
    self.click(locator)
    time.sleep(Functions.get_time_step())
# Report section of the backtest script. NOTE(review): this fragment continues
# earlier code — `m` (momentum results), `c` (contrarian results), `date_line`,
# `capital_line`, `annual_return`, `max_drawdown`, `sharpe_ratio` and `plt` are
# all defined before this point; confirm against the full file.
return_line = list(m['pf_rtn'])
# Momentum strategy: key backtest metrics.
print('\n=====================动量策略主要回测指标=====================')
print("平均年化收益: %.2f" % annual_return(date_line, capital_line))
print("最大回撤: %.2f, 开始时间: %s, 结束时间: %s" % max_drawdown(date_line, capital_line))
print("夏普比例: %.2f" % sharpe_ratio(date_line, capital_line, return_line))
# Contrarian strategy: key backtest metrics.
date_line = list(c['交易日期'])
capital_line = list(c['资金曲线'])
return_line = list(c['pf_rtn'])
print('\n=====================反转策略主要回测指标=====================')
print("平均年化收益: %.2f" % annual_return(date_line, capital_line))
print("最大回撤: %.2f, 开始时间: %s, 结束时间: %s" % max_drawdown(date_line, capital_line))
print("夏普比例: %.2f" % sharpe_ratio(date_line, capital_line, return_line))
# Benchmark (Shanghai Composite) metrics over the same period.
index_data = Functions.import_index_data('sh000001')
index_data.sort_values(by='交易日期', inplace=True)
# Restrict the index to the dates covered by the strategies.
index_data = index_data[index_data['交易日期'].isin(date_line)]
capital_line = list(index_data['收盘价'])
return_line = list(index_data['涨跌幅'])
print('\n=====================同期上证指数主要回测指标=====================')
print("平均年化收益: %.2f" % annual_return(date_line, capital_line))
print("最大回撤: %.2f, 开始时间: %s, 结束时间: %s" % max_drawdown(date_line, capital_line))
print("夏普比例: %.2f" % sharpe_ratio(date_line, capital_line, return_line))
# Prepare the equity curves for plotting (indexed by trade date).
plt.figure(figsize=(14, 7))
m.set_index('交易日期', inplace=True)
c.set_index('交易日期', inplace=True)
# Cumulative benchmark return over the period.
index_data['大盘收益'] = (index_data['涨跌幅'] + 1).cumprod() - 1
index_data.set_index('交易日期', inplace=True)
import datetime # Config Parsing config = SafeConfigParser() config.read('config.ini') # current working directory here = os.path.dirname(__file__) # logger logging.basicConfig(stream=sys.stderr, level=logging.DEBUG) logging.debug("logging started") logger = logging.getLogger(__name__) app = application = Bottle() functions = Functions() #/ping @route('/ping', method='GET') def ping(): return 'pong@%d' % (int(time.time())) #/started?taskname=task_name&time=12312131&host=172.16 @route('/started', method='GET') def startSignal(): task = request.query.get('taskname') start_time = int(request.query.get('time')) if None in [task, start_time]: return HTTPResponse(status=400,
微信:xingbx007 """ from common import Functions from common import config import pandas as pd import matplotlib.pyplot as plt # from kdj import Functions plt.rcParams['font.family'] = ['SimHei'] pd.set_option('expand_frame_repr', False) # 当列太多时不换行 # 得到所有股票的列表 code_list = Functions.get_stock_code_list_in_one_dir(config.stock_data_path) # code_list = code_list[:10] # 遍历所有股票 output = pd.DataFrame() for code in code_list: # 导入数据 columns_list = [ '交易日期', '股票代码', '开盘价', '最高价', '最低价', '收盘价', '涨跌幅', '成交额', '成交额' ] data = Functions.import_stock_data(code, columns=columns_list) # 计算后复权价 data[['开盘价', '最高价', '最低价', '收盘价']] = Functions.cal_fuquan_price(data) data = data.sort_values(by=['交易日期'], ascending=True)
# coding: utf-8 # 导入需要的库 import mplfinance as mpf from common import Functions stock_data = Functions.import_stock_data('sh600000') rename_map = { "交易日期": "Date", "开盘价": "Open", "收盘价": "Close", "最高价": "High", "最低价": "Low", "成交量": "Volume", } stock_data = stock_data.rename(columns=rename_map) stock_data = stock_data[list(rename_map.values())] stock_data = stock_data[stock_data['Date'] >= '2020-01-10'] stock_data.set_index('Date', inplace=True) print(stock_data) """ up: 设置上涨K线的颜色 down: 设置下跌K线的颜色 edge=inherit: K线图边缘和主题颜色保持一致 volume=in: 成交量bar的颜色继承K线颜色 wick=in: 上下引线颜色继承K线颜色 """ mc = mpf.make_marketcolors(up='r',
def search_elements(self, by_locator):
    """Pause one time step, then return every element matching `by_locator`
    (a (strategy, value) pair)."""
    time.sleep(Functions.get_time_step())
    strategy, value = by_locator
    return self.driver.find_elements(strategy, value)
def invoke_snowflake_load_from_s3_event(event, context):
    """AWS Lambda handler: on an S3 object-created event, COPY the uploaded
    file into Snowflake table <schema>.OutputAreaJson.

    Connection and load settings come from SSM Parameter Store under
    /snowflake/<env>/...; `env` defaults to 'dev'. Errors in each stage are
    printed (best effort) rather than raised, matching CloudWatch-log-driven
    operation of this lambda.
    """
    env = os.environ.get('env')
    if env is None:
        env = 'dev'
    print('Setting environment to ' + env + '...')
    print('Getting parameters from parameter store...')
    # Snowflake connection parameters
    param = '/snowflake/' + env + '/ac-param'
    ac = Functions.get_parameter(param, False)
    param = '/snowflake/' + env + '/un-param'
    un = Functions.get_parameter(param, False)
    param = '/snowflake/' + env + '/pw-param'
    pw = Functions.get_parameter(param, True)
    # Snowflake data load parameters
    param = '/snowflake/' + env + '/role-param'
    role = Functions.get_parameter(param, True)
    param = '/snowflake/' + env + '/db-param'
    db = Functions.get_parameter(param, True)
    param = '/snowflake/' + env + '/schema-param'
    schema = Functions.get_parameter(param, True)
    param = '/snowflake/' + env + '/wh-param'
    wh = Functions.get_parameter(param, True)
    param = '/snowflake/' + env + '/file-format-param'
    file_format = Functions.get_parameter(param, True)
    # Connect to the Snowflake data warehouse.
    try:
        conn = sf.connect(
            account=ac,
            user=un,
            password=pw,
            role=role,
            warehouse=wh,
            database=db,
            schema=schema,
            ocsp_response_cache_filename="/tmp/ocsp_response_cache"
        )
        print(str(conn))
        print('Snowflake connection opened...')
    except Exception as e:
        # FIX: previously execution continued with `conn` unbound, so the
        # trailing `conn.close()` raised NameError. Without a connection
        # there is nothing to do — log and bail out.
        print(e)
        return
    try:
        sql = 'USE ROLE {}'.format(role)
        Functions.execute_query(conn, sql)
        sql = 'SELECT current_role()'
        print('role: ' + Functions.return_query(conn, sql))
        sql = 'SELECT current_warehouse()'
        print('warehouse: ' + Functions.return_query(conn, sql))
        try:
            # Best effort: resuming may fail when the warehouse already runs.
            sql = 'ALTER WAREHOUSE {} RESUME'.format(wh)
            Functions.execute_query(conn, sql)
        except Exception as e:
            print(e)
        sql = 'SELECT current_schema()'
        print('schema: ' + Functions.return_query(conn, sql))
        sql = 'SELECT current_database()'
        print('database: ' + Functions.return_query(conn, sql))
        # Identify the object that triggered this lambda:
        # https://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
        try:
            bucket = event['Records'][0]['s3']['bucket']['name']
            arn = event['Records'][0]['s3']['bucket']['arn']
            for record in event['Records']:
                key = record['s3']['object']['key']
                size = record['s3']['object']['size']
                print(
                    'bucket: ' + bucket +
                    '\narn: ' + arn +
                    '\nkey: ' + key +
                    '\nsize: ' + str(size)
                )
        except Exception as e:
            print(e)
        try:
            sql = 'TRUNCATE ' + schema + '.OutputAreaJson'
            print(sql)
            Functions.execute_query(conn, sql)
            # NOTE(review): bucket/key come straight from the event and are
            # interpolated into SQL. Acceptable only while the bucket is
            # trusted; consider validating `key` before use.
            sql = "copy into " + schema + ".OutputAreaJson from @" + str.replace(bucket, "-", "_") + "/" + key + \
                  " FILE_FORMAT = '" + file_format + "' ON_ERROR = 'ABORT_STATEMENT';"
            print(sql)
            Functions.execute_query(conn, sql)
        except Exception as e:
            print(e)
    except Exception as e:
        print(e)
    finally:
        conn.close()
        print('Snowflake connection closed...')
def invoke_snowflake_load_from_cloudwatch_event(event, context):
    """AWS Lambda handler: on a CloudWatch S3 PutObject event, COPY the file
    into Snowflake table <schema>.OutputAreaJson.

    Connection and load settings come from SSM Parameter Store under
    /snowflake/<env>/...; `env` defaults to 'dev'. Each stage is wrapped in
    its own try/except that only prints, so failures surface in the logs
    rather than aborting the lambda.
    """
    env = os.environ.get('env')
    if env is None:
        env = 'dev'
    print('Setting environment to ' + env + '...')
    print('Getting parameters from parameter store...')
    # Snowflake connection parameters
    param = '/snowflake/' + env + '/ac-param'
    ac = Functions.get_parameter(param, False)
    param = '/snowflake/' + env + '/un-param'
    un = Functions.get_parameter(param, False)
    param = '/snowflake/' + env + '/pw-param'
    pw = Functions.get_parameter(param, True)
    # Snowflake data load parameters
    param = '/snowflake/' + env + '/role-param'
    role = Functions.get_parameter(param, True)
    param = '/snowflake/' + env + '/db-param'
    db = Functions.get_parameter(param, True)
    param = '/snowflake/' + env + '/schema-param'
    schema = Functions.get_parameter(param, True)
    param = '/snowflake/' + env + '/wh-param'
    wh = Functions.get_parameter(param, True)
    param = '/snowflake/' + env + '/file-format-param'
    file_format = Functions.get_parameter(param, True)
    # connect to snowflake data warehouse
    try:
        conn = sf.connect(
            account=ac,
            user=un,
            password=pw,
            role=role,
            warehouse=wh,
            database=db,
            schema=schema,
            ocsp_response_cache_filename="/tmp/ocsp_response_cache"
        )
        print('Snowflake connection opened...')
    except Exception as e:
        # NOTE(review): when connect fails, `conn` stays unbound and the
        # `finally: conn.close()` below raises NameError — confirm and fix.
        print(e)
    try:
        print('Got here')
        try:
            # Best effort: resume the warehouse (may already be running).
            sql = 'ALTER WAREHOUSE {} RESUME'.format(wh)
            print(sql)
            # NOTE(review): `with conn:` is entered several times in this
            # function — verify the connector's context manager does not
            # close/commit the connection in a way that breaks later use.
            with conn:
                with conn.cursor() as cursor:
                    cursor.execute(sql)
        except Exception as e:
            print(e)
        print('Got here2')
        # get the object that triggered cloudwatch
        # https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#events-for-services-not-listed
        try:
            bucket = event['detail']['requestParameters']['bucketName']
            key = event['detail']['requestParameters']['key']
            print(
                'bucket: ' + bucket +
                '\nkey: ' + key
            )
        except Exception as e:
            print(e)
        print('Got here3')
        try:
            # Empty the staging table before loading the new file.
            sql = 'TRUNCATE ' + schema + '.OutputAreaJson'
            print(sql)
            with conn:
                with conn.cursor() as cursor:
                    cursor.execute(sql)
        except Exception as e:
            print(e)
        print('Got here4')
        try:
            # Load the file from the external stage named after the bucket
            # (dashes mapped to underscores); only the basename of `key` is
            # used. NOTE(review): bucket/key are interpolated into SQL —
            # trusted-bucket assumption; validate if that changes.
            sql = "copy into " + schema + ".OutputAreaJson from @" \
                  + str.replace(bucket, "-", "_") + "/" + key[key.rindex('/')+1:len(key)] + \
                  " FILE_FORMAT = '" + file_format + "' ON_ERROR = 'ABORT_STATEMENT';"
            print(sql)
            with conn:
                with conn.cursor() as cursor:
                    cursor.execute(sql)
        except Exception as e:
            print(e)
        print('Got here5')
    except Exception as e:
        print(e)
    finally:
        conn.close()
        print('Snowflake connection closed...')


if __name__ == "__main__":
    # Local smoke test: replay a saved CloudWatch event.
    # snowflake_validate({}, {})
    json_event = "/var/task/event.json"
    with open(json_event) as response:
        _event = json.load(response)
    invoke_snowflake_load_from_cloudwatch_event(_event, '')
def get_ons_oa_http_request(event, context):
    """AWS Lambda handler: page through the ONS output-area lookup API for
    the attribute named in `event` and upload each JSON page to S3.

    Paging continues while the API reports exceededTransferLimit, bounded by
    max_loops pages and max_error_loops retries; all limits and the target
    bucket come from SSM Parameter Store under /lambda-https-request/<env>/.

    :return: event['elements'] (echoed back from the input event).
    """
    env = os.environ['env']
    print(f'Setting environment to {env}...')
    print('Getting parameters from parameter store...')
    param = '/lambda-https-request/' + env + '/s3-bucket-param'
    s3bucket = Functions.get_parameter(param, False)
    print(f'Parameter {param} value is: {s3bucket}')
    param = '/lambda-https-request/' + env + '/ons-oa-lookup-url-param'
    base_url = Functions.get_parameter(param, False)
    print(f'Parameter {param} value is: {base_url}')
    param = '/lambda-https-request/' + env + '/max-loops-param'
    max_loops = int(Functions.get_parameter(param, False))
    print(f'Parameter {param} value is: {max_loops}')
    param = '/lambda-https-request/' + env + '/timeout-param'
    timeout = int(Functions.get_parameter(param, False))
    print(f'Parameter {param} value is: {timeout}')
    param = '/lambda-https-request/' + env + '/result-record-count'
    result_record_count = int(Functions.get_parameter(param, False))
    print(f'Parameter {param} value is: {result_record_count}')
    param = '/lambda-https-request/' + env + '/max-error-loops-param'
    max_error_loops = int(Functions.get_parameter(param, False))
    print(f'Parameter {param} value is: {max_error_loops}')
    try:
        exceeded_transfer_limit = True
        counter = 1
        error_counter = 1
        offset = 0
        url_result_record_count = 'resultRecordCount=' + str(result_record_count)
        # Round-trip through json to get a plain-dict copy of the event.
        event_list = json.loads(json.dumps(event, indent=4))
        attribute = event_list['attribute']
        print(f'Lookup attribute is: {attribute}')
        while exceeded_transfer_limit:
            # One S3 object per page, keyed by attribute + timestamp + page no.
            curr_datetime = datetime.now()
            curr_datetime_str = curr_datetime.strftime('%Y%m%d_%H%M%S%f')
            filekey = attribute + '_' + curr_datetime_str + '_' + str(counter) + '.json'
            print('Building URL...')
            url_offset = 'resultOffset=' + str(offset)
            urls = [base_url, url_offset, url_result_record_count]
            join_urls = '&'.join(urls)
            url = join_urls.replace('<attribute>', attribute)
            print(f'URL: {url} built...')
            # Retry the API call until it returns without an 'error' key.
            # NOTE(review): error_counter is never reset between pages, so the
            # retry budget is shared across the whole run — confirm intended.
            while error_counter < max_error_loops:
                print('Attempting to load api (' + str(error_counter) + ') of (' + str(max_error_loops) + ')...')
                # NOTE(review): if max_error_loops <= 1 this loop never runs
                # and `data` below is unbound — confirm parameter invariants.
                data = Functions.load_api(filekey, timeout, url)
                if data.get('error'):
                    print(f'API returned error message: {data}')
                else:
                    break
                error_counter += 1
            if error_counter == max_error_loops:
                print('You have reached the maximum number of error loops (' + str(max_error_loops) + ')')
                break
            # The API reports whether more records remain beyond this page.
            if data.get('exceededTransferLimit'):
                exceeded_transfer_limit = json.dumps(data['exceededTransferLimit'])
            else:
                exceeded_transfer_limit = False
            # NOTE(review): offset is reset each iteration and then set to the
            # size of the current page, so resultOffset never accumulates
            # across pages — looks like paging stalls after page 2; confirm.
            offset = 0
            if data.get('features'):
                number_of_features = len(data['features'])
                offset += number_of_features
                print(f'Json string for {attribute} loop {counter} contains {number_of_features} features')
            else:
                print('Json data does not contain features objects')
                break
            if counter == max_loops:
                # NOTE(review): breaking here skips the upload of the final
                # page fetched in this iteration — confirm intended.
                print('You have reached the maximum number of loops (' + str(max_loops) + ')')
                break
            counter += 1
            Functions.upload_file_to_s3(s3bucket, filekey, data)
    except Exception as e:
        # NOTE(review): if the exception fires before event_list is assigned,
        # the return below raises NameError — confirm and guard.
        print(e)
    return event_list['elements']


# #def snowflake_validate(event, context):
#     env = os.environ['env']
#     print('Setting environment to ' + env + '...')
#     print('Getting parameters from parameter store...')
#     param = '/snowflake/' + env + '/ac-param'
#     ac = Functions.get_parameter(param, False)
#     param = '/snowflake/' + env + '/un-param'
#     un = Functions.get_parameter(param, False)
#     param = '/snowflake/' + env + '/pw-param'
#     pw = Functions.get_parameter(param, True)
#     # connect to snowflake data warehouse
#     conn = snowflake.connector.connect(
#         account=ac,
#         user=un,
#         password=pw
#     )
#     sql = "SELECT current_version()"
#     with conn:
#         with conn.cursor() as cur:
#             cur.execute(sql)
#             one_row = cur.fetchone()
#             print(one_row[0])
def click(self, by_locator):
    """Wait until the element is clickable, click it, then pause one step."""
    wait = WebDriverWait(self.driver, Data.TIMEOUT)
    element = wait.until(EC.element_to_be_clickable(by_locator))
    element.click()
    time.sleep(Functions.get_time_step())
from common import Functions from exceptions import NoPossibleTripException, RepeatLoopException, \ ParametersIncompletedException, ParametersFormatException, BrowserImplementationException from read_parameters import ReadParameters from start_driver import StartDriver, Browser from maps import Maps from user import User from selenium.common.exceptions import NoSuchElementException, WebDriverException, TimeoutException #------------------------------------------------------------------------------------------- #------------------------------------------Main--------------------------------------------- #------------------------------------------------------------------------------------------- if __name__ == '__main__': Functions.credits() try: parser = ReadParameters() args = parser.get_params() start_driver = StartDriver(Browser.CHROME) b_repeat = True while b_repeat: try: maps = Maps(start_driver.driver, args.current, args.destination) maps.run() user = User(maps.get_current_place(),