def __init__(self, exc: DatabaseError, ora_ex: Ora_Error, conn_label: str) -> None:
    """Wrap a DatabaseError with Oracle-specific diagnostics.

    Builds a printf-style ``self.message`` template and a matching
    ``self.args`` tuple: the Oracle error and connection label, optionally a
    120-char window of the failing statement at the error offset, an ssh
    tunnel hint for localhost connections, and the Oracle error context.
    """
    DatabaseError.__init__(self, exc.statement, exc.params,
                           exc.connection_invalidated)
    fmt_parts = ['%s <%s>']
    fmt_args = [ora_ex, conn_label]
    if exc.statement and ora_ex.offset:
        # Show the statement fragment where Oracle reported the error.
        fmt_parts.append('\nat: %s')
        fmt_args.append(exc.statement[ora_ex.offset:ora_ex.offset + 120])
    if ora_ex.code in self.tunnel_hint_codes and 'localhost' in conn_label:
        # A localhost connection failing with a tunnel-ish code usually
        # means the ssh forward died.
        fmt_parts.append('\nhint: ssh tunnel down?')
    fmt_parts.append('\nin: %s')
    fmt_args.append(ora_ex.context)
    self.message = ''.join(fmt_parts)
    self.args = tuple(fmt_args)
def campaign_summary(business_account_id, store_ids, date_param_cd):
    """Return the campaign summary JSON for one business account.

    Parameters:
        business_account_id: account whose campaigns are summarized.
        store_ids: optional store filter, passed through to the query.
        date_param_cd: named date-range code configured in ParamsAppModel.

    Raises:
        ValueError: if ``date_param_cd`` is not a configured code.
        DatabaseError: if no date range exists for the account/code pair.
    """
    valid_codes = [
        r for r, in db.session.query(
            ParamsAppModel.param_name_cd.distinct()).all()
    ]
    if date_param_cd not in valid_codes:
        raise ValueError("Wrong date_param_cd")
    date_range = ParamsAppModel.get_date_range(business_account_id,
                                               date_param_cd)
    if not date_range:
        # BUG FIX: the original format string had a single %s for a
        # two-element tuple, so the raise itself failed with
        # "not all arguments converted during string formatting".
        raise DatabaseError(
            "ParamAppModel returns no corresponding date range for "
            "business_account_id=%s and date param %s"
            % (business_account_id, date_param_cd))
    lower_bound = date_range['lower_bound']
    upper_bound = date_range['upper_bound']
    logger.info('calling campaign summary protected method')
    return __campaign_summary(lower_bound, upper_bound, business_account_id,
                              store_ids)
def updateUser(params):
    """Update name/fullname/password of the t_user row matching params['id'].

    Parameters:
        params: mapping with keys ``id``, ``name``, ``fullname``, ``password``
            bound to the UPDATE statement.

    Raises:
        DatabaseError: on any failure, after rolling back the session.
            (The message is Chinese for "an error occurred while inserting
            user data".)
    """
    try:
        sql = text(
            'update t_user set name=:name, fullname=:fullname, password=:password '
            'where id=:id')
        db.session.execute(sql, params)
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        print_exc()
        # BUG FIX: Exception has no .message attribute in Python 3, so the
        # original raised AttributeError here instead of DatabaseError.
        raise DatabaseError('插入用户数据时发生错误', str(e), e)
def _postgresql_engine():
    """Create a SQLAlchemy engine for PostgreSQL from environment variables.

    Reads DB_HOST/DB_PORT (defaulting to 127.0.0.1:5432) and the required
    DB_USER/DB_PASSWORD/DB_NAME. Raises DatabaseError when any required
    credential is missing (the message is Russian for "cannot connect to the
    database, check the credentials").
    """
    host = os.environ.get('DB_HOST', '127.0.0.1')
    port = os.environ.get('DB_PORT', '5432')
    user = os.environ.get('DB_USER')
    password = os.environ.get('DB_PASSWORD')
    db_name = os.environ.get('DB_NAME')
    # Guard clause: fail fast when any required credential is absent/empty.
    if not (user and password and db_name):
        raise DatabaseError(
            'Не возможно подключиться к базе данных. Проверьте учётные данные',
            params=[user, password, db_name],
            orig=None)
    dsn = f'postgresql://{user}:{password}@{host}:{port}/{db_name}'
    return create_engine(dsn)
def consumer_segment_summary(business_account_id, date_param_cd,
                             store_ids: list):
    """Return current vs previous-period consumer segment tags, merged.

    Columns from the previous period get the ``_prev`` suffix; missing
    previous-period values are filled with 0.

    Raises:
        ValueError: if ``date_param_cd`` is not a configured code.
        DatabaseError: if no date range exists for the account/code pair.
    """
    valid_codes = [
        r for r, in db.session.query(
            ParamsAppModel.param_name_cd.distinct()).all()
    ]
    if date_param_cd not in valid_codes:
        raise ValueError("Wrong date_param_cd")
    date_range = ParamsAppModel.get_date_range(business_account_id,
                                               date_param_cd)
    if not date_range:
        # BUG FIX: the original format string had a single %s for a
        # two-element tuple, so the raise itself failed with TypeError.
        raise DatabaseError(
            "ParamAppModel returns no corresponding date range for "
            "business_account_id=%s and date param %s"
            % (business_account_id, date_param_cd))
    report_date = date_range['report_date']
    summary_df = __consumer_tags_summary(report_date,
                                         date_range['lower_bound'],
                                         date_range['upper_bound'],
                                         business_account_id)
    prev_summary_df = __consumer_tags_summary(report_date,
                                              date_range['prev_lower_bound'],
                                              date_range['prev_upper_bound'],
                                              business_account_id)
    if store_ids:
        summary_df = summary_df[summary_df['attributed_store_id'].isin(
            store_ids)]
        prev_summary_df = prev_summary_df[
            prev_summary_df['attributed_store_id'].isin(store_ids)]
    # Segment dimensions shared by both periods.
    key_columns = [
        'gender', 'recency', 'age_group', 'frequency', 'monetary',
        'clv_1year_level', 'clv_3year_level', 'badge', 'attributed_store_id'
    ]
    comb_summary_df = pd.merge(summary_df,
                               prev_summary_df,
                               how='left',
                               on=key_columns,
                               suffixes=['_curr', '_prev'])
    comb_summary_df.fillna(0, inplace=True)
    return comb_summary_df
def __init__(self, database=None, driver='sqlite', host=None, port=None,
             trackingDbverbose=False):
    """
    Instantiate the results database, creating metrics, plots and summarystats tables.

    Parameters
    ----------
    database : str, optional
        Database path/name. When None, a sqlite file
        ``trackingDb_sqlite.db`` in the current directory is used and
        ``driver`` is forced to 'sqlite'.
    driver : str, optional
        SQLAlchemy driver name; only honored when ``database`` is given.
    host, port : optional
        Server location for non-sqlite drivers; credentials are resolved
        through DbAuth by host/port.
    trackingDbverbose : bool, optional
        When True, echo SQL on the engine and print the connection message.
    """
    self.verbose = trackingDbverbose
    # Connect to database
    # for sqlite, connecting to non-existent database creates it automatically
    if database is None:
        # Default is a file in the current directory.
        self.database = os.path.join(os.getcwd(), 'trackingDb_sqlite.db')
        self.driver = 'sqlite'
        # NOTE(review): self.host / self.port are not set on this branch —
        # confirm nothing reads them later for sqlite connections.
    else:
        self.database = database
        self.driver = driver
        self.host = host
        self.port = port
    if self.driver == 'sqlite':
        dbAddress = url.URL(drivername=self.driver, database=self.database)
    else:
        # Non-sqlite: username/password come from the DbAuth lookup,
        # keyed by host and (stringified) port.
        dbAddress = url.URL(
            self.driver,
            username=DbAuth.username(self.host, str(self.port)),
            password=DbAuth.password(self.host, str(self.port)),
            host=self.host,
            port=self.port,
            database=self.database)
    engine = create_engine(dbAddress, echo=self.verbose)
    if self.verbose:
        print('Created or connected to MAF tracking %s database at %s'
              % (self.driver, self.database))
    self.Session = sessionmaker(bind=engine)
    self.session = self.Session()
    # Create the tables, if they don't already exist.
    try:
        Base.metadata.create_all(engine)
    except DatabaseError:
        raise DatabaseError(
            "Cannot create a %s database at %s. Check directory exists."
            % (self.driver, self.database))
def test_order_service_delete(service):
    """delete() returns True on success, raises NotFoundError when nothing
    matched, and propagates DatabaseError from the session."""
    service.db_session.query().filter().filter().delete.return_value = True
    result = service.delete(user_slug="WILLrogerPEREIRAslugBR",
                            order_slug="WILLrogerPEREIRAslugBR")
    assert result is True

    # A delete that matches no rows must surface as NotFoundError.
    service.db_session.query().filter().filter().delete.return_value = False
    with pytest.raises(NotFoundError):
        service.delete(user_slug="WILLrogerPEREIRAslugBR",
                       order_slug="WILLrogerPEREIRAslugBR")

    # BUG FIX: the original assignment ended with a stray comma, making
    # side_effect a one-element tuple — an *iterable* side effect that would
    # raise StopIteration on any second call. Assign the exception directly.
    service.db_session.query().filter().filter().delete.side_effect = \
        DatabaseError("statement", "params", "DETAIL: orig\n")
    with pytest.raises(DatabaseError):
        service.delete(user_slug="WILLrogerPEREIRAslugBR",
                       order_slug="WILLrogerPEREIRAslugBR")
def test_login_controller_authorized_error(mocker, flask_app, memory_blueprint):
    """A DatabaseError during OAuth user lookup/creation must leave the user
    anonymous and redirect (302) with ``error=error`` in the Location."""
    # Make the service layer fail when the OAuth callback tries to
    # fetch-or-create the user.
    mocker.patch.object(UserService,
                        "get_create_oauth",
                        side_effect=DatabaseError("statement", "params",
                                                  "orig"))
    with responses.RequestsMock() as rsps:
        # Stub every outbound Google HTTP call with an empty 200.
        rsps.add(responses.GET, re.compile(".+google.+"), status=200)
        with flask_app.test_request_context("/auth/google/authorized"):
            assert current_user.is_authenticated is False
            returned = user_logged_in(memory_blueprint,
                                      {"access_token": "fake-token"})
            # Login must NOT have happened despite the valid token.
            assert current_user.is_authenticated is False
            # The handler returns a redirect response object, not False.
            assert returned is not False
            assert re.search(r"error=error$",
                             returned.headers["Location"]) is not None
            assert returned.status_code == 302
def test_order_service_insert(service):
    """insert() returns True whether or not a prior row exists, rejects
    undecodable slugs, and propagates DatabaseError from the session."""
    # No existing row for the slug: plain insert path.
    service.db_session.query().filter().one_or_none.return_value = None
    result = service.insert(user_slug="WILLrogerPEREIRAslugBR",
                            item_list=[{"item_id": "id", "amount": 2}])
    assert result is True

    # An existing row for the slug: update/merge path.
    service.db_session.query().filter().one_or_none.return_value = MagicMock(
        autospec=True)
    result = service.insert(user_slug="WILLrogerPEREIRAslugBR",
                            item_list=[{"item_id": "id", "amount": 2}])
    assert result is True

    # "churros" is not a decodable slug.
    with pytest.raises(SlugDecodeError):
        result = service.insert(user_slug="churros",
                                item_list=[{"item_id": "id", "amount": 2}])

    # BUG FIX: the original assignment ended with a stray comma, making
    # side_effect a one-element tuple — an *iterable* side effect that would
    # raise StopIteration on any second call. Assign the exception directly.
    service.db_session.query().filter().one_or_none.side_effect = \
        DatabaseError("statement", "params", "DETAIL: orig\n")
    with pytest.raises(DatabaseError):
        service.insert(user_slug="WILLrogerPEREIRAslugBR",
                       item_list=[{"item_id": "id", "amount": 2}])
def reward_summary(business_account_id, date_param_cd):
    """Return the reward summary for one business account over the named
    date range (current and previous period bounds are passed through).

    Raises:
        ValueError: if ``date_param_cd`` is not a configured code.
        DatabaseError: if no date range exists for the account/code pair.
    """
    valid_codes = [
        r for r, in db.session.query(
            ParamsAppModel.param_name_cd.distinct()).all()
    ]
    if date_param_cd not in valid_codes:
        raise ValueError("Wrong date_param_cd")
    date_range = ParamsAppModel.get_date_range(business_account_id,
                                               date_param_cd)
    if not date_range:
        # BUG FIX: the original format string had a single %s for a
        # two-element tuple, so the raise itself failed with TypeError.
        raise DatabaseError(
            "ParamAppModel returns no corresponding date range for "
            "business_account_id=%s and date param %s"
            % (business_account_id, date_param_cd))
    return __reward_summary(business_account_id, date_param_cd,
                            date_range['lower_bound'],
                            date_range['upper_bound'],
                            date_range['prev_lower_bound'],
                            date_range['prev_upper_bound'])
def RTcampaign_summary(business_account_id, date_param_cd, store_ids):
    """Return the real-time campaign summary for one business account.

    Raises:
        ValueError: if ``date_param_cd`` is not a configured code.
        DatabaseError: if no date range exists for the account/code pair.
    """
    valid_codes = [
        r for r, in db.session.query(
            ParamsAppModel.param_name_cd.distinct()).all()
    ]
    if date_param_cd not in valid_codes:
        raise ValueError("Wrong date_param_cd")
    date_range = ParamsAppModel.get_date_range(business_account_id,
                                               date_param_cd)
    if not date_range:
        # BUG FIX: the original format string had a single %s for a
        # two-element tuple, so the raise itself failed with TypeError.
        raise DatabaseError(
            "ParamAppModel returns no corresponding date range for "
            "business_account_id=%s and date param %s"
            % (business_account_id, date_param_cd))
    return __rt_campaign_summary(date_range['report_date'],
                                 date_range['lower_bound'],
                                 date_range['upper_bound'],
                                 business_account_id, date_param_cd,
                                 store_ids)
def raise_db_error(name: str):
    """Unconditionally raise a DatabaseError; *name* is accepted but unused
    (kept for the caller's callback signature)."""
    error = DatabaseError("DB Error")
    raise error
# NOTE(review): this `with` block is the tail of a preceding test function
# whose `def` line lies outside this chunk.
with login_disabled_app.test_client() as client:
    response = client.put("api/order/insert", json=invalid_amount)
    data = json.loads(response.data)
    # The error payload must deserialize against the shared error schema.
    ErrorSchema().load(data)
    assert response.status_code == 400


@pytest.mark.parametrize(
    "method,http_method,test_url,error,status_code",
    [("insert", "PUT", "api/order/insert", HTTPException(), 400),
     ("insert", "PUT", "api/order/insert", ConnectionError(), 502),
     ("insert", "PUT", "api/order/insert",
      DatabaseError("statement", "params", "orig"), 400),
     ("insert", "PUT", "api/order/insert", SQLAlchemyError(), 504),
     ("insert", "PUT", "api/order/insert", Exception(), 500)])
def test_insert_controller_error(mocker, get_request_function, willstores_ws,
                                 request_json, willstores_response_json,
                                 method, http_method, test_url, error,
                                 status_code):
    """Each service-layer error must map to its HTTP status code."""
    # Force the service method to raise the parametrized error.
    mocker.patch.object(OrderService, method, side_effect=error)
    with responses.RequestsMock() as rsps:
        # Stub the upstream willstores service call.
        rsps.add(responses.POST,
                 re.compile(willstores_ws),
                 status=200,
                 json=willstores_response_json)
        make_request = get_request_function(http_method)
        # NOTE(review): the function body appears truncated at this chunk
        # boundary — the request/assertion lines follow elsewhere.
def raise_exception(self, *args):
    """Test stub: ignore every argument and raise a canned DatabaseError."""
    exc = DatabaseError("SELECT 1", {}, "")
    raise exc
def execute(statement, *args, **kwargs):
    """Test stub: always fail, flagging the connection as invalidated.

    FIX: the original declared ``*kwargs, **args`` — the conventional names
    swapped — which was actively misleading. Renamed to ``*args, **kwargs``;
    callers are unaffected because these names only collect extras, and the
    raised DatabaseError receives the same values in the same positions
    (positional tuple second, keyword dict third).
    """
    raise DatabaseError(statement, args, kwargs, connection_invalidated=True)
# NOTE(review): the ")" below closes a call whose opening lies before this
# chunk; the statements after it are the tail of a preceding test function.
)
data = json.loads(response.data)
# The error payload must deserialize against the shared error schema.
ErrorSchema().load(data)
assert response.status_code == 400


@pytest.mark.parametrize(
    "method,http_method,test_url,error,status_code",
    [
        ("select_by_user_slug", "POST",
         "api/order/user/WILLrogerPEREIRAslugBR", NoContentError(), 204),
        ("select_by_user_slug", "POST",
         "api/order/user/WILLrogerPEREIRAslugBR", HTTPException(), 400),
        ("select_by_user_slug", "POST",
         "api/order/user/WILLrogerPEREIRAslugBR", ConnectionError(), 502),
        ("select_by_user_slug", "POST",
         "api/order/user/WILLrogerPEREIRAslugBR",
         DataError("statement", "params", "DETAIL: orig\n"), 400),
        ("select_by_user_slug", "POST",
         "api/order/user/WILLrogerPEREIRAslugBR",
         DatabaseError("statement", "params", "orig"), 400),
        ("select_by_user_slug", "POST",
         "api/order/user/WILLrogerPEREIRAslugBR", SQLAlchemyError(), 504),
        ("select_by_user_slug", "POST",
         "api/order/user/WILLrogerPEREIRAslugBR", Exception(), 500)
    ]
)
def test_select_by_user_slug_controller_error(mocker, get_request_function,
                                              method, http_method, test_url,
                                              error, status_code):
    """Each service-layer error must map to its HTTP status code."""
    # Force the service method to raise the parametrized error.
    mocker.patch.object(OrderService, method, side_effect=error)
    make_request = get_request_function(http_method)
    response = make_request(
        test_url
    )
    if status_code == 204:
        # 204 carries no body, so JSON decoding must fail.
        with pytest.raises(JSONDecodeError):
            # NOTE(review): body truncated at this chunk boundary.
def upgrade():
    """Report the database as un-upgradeable via an already-failed Deferred."""
    error = DatabaseError('file is encrypted or is not a database', None, None)
    return defer.fail(error)
def consumer_segment_counts_trends(business_account_id, date_param_cd,
                                   store_ids: list, aggregate_level='daily'):
    """Return per-date consumer segment counts, aggregated daily or weekly.

    Every date between the range bounds appears in the output (missing days
    are zero-filled); the result is grouped and summed on the 'date' index.

    Parameters:
        business_account_id: account whose segments are counted.
        store_ids: optional filter on ``attributed_store_id``.
        aggregate_level: 'daily' or 'weekly'; when None, resolved from
            ``constants.API_DATE_AGGREGATION_LEVEL`` by date code.

    Raises:
        ValueError: for an unknown date code or aggregate level.
        DatabaseError: if no date range exists for the account/code pair.
    """
    db_date_format = constants.REPORT_DB_DATE_FORMAT
    valid_codes = [
        r for r, in db.session.query(
            ParamsAppModel.param_name_cd.distinct()).all()
    ]
    if date_param_cd not in valid_codes:
        raise ValueError("Wrong date_param_cd")
    date_range = ParamsAppModel.get_date_range(business_account_id,
                                               date_param_cd)
    if aggregate_level is None:
        # Fall back to the configured aggregation level for this date code.
        aggregate_level = constants.API_DATE_AGGREGATION_LEVEL.get(
            date_param_cd, 'daily')
    if not date_range:
        # BUG FIX: the original format string had a single %s for a
        # two-element tuple, so the raise itself failed with TypeError.
        raise DatabaseError(
            "ParamAppModel returns no corresponding date range for "
            "business_account_id=%s and date param %s"
            % (business_account_id, date_param_cd))
    lower_bound = date_range['lower_bound']
    upper_bound = date_range['upper_bound']
    report_date = date_range['report_date']
    summary_df = __consumer_tags_counts_trends_dly(report_date, lower_bound,
                                                   upper_bound,
                                                   business_account_id,
                                                   db_date_format)
    if summary_df is None:
        return pd.DataFrame({})
    summary_df['date'] = pd.to_datetime(summary_df['date'], format="%Y-%m-%d")
    if store_ids:
        summary_df = summary_df[summary_df['attributed_store_id'].isin(
            store_ids)]
    # Left-join onto the full calendar so days without data still appear.
    all_date_df = pd.DataFrame(
        DateModel.get_days_between_dates(lower_bound, upper_bound))
    joined_summary_df = pd.merge(all_date_df,
                                 summary_df,
                                 how='left',
                                 left_on='full_date',
                                 right_on='date')
    joined_summary_df.fillna(0, inplace=True)
    joined_summary_df['full_date'] = joined_summary_df[
        'full_date'].dt.strftime(db_date_format)
    if aggregate_level == 'daily':
        joined_summary_df.drop([
            'date', 'attributed_store_id', 'Monday_Date_Of_Week',
            'day_of_month'
        ], axis=1, inplace=True)
        joined_summary_df.rename(columns={'full_date': 'date'}, inplace=True)
    elif aggregate_level == 'weekly':
        # FIX: the original drop list contained 'day_of_month' twice.
        joined_summary_df.drop(
            ['date', 'attributed_store_id', 'full_date', 'day_of_month'],
            axis=1, inplace=True)
        joined_summary_df.rename(columns={'Monday_Date_Of_Week': 'date'},
                                 inplace=True)
    else:
        raise ValueError("aggregate level is wrong : %s" % aggregate_level)
    return joined_summary_df.groupby('date').sum()
async def test_database_corruption_while_running(opp, tmpdir, caplog):
    """Test we can recover from sqlite3 db corruption.

    Flow: start the recorder on a temp sqlite file, corrupt the file while a
    commit is forced to fail with a DatabaseError caused by
    sqlite3.DatabaseError, and assert the recorder renames the corrupt file,
    reconnects, and records into the fresh database.
    """

    def _create_tmpdir_for_test_db():
        # Runs in the executor: tmpdir operations are blocking I/O.
        return tmpdir.mkdir("sqlite").join("test.db")

    test_db_file = await opp.async_add_executor_job(_create_tmpdir_for_test_db)
    dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"

    assert await async_setup_component(opp, DOMAIN, {DOMAIN: {
        CONF_DB_URL: dburl
    }})
    await opp.async_block_till_done()
    caplog.clear()

    opp.states.async_set("test.lost", "on", {})

    # The corruption error the recorder must classify as unrecoverable:
    # a DBAPI DatabaseError whose __cause__ is sqlite3.DatabaseError.
    sqlite3_exception = DatabaseError("statement", {}, [])
    sqlite3_exception.__cause__ = sqlite3.DatabaseError()

    with patch.object(
        opp.data[DATA_INSTANCE].event_session,
        "close",
        side_effect=OperationalError("statement", {}, []),
    ):
        await async_wait_recording_done_without_instance(opp)
        # Corrupt the on-disk file while the session is live.
        await opp.async_add_executor_job(corrupt_db_file, test_db_file)
        await async_wait_recording_done_without_instance(opp)

    # First commit raises the corruption error, second (post-recovery)
    # succeeds.
    with patch.object(
        opp.data[DATA_INSTANCE].event_session,
        "commit",
        side_effect=[sqlite3_exception, None],
    ):
        # This state will not be recorded because
        # the database corruption will be discovered
        # and we will have to rollback to recover
        opp.states.async_set("test.one", "off", {})
        await async_wait_recording_done_without_instance(opp)

    assert "Unrecoverable sqlite3 database corruption detected" in caplog.text
    assert "The system will rename the corrupt database file" in caplog.text
    assert "Connected to recorder database" in caplog.text

    # This state should go into the new database
    opp.states.async_set("test.two", "on", {})
    await async_wait_recording_done_without_instance(opp)

    def _get_last_state():
        # Runs in the executor: the fresh DB must hold exactly one state.
        with session_scope(opp=opp) as session:
            db_states = list(session.query(States))
            assert len(db_states) == 1
            assert db_states[0].event_id > 0
            return db_states[0].to_native()

    state = await opp.async_add_executor_job(_get_last_state)
    assert state.entity_id == "test.two"
    assert state.state == "on"

    opp.bus.async_fire(EVENT_OPENPEERPOWER_STOP)
    await opp.async_block_till_done()
    opp.stop()
def consumer_loyalty_interaction_summary(business_account_id, date_param_cd,
                                         store_ids: list):
    """Return loyalty interaction totals for the current period with
    ``_prev``-suffixed columns for the previous period.

    Missing previous-period values are zero-filled; the store/currency/
    time-zone key columns are dropped from the result. Returns an empty
    DataFrame when the current period has no data.

    Raises:
        ValueError: if ``date_param_cd`` is not a configured code.
        DatabaseError: if no date range exists for the account/code pair.
    """
    valid_codes = [
        r for r, in db.session.query(
            ParamsAppModel.param_name_cd.distinct()).all()
    ]
    if date_param_cd not in valid_codes:
        raise ValueError("Wrong date_param_cd")
    date_range = ParamsAppModel.get_date_range(business_account_id,
                                               date_param_cd)
    if not date_range:
        # BUG FIX: the original format string had a single %s for a
        # two-element tuple, so the raise itself failed with TypeError.
        raise DatabaseError(
            "ParamAppModel returns no corresponding date range for "
            "business_account_id=%s and date param %s"
            % (business_account_id, date_param_cd))
    report_date = date_range['report_date']
    interaction_summary_df = _consumer_loyalty_interactions_summary(
        report_date, date_range['lower_bound'], date_range['upper_bound'],
        business_account_id, constants.REPORT_DB_DATE_FORMAT)
    prev_interaction_summary_df = _consumer_loyalty_interactions_summary(
        report_date, date_range['prev_lower_bound'],
        date_range['prev_upper_bound'], business_account_id,
        constants.REPORT_DB_DATE_FORMAT)
    if store_ids and not interaction_summary_df.empty:
        interaction_summary_df = interaction_summary_df[
            interaction_summary_df['store_id'].isin(store_ids)]
    if store_ids and not prev_interaction_summary_df.empty:
        prev_interaction_summary_df = prev_interaction_summary_df[
            prev_interaction_summary_df['store_id'].isin(store_ids)]
    key_columns = [
        'store_id', 'interaction_type', 'loyalty_currency', 'currency',
        'time_zone', 'interaction_type_category'
    ]
    if not interaction_summary_df.empty and not prev_interaction_summary_df.empty:
        comb_interaction_df = pd.merge(interaction_summary_df,
                                       prev_interaction_summary_df,
                                       how='left',
                                       on=key_columns,
                                       suffixes=['', '_prev'])
    elif not interaction_summary_df.empty:
        # No previous-period data: synthesize zeroed _prev columns.
        comb_interaction_df = interaction_summary_df
        comb_interaction_df['total_value_prev'] = 0
        comb_interaction_df['number_of_events_prev'] = 0
        comb_interaction_df['distinct_consumer_events_prev'] = 0
    else:
        # No current-period data at all.
        comb_interaction_df = pd.DataFrame(None)
    comb_interaction_df.fillna(0, inplace=True)
    # BUG FIX: the original dropped these columns unconditionally, which
    # raised KeyError on the empty no-data frame built above.
    if not comb_interaction_df.empty:
        comb_interaction_df.drop(['store_id', 'currency', 'time_zone'],
                                 axis=1,
                                 inplace=True)
    return comb_interaction_df