def test_deeply_nested_dimension_filter_with_sets_for_data_blending(self):
    """A deeply nested dimension filter wrapped in a ResultSet on a blended
    dataset yields a single statement: the blended sub-query labels rows with
    CASE WHEN <filter> THEN set_label ELSE complement_label."""
    dim_alias = 'state'
    nested_filter = mock_dataset_blender.fields.state.like(
        fn.Concat(
            fn.Upper(fn.Trim(fn.Concat('%ab', mock_dataset_blender.fields['candidate-id']))),
            mock_dataset_blender.fields.winner,
            fn.Concat(mock_dataset_blender.fields.timestamp.between('date1', 'date2'), 'c%'),
        )
    )

    sql_statements = (
        mock_dataset_blender.query
        .widget(f.Pandas(mock_dataset_blender.fields['candidate-spend']))
        .dimension(mock_dataset_blender.fields[dim_alias])
        .filter(f.ResultSet(nested_filter, set_label='set_A', complement_label='set_B'))
        .sql
    )

    self.assertEqual(1, len(sql_statements))

    expected = (
        'SELECT '
        f'"sq0"."${dim_alias}" "${dim_alias}",'
        '"sq0"."$candidate-spend" "$candidate-spend" '
        'FROM ('
        'SELECT '
        f"CASE WHEN {nested_filter} THEN 'set_A' ELSE 'set_B' END \"${dim_alias}\","
        'SUM("candidate_spend") "$candidate-spend" '
        'FROM "politics"."politician_spend" '
        f'GROUP BY "${dim_alias}"'
        ') "sq0" '
        f'ORDER BY "${dim_alias}" '
        'LIMIT 200000'
    )
    self.assertEqual(expected, str(sql_statements[0]))
def test_deeply_nested_dimension_filter_with_sets(self):
    """A deeply nested dimension filter wrapped in a ResultSet replaces the
    dimension with a CASE WHEN <filter> THEN set_label ELSE complement_label
    expression in the generated SQL."""
    dim_alias = 'text'
    nested_filter = ds.fields.text.like(
        fn.Concat(
            fn.Upper(fn.Trim(fn.Concat('%ab', ds.fields.number))),
            ds.fields.aggr_number,
            fn.Concat(ds.fields.date.between('date1', 'date2'), 'c%'),
        )
    )

    sql_statements = (
        ds.query
        .widget(f.Pandas(ds.fields.aggr_number))
        .dimension(ds.fields[dim_alias])
        .filter(f.ResultSet(nested_filter, set_label='set_A', complement_label='set_B'))
        .sql
    )

    self.assertEqual(1, len(sql_statements))

    expected = (
        'SELECT '
        f"CASE WHEN {nested_filter} THEN 'set_A' ELSE 'set_B' END \"${dim_alias}\","
        'SUM("number") "$aggr_number" '
        'FROM "test" '
        f'GROUP BY "${dim_alias}" '
        f'ORDER BY "${dim_alias}" '
        'LIMIT 200000'
    )
    self.assertEqual(expected, str(sql_statements[0]))
def search(key):
    """Find rooms and users whose name starts with *key*.

    Returns the union (``+``) of two SELECTs; each row carries a literal
    ``type`` column tagging its origin (1 = room, 0 = user).
    """
    room, usr = Tables(cfg.room, cfg.usr)
    pattern = key + '%'

    rooms_q = (
        Query.from_(room)
        .select(room.id, room.name, fn.Concat(1).as_('type'))
        .where(room.name.like(pattern))
    )
    users_q = (
        Query.from_(usr)
        .select(usr.id, usr.name, fn.Concat(0).as_('type'))
        .where(usr.name.like(pattern))
    )
    return rooms_q + users_q
def test_insert_multiple_columns_on_duplicate_update_one_with_expression_using_values(self):
    """ON DUPLICATE KEY UPDATE may reference the freshly inserted value via
    VALUES(`col`) inside an expression."""
    query = (
        MySQLQuery.into(self.table_abc)
        .insert(1, 'a')
        .on_duplicate_key_update(
            self.table_abc.bar,
            fn.Concat(Values(self.table_abc.bar), 'update'),
        )
    )

    expected = (
        "INSERT INTO `abc` VALUES (1,'a') "
        "ON DUPLICATE KEY UPDATE `bar`=CONCAT(VALUES(`bar`),'update')"
    )
    self.assertEqual(expected, str(query))
def test_insert_multiple_columns_on_duplicate_update_one_with_expression_using_original_field_value(self):
    """ON DUPLICATE KEY UPDATE may reference the row's existing column value
    (plain `bar`, not VALUES(`bar`)) inside an expression."""
    query = (
        MySQLQuery.into(self.table_abc)
        .insert(1, 'a')
        .on_duplicate_key_update(
            self.table_abc.bar,
            fn.Concat(self.table_abc.bar, 'update'),
        )
    )

    expected = (
        "INSERT INTO `abc` VALUES (1,'a') "
        "ON DUPLICATE KEY UPDATE `bar`=CONCAT(`bar`,'update')"
    )
    self.assertEqual(expected, str(query))
def __init__(self, unit=None, **kw):
    """Build a query for a unit's max SMR reading per 'YEAR-MONTH' period.

    NOTE: ``f.set_self(vars())`` below publishes *every* local name
    (a, b, q, cols, _period, ...) onto the instance, so the local names in
    this method are effectively part of its interface — do not rename them.
    """
    super().__init__(**kw)
    a, b = pk.Tables('UnitSMR', 'UnitID')

    # Server-side date-part functions wrapped as custom pypika functions
    _year = cfn('YEAR', ['date'])
    _month = cfn('MONTH', ['date'])
    year = _year(a.DateSMR)
    month = _month(a.DateSMR)
    # Period label, presumably rendered as e.g. 'YYYY-M' by the database
    _period = fn.Concat(year, '-', month)

    # Not used directly below — presumably consumed by callers via set_self
    cols = [a.Unit, _period.as_('Period'), fn.Max(a.SMR).as_('SMR')]

    q = Query.from_(a) \
        .left_join(b).on_field('Unit') \
        .where(a.Unit == unit) \
        .groupby(a.Unit, _period)

    f.set_self(vars())
def test__concat__field(self):
    """CONCAT of two table columns renders both as quoted field names."""
    query = Q.from_(self.t).select(fn.Concat(self.t.foo, self.t.bar))
    self.assertEqual('SELECT CONCAT("foo","bar") FROM "abc"', str(query))
def test__concat__str(self):
    """CONCAT of bare Python strings renders each as a quoted SQL literal."""
    query = Q.select(fn.Concat('p', 'y', 'q', 'b'))
    self.assertEqual("SELECT CONCAT('p','y','q','b')", str(query))
def q(self):
    """Build the downtime-summary query: per period (month or ISO week) and
    per unit, total/SMS/Suncor downtime hours joined with unit metadata and
    excluded-hours figures.

    Reads from self: d_rng (date range pair), period ('month' or week
    grouping), model (LIKE pattern), minesite, unit (optional single unit).
    Returns a pypika Query.
    """
    d_rng, period, model, minesite, unit = self.d_rng, self.period, self.model, self.minesite, self.unit
    a, b = pk.Tables('Downtime', 'UnitID')

    # Server-side functions wrapped as custom pypika functions
    hrs_in_period = cfn('tblHrsInPeriod', ['d_lower', 'd_upper', 'minesite', 'period'])
    period_range = cfn('period_range', ['startdate', 'enddate', 'period'])
    _month = cfn('MONTH', ['date'])
    _year = cfn('YEAR', ['date'])
    iso_year = cfn('dbo.iso_year', ['date'])
    datepart = cfn('DATEPART', ['period_type', 'date'])

    month = _month(a.ShiftDate)
    week = datepart(PseudoColumn('iso_week'), a.ShiftDate)

    # Period label: 'YEAR-MONTH' when grouping monthly, else 'isoyear-isoweek'
    if period == 'month':
        year = _year(a.ShiftDate)
        _period = fn.Concat(year, '-', month)
    else:
        # only use iso_year (slow custom function) when grouping by week
        year = iso_year(a.ShiftDate)
        _period = fn.Concat(year, '-', week)

    # Create all week/month periods in range crossed with units, so periods
    # with no downtime rows still appear in the result
    q_prd = Query.from_(period_range(d_rng[0], d_rng[1], period)).select('period')

    # NOTE(review): lowercase b.model here vs b.Model in the final WHERE —
    # works on a case-insensitive collation; confirm intentional.
    q_base = Query.from_(b) \
        .select(q_prd.period, b.Unit) \
        .cross_join(q_prd).cross() \
        .where(Criterion.all([
            b.MineSite == minesite,
            b.model.like(model)]))

    # Downtime aggregates per unit/period: Total, SMS, Suncor hours
    cols_dt = [
        _period.as_('period'),
        a.Unit,
        fn.Sum(a.Duration).as_('Total'),
        fn.Sum(a.SMS).as_('SMS'),
        fn.Sum(a.Suncor).as_('Suncor')]

    q_dt = Query.from_(a) \
        .select(*cols_dt) \
        .where(Criterion.all([
            a.ShiftDate.between(d_rng[0], d_rng[1]),
            a.Duration > 0.01])) \
        .groupby(a.Unit, _period)

    # in case need historical data for single unit
    if unit is not None:
        q_dt = q_dt.where(a.Unit == unit)
        q_base = q_base.where(b.Unit == unit)

    # Left join so every unit/period combination survives even with no downtime
    cols1 = [q_base.period, q_base.Unit, q_dt.Total, q_dt.SMS, q_dt.Suncor]

    q1 = Query.from_(q_base) \
        .select(*cols1) \
        .left_join(q_dt).on_field('Unit', 'Period')

    q_hrs = Query.from_(hrs_in_period(d_rng[0], d_rng[1], minesite, period)).select('*')

    cols = [
        b.Model,
        b.DeliveryDate,
        q1.star,
        q_hrs.ExcludeHours_MA,
        q_hrs.ExcludeHours_PA,
        Case().when(b.AHSActive == 1, 'AHS').else_('Staffed').as_('Operation')]

    return Query.from_(q1) \
        .select(*cols) \
        .left_join(b).on_field('Unit') \
        .left_join(q_hrs).on_field('Unit', 'Period') \
        .where(b.Model.like(model))
def test__concat__field(self):
    """CONCAT of two table columns renders both as quoted field names."""
    query = Q.from_(self.t).select(fn.Concat(self.t.foo, self.t.bar))
    self.assertEqual("SELECT CONCAT(\"foo\",\"bar\") FROM \"abc\"", str(query))
def concat(field, args):
    """Build a SQL CONCAT(field, args) expression via pypika.

    NOTE(review): ``args`` is passed as one second argument, not unpacked
    (``*args``) — confirm callers intend a single value here.
    """
    return pypika_fn.Concat(field, args)