def _allocate_builds(self, host, nb_slots, domain=None):
    """Atomically claim up to ``nb_slots`` pending builds for ``host``.

    :param host: runbot host record; its ``name`` is written into the
        builds' ``host`` column to mark them as claimed.
    :param nb_slots: maximum number of builds to claim; non-positive
        values short-circuit to an empty result.
    :param domain: optional extra domain AND-ed with the base
        "pending and unassigned" filter.
    :return: list of 1-tuples of the claimed build ids (raw cursor rows).
    """
    if nb_slots <= 0:
        return []
    # Base filter: builds still pending and not yet claimed by any host.
    non_allocated_domain = [('local_state', '=', 'pending'), ('host', '=', False)]
    if domain:
        non_allocated_domain = expression.AND([non_allocated_domain, domain])
    e = expression.expression(non_allocated_domain, self.env['runbot.build'])
    # The domain must not introduce joins: the raw UPDATE below only
    # targets runbot_build.
    assert e.get_tables() == ['"runbot_build"']
    where_clause, where_params = e.to_sql()

    # self-assign to be sure that another runbot batch cannot self assign the same builds
    # FOR UPDATE ... SKIP LOCKED lets concurrent hosts claim disjoint sets of
    # builds without blocking each other.  The doubled '%%s' survives the
    # string substitution below as a psycopg2 parameter placeholder.
    query = """UPDATE
                    runbot_build
                SET
                    host = %%s
                WHERE
                    runbot_build.id IN (
                        SELECT runbot_build.id
                        FROM runbot_build
                        WHERE %s
                        ORDER BY array_position(array['normal','rebuild','indirect','scheduled']::varchar[], runbot_build.build_type) ASC
                        FOR UPDATE OF runbot_build SKIP LOCKED
                        LIMIT %%s
                    )
                RETURNING id""" % where_clause

    self.env.cr.execute(query, [host.name] + where_params + [nb_slots])
    return self.env.cr.fetchall()
def _where_calc(self, domain, active_test=True): """Computes the WHERE clause needed to implement an OpenERP domain. :param domain: the domain to compute :type domain: list :param active_test: whether the default filtering of records with ``active`` field set to ``False`` should be applied. :return: the query expressing the given domain as provided in domain :rtype: osv.query.Query """ # if the object has a field named 'active', filter out all inactive # records unless they were explicitely asked for if 'active' in self._fields and active_test and self._context.get( 'active_test', True): # the item[0] trick below works for domain items and '&'/'|'/'!' # operators too if not any(item[0] == 'active' for item in domain): domain = [('active', '=', 1)] + domain #is_multi_search_installed = self.env['ir.module.module'].search([('state','=','installed'),('name','=','multi_search')], limit=1) self.env.cr.execute( "SELECT id FROM ir_module_module WHERE name='multi_search_with_comma' and state='installed' limit 1" ) is_multi_search_installed = self.env.cr.fetchone() if domain: modified_domain = [] #_logger.info(str(domain)) for domain_tuple in domain: if not is_multi_search_installed: modified_domain.append(domain_tuple) continue if type(domain_tuple) in (list, tuple): if str(domain_tuple[1]) == 'ilike': multi_name = domain_tuple[2].split(',') len_name = len(multi_name) if len_name > 1: for length in multi_name: modified_domain.append('|') for f_name in multi_name: modified_domain.append([ domain_tuple[0], domain_tuple[1], f_name.strip() ]) modified_domain.append(domain_tuple) e = expression.expression(modified_domain, self) tables = e.get_tables() where_clause, where_params = e.to_sql() where_clause = [where_clause] if where_clause else [] else: where_clause, where_params, tables = [], [], ['"%s"' % self._table] return Query(tables, where_clause, where_params)
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
    """ This is a hack to allow us to correctly calculate the average of PO specific date values
        since the normal report query result will duplicate PO values across its PO lines during
        joins and lead to incorrect aggregation values.

        Only the AVG operator is supported for avg_days_to_purchase.

        :raises UserError: if ``avg_days_to_purchase`` is requested with an aggregate other
            than ``avg`` (or with no aggregate specification at all).
    """
    avg_days_to_purchase = next((field for field in fields if re.search(r'\bavg_days_to_purchase\b', field)), False)

    if avg_days_to_purchase:
        fields.remove(avg_days_to_purchase)
        # Field specs are expected to look like 'name:avg(field)'.  A bare spec
        # without ':' previously crashed with IndexError on split(':')[1];
        # treat it as an unsupported aggregate and raise the explicit error.
        aggregate = avg_days_to_purchase.split(':')[1].split('(')[0] if ':' in avg_days_to_purchase else None
        if aggregate != 'avg':
            raise UserError("Value: 'avg_days_to_purchase' should only be used to show an average. If you are seeing this message then it is being accessed incorrectly.")

    res = []
    if fields:
        # Every other requested field goes through the standard read_group.
        res = super(PurchaseReport, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
        if not res and avg_days_to_purchase:
            res = [{}]

    if avg_days_to_purchase:
        self.check_access_rights('read')
        # Average the approval delay (in days) once per purchase order rather
        # than once per report line, to avoid the join-duplication bias.
        query = """ SELECT AVG(days_to_purchase.po_days_to_purchase)::decimal(16,2) AS avg_days_to_purchase
                      FROM (
                          SELECT extract(epoch from age(po.date_approve,po.create_date))/(24*60*60) AS po_days_to_purchase
                            FROM purchase_order po
                           WHERE po.id IN (
                               SELECT "purchase_report"."order_id" FROM %s WHERE %s)
                           ) AS days_to_purchase
                """
        subdomain = AND([domain, [('company_id', '=', self.env.company.id), ('date_approve', '!=', False)]])
        subtables, subwhere, subparams = expression(subdomain, self).query.get_sql()
        self.env.cr.execute(query % (subtables, subwhere), subparams)
        res[0].update({
            '__count': 1,
            # AVG always yields exactly one row (NULL when no POs match).
            avg_days_to_purchase.split(':')[0]: self.env.cr.fetchone()[0],
        })
    return res
def _get_last_branch_name_builds(self):
    """Return the most recent matching build per repo for this branch.

    For a pull request, match candidate branches on the PR's pull head
    branch name and target branch; otherwise match on the branch name
    itself.  Hidden builds, child builds and no-build branches are
    excluded.

    :return: runbot.build recordset (at most one latest build per repo).
    """
    # naive way to find corresponding build, only matching branch name or pr pull_head_name and target_branch_name.
    self.ensure_one()
    domain = []
    if self.pull_head_name:
        # '%%' collapses to a literal '%' after the % formatting, giving a
        # 'like' pattern of the form '%:branch_name'.
        domain = [('pull_head_name', 'like', '%%:%s' % self.pull_head_name.split(':')[-1]),
                  ('target_branch_name', '=', self.target_branch_name)
                  ]  # pr matching pull head name
    else:
        domain = [('name', '=', self.name)]
    #domain += [('id', '!=', self.branch_id.id)]
    e = expression.expression(domain, self)
    where_clause, where_params = e.to_sql()
    # ORM search (empty domain) so record rules apply before the raw query.
    repo_ids = tuple(self.env['runbot.repo'].search([]).ids)  # access rights
    # 'b.repo_id in %%s' keeps a psycopg2 placeholder after the % formatting
    # of the branch where-clause below.
    query = """
        SELECT max(b.id)
        FROM runbot_build b
        JOIN runbot_branch br ON br.id = b.branch_id
        WHERE b.branch_id IN (
            SELECT id from runbot_branch WHERE %s
        )
        AND b.build_type IN ('normal', 'rebuild')
        AND b.repo_id in %%s
        AND (b.hidden = false OR b.hidden IS NULL)
        AND b.parent_id IS NULL
        AND (br.no_build = false OR br.no_build IS NULL)
        GROUP BY b.repo_id
    """ % where_clause
    self.env.cr.execute(query, where_params + [repo_ids])
    results = [r[0] for r in self.env.cr.fetchall()]
    return self.env['runbot.build'].browse(results)
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
    """Report ``read_group`` with special handling for ``avg_receipt_delay``.

    Averaging the receipt delay over the joined report lines would count each
    purchase order once per order line; this override instead computes the
    average once per purchase order with a dedicated SQL query.  Only the
    ``avg`` aggregate is supported for this pseudo-field.
    """
    delay_spec = next(
        (spec for spec in fields if re.search(r'\bavg_receipt_delay\b', spec)),
        False)

    if delay_spec:
        fields.remove(delay_spec)
        # Field specs look like 'name:avg(field)'; anything but 'avg' is rejected.
        operator = delay_spec.split(':')[1].split('(')[0]
        if operator != 'avg':
            raise UserError(
                "Value: 'avg_receipt_delay' should only be used to show an average. If you are seeing this message then it is being accessed incorrectly."
            )

    res = []
    if fields:
        # Delegate the remaining fields to the standard implementation,
        # falling back to a single empty group when the delay is requested
        # but the base query returned nothing.
        res = super(PurchaseReport, self).read_group(
            domain, fields, groupby,
            offset=offset, limit=limit, orderby=orderby, lazy=lazy)
        res = res or ([{}] if delay_spec else res)

    if delay_spec:
        query = """ SELECT AVG(receipt_delay.po_receipt_delay)::decimal(16,2) AS avg_receipt_delay
                      FROM (
                          SELECT extract(epoch from age(po.effective_date,COALESCE(po.date_planned, po.expected_date)))/(24*60*60) AS po_receipt_delay
                            FROM purchase_order po
                           WHERE po.id IN (
                               SELECT order_id FROM purchase_report WHERE effective_date IS NOT NULL AND %s
                           )
                      ) AS receipt_delay
                """
        restricted_domain = domain + [('company_id', '=', self.env.company.id)]
        where, args = expression(restricted_domain, self).to_sql()
        self.env.cr.execute(query % where, args)
        delay_value = self.env.cr.fetchall()[0][0]
        res[0].update({
            '__count': 1,
            delay_spec.split(':')[0]: delay_value,
        })
    return res
def assign_scores_to_leads(self, ids=False, lead_ids=False):
    """Apply the running scoring rules to CRM leads.

    :param ids: optional list of rule ids to restrict the run to; falsy
        runs every rule with ``running`` set.
    :param lead_ids: optional list of lead ids to restrict the run to;
        falsy processes all (new) leads.
    """
    _logger.info('Start scoring for %s rules and %s leads' % (ids and len(ids) or 'all', lead_ids and len(lead_ids) or 'all'))
    domain = [('running', '=', True)]
    if ids:
        domain.append(('id', 'in', ids))
    scores = self.search_read(domain=domain, fields=['domain', 'rule_type'])

    # Sort rule to unlink before scoring
    priorities = dict(unlink=1, active=2, score=3)
    scores = sorted(scores, key=lambda k: priorities.get(k['rule_type']))

    for score in scores:
        # Rule domains are stored as text: evaluate in a restricted context.
        domain = safe_eval(score['domain'], evaluation_context)

        # Don't replace the domain with a 'not in' like below... that doesn't make the same thing !!!
        # domain.extend(['|', ('stage_id.on_change', '=', False), ('stage_id.probability', 'not in', [0,100])])
        domain.extend([
            '|', ('stage_id.on_change', '=', False),
            '&', ('stage_id.probability', '!=', 0),
            ('stage_id.probability', '!=', 100)
        ])

        e = expression(domain, self.env['crm.lead'])
        where_clause, where_params = e.to_sql()

        # Exclude leads that already matched this rule.
        where_clause += """ AND (id NOT IN (SELECT lead_id FROM crm_lead_score_rel WHERE score_id = %s)) """
        where_params.append(score['id'])

        if not self.event_based and not lead_ids:
            # Find the highest lead id already scored by this rule so only
            # newer leads are re-examined.
            self._cr.execute(
                'SELECT max(lead_id) FROM crm_lead_score_rel WHERE score_id = %s',
                (score['id'], ))
            last_id = self._cr.fetchone()[0]

            if last_id:
                # Only check leads that are newer than the last matching lead.
                # Could be based on a "last run date" for a more precise optimization
                where_clause += """ AND (id > %s) """
                where_params.append(last_id)

        if lead_ids:
            where_clause += """ AND (id in %s) """
            where_params.append(tuple(lead_ids))

        if score['rule_type'] == 'score':
            # where_clause is built from a validated expression plus constant
            # fragments; only the values go through parameters.
            self._cr.execute(
                """INSERT INTO crm_lead_score_rel
                        SELECT crm_lead.id as lead_id, %s as score_id
                        FROM crm_lead
                        WHERE %s RETURNING lead_id""" % (score['id'], where_clause),
                where_params)

            # Force recompute of fields that depends on score_ids
            returning_ids = [resp[0] for resp in self._cr.fetchall()]
            leads = self.env["crm.lead"].browse(returning_ids)
            leads.modified(['score_ids'])
            leads.recompute()

        elif score['rule_type'] == 'unlink':
            self._cr.execute(
                "DELETE FROM crm_lead WHERE %s" % where_clause,
                where_params)

        elif score['rule_type'] == 'active':
            self._cr.execute(
                "UPDATE crm_lead set active = 'f' WHERE %s" % where_clause,
                where_params)
    _logger.info('End scoring')
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
    """ This is a hack made in order to improve performance as adding inventory valuation on
        the report itself would be too costly.
        Basically when asked to return the valuation, it will run a smaller SQL query that
        will calculate the inventory valuation on the given domain.
        Only the SUM operator is supported for valuation.
        We can also get the stock_value of the inventory at a specific date (default is
        today).  The same applies to this stock_value field, it only supports the sum
        operator and does not support the group by.
        NB: This should probably be implemented in a read instead of read_group since we
        don't support grouping
        NB: We might be able to avoid doing this hack by optimizing the query used to
        generate the report (= TODO: see nse)
    """
    stock_value = next((field for field in fields if re.search(r'\bstock_value\b', field)), False)
    valuation = next((field for field in fields if re.search(r'\bvaluation\b', field)), False)
    if stock_value:
        fields.remove(stock_value)
    if valuation:
        fields.remove(valuation)
    if stock_value or valuation:
        # Neither pseudo-field supports grouping, nor any aggregate but sum.
        if groupby:
            raise UserError("valuation and stock_value don't support grouping")
        if any(field.split(':')[1].split('(')[0] != 'sum' for field in [stock_value, valuation] if field):
            raise UserError("read_group only support operator sum for valuation and stock_value")
    res = []
    if fields:
        # Delegate the remaining fields to the normal read_group.
        res = super(StockReport, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
        if not res and (stock_value or valuation):
            res = [{}]
    if stock_value:
        # Valuation "as of" the date bound found in the domain (today by
        # default), computed in Python over all storable products.
        date = Date.to_string(Date.from_string(next((d[2] for d in domain if d[0] == 'date_done'), Date.today())))
        products = self.env['product.product'].with_context(to_date=date).search([('product_tmpl_id.type', '=', 'product')])
        value = sum(product.stock_value for product in products)
        res[0].update({
            '__count': 1,
            stock_value.split(':')[0]: value,
        })
    if valuation:
        # Sum each move's value according to its category cost method:
        # fifo/average use the stored move value, standard uses
        # qty * standard price from ir_property.
        query = """ SELECT
                        SUM(move_valuation.valuation) as valuation
                    FROM (
                        SELECT
                            CASE property.value_text -- cost method
                                WHEN 'fifo' THEN move.value
                                WHEN 'average' THEN move.value
                                ELSE move.product_qty * product_property.value_float -- standard price
                            END as valuation
                        FROM stock_move move
                            INNER JOIN product_product product ON move.product_id = product.id
                            INNER JOIN product_template ON product.product_tmpl_id = product_template.id
                            INNER JOIN product_category category ON product_template.categ_id = category.id
                            LEFT JOIN ir_property property ON property.res_id = CONCAT('product.category,', category.id)
                            INNER JOIN ir_property product_property ON product_property.res_id = CONCAT('product.product,', product.id)
                        WHERE move.id IN (
                            SELECT id FROM stock_report WHERE %s
                        )
                        AND (property.company_id is null or property.company_id = move.company_id)
                        AND product_property.company_id = move.company_id
                    ) as move_valuation
                """
        where, args = expression(domain + [('company_id', '=', self.env.user.company_id.id)], self).to_sql()
        self.env.cr.execute(query % where, args)
        res[0].update({
            '__count': 1,
            valuation.split(':')[0]: self.env.cr.fetchall()[0][0],
        })
    return res
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
    """ This is a hack made in order to improve performance as adding inventory valuation on
        the report itself would be too costly.
        Basically when asked to return the valuation, it will run a smaller SQL query that
        will calculate the inventory valuation on the given domain.
        Only the SUM operator is supported for valuation.
        We can also get the stock_value of the inventory at a specific date (default is
        today).  The same applies to this stock_value field, it only supports the sum
        operator and does not support the group by.
        NB: This should probably be implemented in a read instead of read_group since we
        don't support grouping
        NB: We might be able to avoid doing this hack by optimizing the query used to
        generate the report (= TODO: see nse)
    """
    stock_value = next((field for field in fields if re.search(r'\bstock_value\b', field)), False)
    valuation = next((field for field in fields if re.search(r'\bvaluation\b', field)), False)
    if stock_value:
        fields.remove(stock_value)
    if valuation:
        fields.remove(valuation)
    if stock_value or valuation:
        # Neither pseudo-field supports grouping, nor any aggregate but sum.
        if groupby:
            raise UserError("valuation and stock_value don't support grouping")
        if any(field.split(':')[1].split('(')[0] != 'sum' for field in [stock_value, valuation] if field):
            raise UserError("read_group only support operator sum for valuation and stock_value")
    res = []
    if fields:
        # Delegate the remaining fields to the normal read_group.
        res = super(StockReport, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
        if not res and (stock_value or valuation):
            res = [{}]
    if stock_value:
        products = self.env['product.product']
        # Split the recordset for faster computing.
        # value_svl is read per chunk so prefetching stays bounded.
        value = sum(
            product.value_svl
            for products_split in self.env.cr.split_for_in_conditions(
                products.search([("product_tmpl_id.type", "=", "product")]).ids
            )
            for product in products.browse(products_split)
        )
        res[0].update({
            '__count': 1,
            stock_value.split(':')[0]: value,
        })
    if valuation:
        # Sum the stock valuation layers attached to the moves selected by
        # the report domain.
        query = """ SELECT
                        SUM(move_valuation.valuation) as valuation
                    FROM (
                        SELECT
                            sum(svl.value) AS valuation
                        FROM stock_move move
                            INNER JOIN stock_valuation_layer AS svl ON svl.stock_move_id = move.id
                        WHERE move.id IN (
                            SELECT id FROM stock_report WHERE %s
                        )
                        GROUP BY move.id
                    ) as move_valuation
                """
        where, args = expression(domain + [('company_id', '=', self.env.company.id)], self).to_sql()
        self.env.cr.execute(query % where, args)
        res[0].update({
            '__count': 1,
            valuation.split(':')[0]: self.env.cr.fetchall()[0][0],
        })
    return res
def assign_scores_to_leads(self, ids=False, lead_ids=False):
    """Apply scoring rules to CRM leads and track each rule's last run.

    :param ids: optional list of rule ids to restrict the run to; falls
        back to ``self.ids`` and then to every rule.
    :param lead_ids: optional list of lead ids to restrict the run to;
        falsy processes all (new) leads.
    """
    _logger.info('Start scoring for %s rules and %s leads' % (ids and len(ids) or 'all', lead_ids and len(lead_ids) or 'all'))
    if ids:
        domain = [('id', 'in', ids)]
    elif self.ids:
        domain = [('id', 'in', self.ids)]
    else:
        domain = []
    scores = self.search(domain)
    # Sort rule to unlink before scoring
    priorities = dict(unlink=1, active=2, score=3)
    scores = sorted(scores, key=lambda k: priorities.get(k['rule_type']))
    for score in scores:
        # Timestamp captured before the run so last_run never skips leads
        # created while the rule executes.
        now = datetime.datetime.now()
        domain = safe_eval.safe_eval(score.domain, evaluation_context)

        # Don't replace the domain with a 'not in' like below... that doesn't make the same thing !!!
        # domain.extend(['|', ('stage_id.is_won', '=', False), ('probability', 'not in', [0,100])])
        domain.extend(['|', ('stage_id.is_won', '=', False), '&', ('probability', '!=', 0), ('probability', '!=', 100)])

        e = expression(domain, self.env['crm.lead'])
        from_clause, where_clause, where_params = e.query.get_sql()
        # The rule domain must not introduce joins: the raw statements below
        # only target crm_lead.
        assert from_clause == '"crm_lead"'

        # Exclude leads that already matched this rule.
        where_clause += """ AND (id NOT IN (SELECT lead_id FROM crm_lead_score_rel WHERE score_id = %s)) """
        where_params.append(score.id)

        if not score.event_based and not lead_ids:
            if score.last_run:
                # Only check leads that are newer than the last matching lead.
                where_clause += """ AND (create_date > %s) """
                where_params.append(score.last_run)

        if lead_ids:
            where_clause += """ AND (id in %s) """
            where_params.append(tuple(lead_ids))

        if score.rule_type == 'score':
            # where_clause is built from a validated expression plus constant
            # fragments; only the values go through parameters.
            self._cr.execute("""INSERT INTO crm_lead_score_rel
                                    SELECT crm_lead.id as lead_id, %s as score_id
                                    FROM crm_lead
                                    WHERE %s RETURNING lead_id""" % (score.id, where_clause), where_params)

            # Force recompute of fields that depends on score_ids
            returning_ids = [resp[0] for resp in self._cr.fetchall()]
            leads = self.env["crm.lead"].browse(returning_ids)
            leads.modified(['score_ids'])
            leads.recompute()

        elif score.rule_type == 'unlink':
            # Flush pending ORM writes so the raw DELETE sees current data.
            self.env['crm.lead'].flush()
            self._cr.execute("DELETE FROM crm_lead WHERE %s RETURNING id" % where_clause, where_params)
            deleted_ids = [row[0] for row in self._cr.fetchall()]
            # NOTE(review): this recordset is never used afterwards —
            # presumably kept for debugging/hook purposes; verify.
            deleted_leads = self.env['crm.lead'].browse(deleted_ids)

        elif score.rule_type == 'active':
            self._cr.execute("UPDATE crm_lead set active = 'f' WHERE %s" % where_clause, where_params)

        if not (lead_ids or ids):  # if global scoring
            score.last_run = now
    _logger.info('End scoring')