def _interval_dates(self, frequency, company):
    """ Compute the theoretical date from which account move lines should be fetched.

    :param str frequency: a valid value of the selection field on the object
        (daily, monthly, annually); frequencies are literal (daily means
        24 hours and so on)
    :param recordset company: the company for which the closing is done
    :return: dict with 'interval_from' (theoretical start date),
        'date_stop' (end date, always now()) and 'name_interval' (label);
        the dates are in their COffice Database string representation
    """
    from calendar import monthrange  # local import: used to clamp the day below

    date_stop = datetime.utcnow()
    interval_from = None
    name_interval = ''
    if frequency == 'daily':
        interval_from = date_stop - timedelta(days=1)
        name_interval = _('Daily Closing')
    elif frequency == 'monthly':
        # Previous month; wrap to December of the previous year in January.
        month_target = date_stop.month > 1 and date_stop.month - 1 or 12
        year_target = month_target < 12 and date_stop.year or date_stop.year - 1
        # Clamp the day of month: replace() raises ValueError for invalid
        # dates such as May 31 -> "April 31" or Mar 30 -> "Feb 30".
        day_target = min(date_stop.day, monthrange(year_target, month_target)[1])
        interval_from = date_stop.replace(year=year_target, month=month_target, day=day_target)
        name_interval = _('Monthly Closing')
    elif frequency == 'annually':
        year_target = date_stop.year - 1
        # Clamp Feb 29 on a leap year -> Feb 28 of the previous (non-leap) year.
        day_target = min(date_stop.day, monthrange(year_target, date_stop.month)[1])
        interval_from = date_stop.replace(year=year_target, day=day_target)
        name_interval = _('Annual Closing')
    return {
        'interval_from': FieldDateTime.to_string(interval_from),
        'date_stop': FieldDateTime.to_string(date_stop),
        'name_interval': name_interval,
    }
def _compute_duration_display(self):
    """Build a short human-readable duration such as '8 hours' or '1.5 days'."""
    for allocation in self:
        if allocation.type_request_unit == 'hour':
            amount = float_round(allocation.number_of_hours_display, precision_digits=2)
            unit = _('hours')
        else:
            amount = float_round(allocation.number_of_days_display, precision_digits=2)
            unit = _('days')
        allocation.duration_display = '%g %s' % (amount, unit)
def format_failure_reason(self):
    """Return a human-readable description of this record's failure."""
    self.ensure_one()
    if self.failure_type == 'UNKNOWN':
        # No classified type: fall back to the raw reason text.
        return _("Unknown error") + ": %s" % (self.failure_reason or '')
    selection_labels = dict(type(self).failure_type.selection)
    return selection_labels.get(self.failure_type, _('No Error'))
def _parse_date_from_data(self, data, index, name, field_type, options):
    """Normalize the date/datetime column ``index`` of ``data`` in place.

    Each non-empty cell is parsed with the user-provided format(s) from
    ``options`` and rewritten as the server string representation.
    Raises ValueError (with column name and line number) on bad values.
    """
    dt = datetime.datetime
    fmt = fields.Date.to_string if field_type == 'date' else fields.Datetime.to_string
    d_fmt = options.get('date_format')
    dt_fmt = options.get('datetime_format')
    for num, line in enumerate(data):
        if not line[index]:
            continue
        value = line[index].strip()
        try:
            # A datetime column may carry a full datetime format: try it first.
            if dt_fmt and field_type == 'datetime':
                try:
                    line[index] = fmt(dt.strptime(value, dt_fmt))
                    continue
                except ValueError:
                    pass
            # Otherwise parse with the plain date format, whether the field
            # is a date or a datetime.
            line[index] = fmt(dt.strptime(value, d_fmt))
        except ValueError as e:
            raise ValueError(
                _("Column %s contains incorrect values. Error in line %d: %s"
                  ) % (name, num + 1, e))
        except Exception as e:
            raise ValueError(
                _("Error Parsing Date [%s:L%d]: %s") % (name, num + 1, e))
def name_get(self):
    """Display allocations as 'Allocation of <type> : <amount> unit(s) to <target>'."""
    res = []
    for allocation in self:
        mode = allocation.holiday_type
        if mode == 'company':
            target = allocation.mode_company_id.name
        elif mode == 'department':
            target = allocation.department_id.name
        elif mode == 'category':
            target = allocation.category_id.name
        else:
            target = allocation.employee_id.name
        if allocation.type_request_unit == 'hour':
            label = _("Allocation of %s : %.2f hour(s) to %s") % (
                allocation.holiday_status_id.name,
                allocation.number_of_hours_display,
                target)
        else:
            label = _("Allocation of %s : %.2f day(s) to %s") % (
                allocation.holiday_status_id.name,
                allocation.number_of_days,
                target)
        res.append((allocation.id, label))
    return res
def _parse_import_data_recursive(self, model, prefix, data, import_fields, options):
    """Walk ``model``'s fields (recursing into relational sub-fields) and
    normalize the matching columns of ``data`` in place.

    :param str model: technical model name whose fields are inspected
    :param str prefix: slash-separated path of the parent fields ('' at top level)
    :param list data: rows to import; mutated in place
    :param list import_fields: slash-separated paths of the imported columns
    :param dict options: user import options (date formats, separators, ...)
    :return: the (mutated) ``data``
    """
    # Get fields of type date/datetime
    all_fields = self.env[model].fields_get()
    for name, field in all_fields.items():
        name = prefix + name
        if field['type'] in ('date', 'datetime') and name in import_fields:
            index = import_fields.index(name)
            self._parse_date_from_data(data, index, name, field['type'], options)
        # Check if the field is in import_field and is a relational (followed by /)
        # Also verify that the field name exactly match the import_field at the correct level.
        elif any(name + '/' in import_field and name == import_field.split('/')[prefix.count('/')] for import_field in import_fields):
            # Recursive call with the relational as new model and add the field name to the prefix
            self._parse_import_data_recursive(field['relation'], name + '/', data, import_fields, options)
        elif field['type'] in ('float', 'monetary') and name in import_fields:
            # Parse float, sometimes float values from file have currency symbol or () to denote a negative value
            # We should be able to manage both case
            index = import_fields.index(name)
            self._parse_float_from_data(data, index, name, options)
        elif field['type'] == 'binary' and field.get('attachment') and any(f in name for f in IMAGE_FIELDS) and name in import_fields:
            # Image-like attachment column: cells hold either a downloadable
            # URL or raw base64 data.
            index = import_fields.index(name)
            with requests.Session() as session:
                # Stream downloads so the size cap in _import_image_by_url
                # can abort early.
                session.stream = True
                for num, line in enumerate(data):
                    if re.match(config.get("import_image_regex", DEFAULT_IMAGE_REGEX), line[index]):
                        if not self.env.user._can_import_remote_urls():
                            raise AccessError(
                                _("You can not import images via URL, check with your administrator or support for the reason."
                                  ))
                        line[index] = self._import_image_by_url(line[index], session, name, num)
                    else:
                        # Not a URL: must be valid base64, otherwise reject.
                        try:
                            base64.b64decode(line[index], validate=True)
                        except binascii.Error:
                            raise ValueError(
                                _("Found invalid image data, images should be imported as either URLs or base64-encoded data."
                                  ))
    return data
class RecruitmentStage(models.Model):
    """A stage of the recruitment pipeline (a kanban column for applicants)."""
    _name = "hr.recruitment.stage"
    _description = "Recruitment Stages"
    _order = 'sequence'

    name = fields.Char("Stage Name", required=True, translate=True)
    sequence = fields.Integer(
        "Sequence", default=10,
        help="Gives the sequence order when displaying a list of stages.")
    job_ids = fields.Many2many(
        'hr.job', string='Job Specific',
        help='Specific jobs that uses this stage. Other jobs will not use this stage.')
    requirements = fields.Text("Requirements")
    template_id = fields.Many2one(
        'mail.template', "Email Template",
        help="If set, a message is posted on the applicant using the template when the applicant is set to the stage.")
    fold = fields.Boolean(
        "Folded in Kanban",
        help="This stage is folded in the kanban view when there are no records in that stage to display.")
    legend_blocked = fields.Char(
        'Red Kanban Label', default=lambda self: _('Blocked'), translate=True, required=True)
    legend_done = fields.Char(
        'Green Kanban Label', default=lambda self: _('Ready for Next Stage'), translate=True, required=True)
    legend_normal = fields.Char(
        'Grey Kanban Label', default=lambda self: _('In Progress'), translate=True, required=True)

    @api.model
    def default_get(self, fields):
        # When a default_job_id is in the context but the caller did not ask
        # for a job-specific stage (hr_recruitment_stage_mono flag), drop the
        # default so the new stage is not silently bound to that job.
        if self._context and self._context.get('default_job_id') and not self._context.get('hr_recruitment_stage_mono', False):
            context = dict(self._context)
            context.pop('default_job_id')
            self = self.with_context(context)
        return super(RecruitmentStage, self).default_get(fields)
def _import_image_by_url(self, url, session, field, line_number):
    """ Imports an image by URL

    :param str url: the original field value
    :param requests.Session session: session used for the download (streaming)
    :param str field: name of the field (for logging/debugging)
    :param int line_number: 0-indexed line number within the imported file (for logging/debugging)
    :return: the replacement value (base64-encoded image content)
    :rtype: bytes
    :raises ValueError: on download failure, oversized file, or excessive
        image dimensions (any error is wrapped into one message below)
    """
    maxsize = int(config.get("import_image_maxbytes", DEFAULT_IMAGE_MAXBYTES))
    try:
        response = session.get(url, timeout=int(config.get("import_image_timeout", DEFAULT_IMAGE_TIMEOUT)))
        response.raise_for_status()

        # Fast reject when the server announces an oversized payload.
        if response.headers.get('Content-Length') and int(response.headers['Content-Length']) > maxsize:
            raise ValueError(_("File size exceeds configured maximum (%s bytes)") % maxsize)

        # Stream the body and enforce the cap even without Content-Length.
        content = bytearray()
        for chunk in response.iter_content(DEFAULT_IMAGE_CHUNK_SIZE):
            content += chunk
            if len(content) > maxsize:
                raise ValueError(_("File size exceeds configured maximum (%s bytes)") % maxsize)

        image = Image.open(io.BytesIO(content))
        w, h = image.size
        if w * h > 42e6:  # Nokia Lumia 1020 photo resolution
            raise ValueError(
                u"Image size excessive, imported images must be smaller "
                u"than 42 million pixel")

        return base64.b64encode(content)
    except Exception as e:
        # NOTE: this also re-wraps the ValueErrors raised above, so every
        # failure surfaces with the URL / field / line context.
        raise ValueError(
            _("Could not retrieve URL: %(url)s [%(field_name)s: L%(line_number)d]: %(error)s"
              ) % {
                'url': url,
                'field_name': field,
                'line_number': line_number + 1,
                'error': e
            })
def _read_file(self, options):
    """ Dispatch to specific method to read file content, according to its mimetype or file type

        :param options : dict of reading options (quoting, separator, ...)
        :raises ImportError: when a required Python module is missing
        :raises ValueError: when the file format is not supported at all
    """
    self.ensure_one()
    # guess mimetype from file content
    mimetype = guess_mimetype(self.file or b'')
    (file_extension, handler, req) = FILE_TYPE_DICT.get(mimetype, (None, None, None))
    if handler:
        try:
            return getattr(self, '_read_' + file_extension)(options)
        except Exception:
            # Logger.warn is a deprecated alias of Logger.warning.
            _logger.warning(
                "Failed to read file '%s' (transient id %d) using guessed mimetype %s",
                self.file_name or '<unknown>', self.id, mimetype)

    # try reading with user-provided mimetype
    (file_extension, handler, req) = FILE_TYPE_DICT.get(self.file_type, (None, None, None))
    if handler:
        try:
            return getattr(self, '_read_' + file_extension)(options)
        except Exception:
            _logger.warning(
                "Failed to read file '%s' (transient id %d) using user-provided mimetype %s",
                self.file_name or '<unknown>', self.id, self.file_type)

    # fallback on file extensions as mime types can be unreliable (e.g.
    # software setting incorrect mime types, or non-installed software
    # leading to browser not sending mime types)
    if self.file_name:
        p, ext = os.path.splitext(self.file_name)
        if ext in EXTENSIONS:
            try:
                return getattr(self, '_read_' + ext[1:])(options)
            except Exception:
                _logger.warning(
                    "Failed to read file '%s' (transient id %s) using file extension",
                    self.file_name, self.id)

    if req:
        raise ImportError(
            _("Unable to load \"{extension}\" file: requires Python module \"{modname}\""
              ).format(extension=file_extension, modname=req))
    raise ValueError(
        _("Unsupported file format \"{}\", import only supports CSV, ODS, XLS and XLSX"
          ).format(self.file_type))
def get_google_drive_url(self, res_id, template_id):
    """Return the Google Drive URL of the document attached to ``res_id``,
    creating it from ``template_id`` when no attachment exists yet.

    :param int res_id: id of the record the document is linked to
    :param template_id: id of the Google Drive template document
    :return: the document URL, or False
    :raises UserError: when the name pattern references a missing key
    """
    self.ensure_one()
    self = self.sudo()
    model = self.model_id
    filter_name = self.filter_id.name if self.filter_id else False
    record = self.env[model.model].browse(res_id).read()[0]
    record.update({'model': model.name, 'filter': filter_name})
    name_gdocs = self.name_template
    try:
        name_gdocs = name_gdocs % record
    except (KeyError, ValueError, TypeError):
        # %-formatting with a dict raises KeyError for an unknown key and
        # ValueError/TypeError for a malformed placeholder. The original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        raise UserError(
            _("At least one key cannot be found in your Google Drive name pattern."))

    attachments = self.env["ir.attachment"].search([
        ('res_model', '=', model.model),
        ('name', '=', name_gdocs),
        ('res_id', '=', res_id)
    ])
    url = False
    if attachments:
        url = attachments[0].url
    else:
        url = self.copy_doc(res_id, template_id, name_gdocs, model.model).get('url')
    return url
def execute_cr(cr, uid, obj, method, *args, **kw):
    """Call ``method`` on model ``obj`` within a fresh environment built on
    cursor ``cr`` for user ``uid``, forwarding positional and keyword args."""
    # Clean cache etc. in case the same transaction is retried.
    coffice.api.Environment.reset()
    env = coffice.api.Environment(cr, uid, {})
    records = env.get(obj)
    if records is None:
        raise UserError(_("Object %s doesn't exist") % obj)
    return coffice.api.call_kw(records, method, args, kw)
def _check_validity_dates(self):
    """Ensure each leave type's validity period is ordered (start <= stop)."""
    for leave_type in self:
        start = leave_type.validity_start
        stop = leave_type.validity_stop
        if start and stop and start > stop:
            raise ValidationError(
                _("End of validity period should be greater than start of validity period"
                  ))
def _message_get_suggested_recipients(self):
    """Suggest the applicant's partner, or their raw email address, as a recipient."""
    recipients = super(Applicant, self)._message_get_suggested_recipients()
    for applicant in self:
        if applicant.partner_id:
            applicant._message_add_suggested_recipient(
                recipients, partner=applicant.partner_id, reason=_('Contact'))
            continue
        if not applicant.email_from:
            continue
        email_from = applicant.email_from
        if applicant.partner_name:
            # Prepend the applicant's name to build "Name<email>".
            email_from = '%s<%s>' % (applicant.partner_name, email_from)
        applicant._message_add_suggested_recipient(
            recipients, email=email_from, reason=_('Contact Email'))
    return recipients
def test_import_from_csv_file(self):
    """Test the import from a single CSV file works"""
    with file_open('test_translation_import/i18n/dot.csv', 'rb') as f:
        # base64.encodestring() was deprecated since Python 3.1 and removed
        # in Python 3.9; encodebytes() is the supported replacement.
        po_file = base64.encodebytes(f.read())
    import_tlh = self.env["base.language.import"].create({
        'name': 'Dothraki',
        'code': 'dot',
        'data': po_file,
        'filename': 'dot.csv',
    })
    with mute_logger('coffice.addons.base.models.res_lang'):
        import_tlh.import_lang()

    dot_lang = self.env['res.lang']._lang_get('dot')
    self.assertTrue(dot_lang, "The imported language was not created")

    trans_count = self.env['ir.translation'].search_count([('lang', '=', 'dot')])
    self.assertEqual(trans_count, 1, "The imported translations were not created")

    self.env.context = dict(self.env.context, lang="dot")
    self.assertEqual(_("Accounting"), "samva", "The code translation was not applied")
def test_lazy_translation(self):
    """Test the import from a single po file works"""
    with file_open('test_translation_import/i18n/tlh.po', 'rb') as f:
        # base64.encodestring() was removed in Python 3.9; use encodebytes().
        po_file = base64.encodebytes(f.read())
    import_tlh = self.env["base.language.import"].create({
        'name': 'Klingon',
        'code': 'tlh',
        'data': po_file,
        'filename': 'tlh.po',
    })
    with mute_logger('coffice.addons.base.models.res_lang'):
        import_tlh.import_lang()

    # The lazy `_` helper reads the local `context` variable by frame
    # inspection, hence the bare assignments below.
    context = {'lang': "tlh"}
    self.assertEqual(_("Klingon"), "tlhIngan", "The direct code translation was not applied")
    context = None

    # Comparison of lazy strings must be explicitely casted to string
    with self.assertRaises(NotImplementedError):
        TRANSLATED_TERM == "Klingon"
    self.assertEqual(str(TRANSLATED_TERM), "Klingon", "The translation should not be applied yet")

    context = {'lang': "tlh"}
    self.assertEqual(str(TRANSLATED_TERM), "tlhIngan", "The lazy code translation was not applied")
def test_import_from_po_file(self):
    """Test the import from a single po file works"""
    with file_open('test_translation_import/i18n/tlh.po', 'rb') as f:
        # base64.encodestring() was removed in Python 3.9; use encodebytes().
        po_file = base64.encodebytes(f.read())
    import_tlh = self.env["base.language.import"].create({
        'name': 'Klingon',
        'code': 'tlh',
        'data': po_file,
        'filename': 'tlh.po',
    })
    with mute_logger('coffice.addons.base.models.res_lang'):
        import_tlh.import_lang()

    tlh_lang = self.env['res.lang']._lang_get('tlh')
    self.assertTrue(tlh_lang, "The imported language was not created")

    trans_count = self.env['ir.translation'].search_count([('lang', '=', 'tlh')])
    self.assertEqual(trans_count, 1, "The imported translations were not created")

    self.env.context = dict(self.env.context, lang="tlh")
    self.assertEqual(_("Klingon"), "tlhIngan", "The code translation was not applied")
def activity_update(self):
    """Keep the approval activities in sync with each allocation's state."""
    to_clean = self.env['hr.leave.allocation']
    to_do = self.env['hr.leave.allocation']
    for allocation in self:
        note = _('New Allocation Request created by %s: %s Days of %s') % (
            allocation.create_uid.name,
            allocation.number_of_days,
            allocation.holiday_status_id.name)
        state = allocation.state
        if state in ('draft', 'refuse'):
            # Draft / refused requests lose their pending activities.
            to_clean |= allocation
        elif state == 'confirm':
            allocation.activity_schedule(
                'hr_holidays.mail_act_leave_allocation_approval',
                note=note,
                user_id=allocation.sudo()._get_responsible_for_approval().id or self.env.user.id)
        elif state == 'validate1':
            # First approval done: close it and schedule the second one.
            allocation.activity_feedback(['hr_holidays.mail_act_leave_allocation_approval'])
            allocation.activity_schedule(
                'hr_holidays.mail_act_leave_allocation_second_approval',
                note=note,
                user_id=allocation.sudo()._get_responsible_for_approval().id or self.env.user.id)
        elif state == 'validate':
            to_do |= allocation
    if to_clean:
        to_clean.activity_unlink([
            'hr_holidays.mail_act_leave_allocation_approval',
            'hr_holidays.mail_act_leave_allocation_second_approval'])
    if to_do:
        to_do.activity_feedback([
            'hr_holidays.mail_act_leave_allocation_approval',
            'hr_holidays.mail_act_leave_allocation_second_approval'])
def action_applications_email(self):
    """Open the applications that share one of these records' sender emails."""
    sender_emails = self.mapped('email_from')
    return {
        'type': 'ir.actions.act_window',
        'name': _('Applications'),
        'res_model': self._name,
        'view_mode': 'kanban,tree,form,pivot,graph,calendar,activity',
        'domain': [('email_from', 'in', sender_emails)],
    }
def _check_leave_type_validity(self):
    """Forbid allocating a leave type past its validity end date."""
    today = fields.Date.today()
    for allocation in self:
        leave_type = allocation.holiday_status_id
        if leave_type.validity_stop and leave_type.validity_stop < today:
            raise ValidationError(
                _('You can allocate %s only before %s') % (
                    leave_type.display_name, leave_type.validity_stop))
def get_empty_list_help(self, help):
    """Contextualize the empty-list help message for the current job."""
    contextualized = self.with_context(
        empty_list_help_model='hr.job',
        empty_list_help_id=self.env.context.get('default_job_id'),
        empty_list_help_document_name=_("job applicant"),
    )
    return super(Applicant, contextualized).get_empty_list_help(help)
def _compute_ressource_id(self):
    """Extract the Google resource id from each record's template URL."""
    result = {}
    for record in self:
        resource_id = self._get_key_from_url(record.google_drive_template_url)
        if not resource_id:
            raise UserError(_("Please enter a valid Google Document URL."))
        record.google_drive_resource_id = resource_id
    return result
def view_init(self, fields):
    """ Check some preconditions before the wizard executes. """
    active_ids = self._context.get('active_ids', [])
    for lead in self.env['crm.lead'].browse(active_ids):
        # A lead at 100% probability is already won (or dead): refuse it.
        if lead.probability == 100:
            raise UserError(
                _("Closed/Dead leads cannot be converted into opportunities."))
    return False
def open_track_speakers_list(self):
    """Open a partner view restricted to the speakers of these tracks."""
    speaker_ids = self.mapped('partner_id').ids
    return {
        'name': _('Speakers'),
        'domain': [('id', 'in', speaker_ids)],
        'view_mode': 'kanban,form',
        'res_model': 'res.partner',
        'view_id': False,
        'type': 'ir.actions.act_window',
    }
def extract(spec): """ Utility function that locates a node given a specification, remove it from the source and returns it. """ if len(spec): raise ValueError( _("Invalid specification for moved nodes: '%s'") % etree.tostring(spec)) pre_locate(spec) to_extract = locate_node(source, spec) if to_extract is not None: remove_element(to_extract) return to_extract else: raise ValueError( _("Element '%s' cannot be located in parent view") % etree.tostring(spec))
def _message_get_suggested_recipients(self):
    """Suggest the speaker's email when it differs from the partner's email."""
    recipients = super(Track, self)._message_get_suggested_recipients()
    for track in self:
        speaker_email = track.partner_email
        if speaker_email and speaker_email != track.partner_id.email:
            track._message_add_suggested_recipient(
                recipients, email=speaker_email, reason=_('Speaker Email'))
    return recipients
def action_approve(self):
    """First approval step of an allocation request.

    if validation_type == 'both': this method is the first approval approval
    if validation_type != 'both': this method calls action_validate() below
    """
    if any(holiday.state != 'confirm' for holiday in self):
        raise UserError(_('Allocation request must be confirmed ("To Approve") in order to approve it.'))

    current_employee = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)

    double_validation = self.filtered(lambda hol: hol.validation_type == 'both')
    double_validation.write({'state': 'validate1', 'first_approver_id': current_employee.id})
    # Single-step requests are validated outright.
    (self - double_validation).action_validate()
    self.activity_update()
def open_rating(self, token, rate, **kwargs):
    """Record a rating reached from an email link and render the feedback page."""
    assert rate in (1, 5, 10), "Incorrect rating"
    rating = request.env['rating.rating'].sudo().search([('access_token', '=', token)])
    if not rating:
        return request.not_found()
    rate_names = {
        1: _("highly dissatisfied"),
        5: _("not satisfied"),
        10: _("satisfied"),
    }
    rating.write({'rating': rate, 'consumed': True})
    lang = rating.partner_id.lang or get_lang(request.env).code
    return request.env['ir.ui.view'].with_context(lang=lang).render_template(
        'rating.rating_external_page_submit', {
            'rating': rating,
            'token': token,
            'rate_name': rate_names[rate],
            'rate': rate,
        })
def exp_about(extended=False):
    """Return information about the OpenERP Server.

    @param extended: if True then return version info
    @return string if extended is False else tuple
    """
    info = _('See http://openerp.com')
    return (info, coffice.release.version) if extended else info
def _notify_get_groups(self):
    """ Handle HR users and officers recipients that can validate or refuse
    holidays directly from email. """
    groups = super(HolidaysAllocation, self)._notify_get_groups()
    self.ensure_one()

    hr_actions = []
    if self.state == 'confirm':
        approve_link = self._notify_get_action_link('controller', controller='/allocation/validate')
        hr_actions.append({'url': approve_link, 'title': _('Approve')})
    if self.state in ['confirm', 'validate', 'validate1']:
        refuse_link = self._notify_get_action_link('controller', controller='/allocation/refuse')
        hr_actions.append({'url': refuse_link, 'title': _('Refuse')})

    holiday_user_group_id = self.env.ref('hr_holidays.group_hr_holidays_user').id
    new_group = (
        'group_hr_holidays_user',
        lambda pdata: pdata['type'] == 'user' and holiday_user_group_id in pdata['groups'],
        {'actions': hr_actions},
    )
    return [new_group] + groups
def name_get(self):
    """Append the remaining leave count to the type name when relevant."""
    if not self._context.get('employee_id'):
        # leave counts is based on employee_id, would be inaccurate if not based on correct employee
        return super(HolidaysType, self).name_get()
    res = []
    for record in self:
        name = record.name
        if record.allocation_type != 'no':
            remaining = float_round(record.virtual_remaining_leaves, precision_digits=2) or 0.0
            maximum = float_round(record.max_leaves, precision_digits=2) or 0.0
            unit = _(' hours') if record.request_unit == 'hour' else _(' days')
            count = _('%g remaining out of %g') % (remaining, maximum) + unit
            name = "%(name)s (%(count)s)" % {'name': name, 'count': count}
        res.append((record.id, name))
    return res