def website_set_ga_data(self, website_id, ga_client_id, ga_analytics_key):
    """Save the Google Analytics client id and key on a website.

    Restricted to members of ``base.group_system``. Errors are returned as
    a dict (not raised) so the JS caller can display them.

    :param website_id: id of the target website, or a falsy value to use
        the current website
    :param str ga_client_id: Google OAuth client id; must end with
        ``.apps.googleusercontent.com``
    :param str ga_analytics_key: Google Analytics tracking key
    :return: ``True`` on success, or ``{'error': {...}}`` on failure
    """
    if not request.env.user.has_group('base.group_system'):
        return {
            'error': {
                'title': _('Access Error'),
                'message': _('You do not have sufficient rights to perform that action.'),
            }
        }
    # Also reject a missing/empty client id: the original called
    # ga_client_id.endswith() unconditionally and crashed on None.
    if not ga_analytics_key or not ga_client_id or not ga_client_id.endswith('.apps.googleusercontent.com'):
        return {
            'error': {
                'title': _('Incorrect Client ID / Key'),
                'message': _('The Google Analytics Client ID or Key you entered seems incorrect.'),
            }
        }
    Website = request.env['website']
    current_website = Website.browse(website_id) if website_id else Website.get_current_website()
    request.env['res.config.settings'].create({
        'google_management_client_id': ga_client_id,
        'google_analytics_key': ga_analytics_key,
        'website_id': current_website.id,
    }).execute()
    return True
def _interval_dates(self, frequency, company):
    """ Compute the theoretical date from which account move lines should
    be fetched.

    :param str frequency: a valid value of the selection field on the
        object (daily, monthly, annually); frequencies are literal
        (daily means 24 hours and so on)
    :param recordset company: the company for which the closing is done
    :return: dict with ``interval_from`` (start of the interval),
        ``date_stop`` (end of the interval, always now()) and
        ``name_interval`` (translated closing label); dates are in their
        Eagle Database string representation
    """
    date_stop = datetime.utcnow()
    interval_from = None
    name_interval = ''
    if frequency == 'daily':
        interval_from = date_stop - timedelta(days=1)
        name_interval = _('Daily Closing')
    elif frequency == 'monthly':
        # Last day of the previous month: keeps the time of day and gives
        # the correct target month/year (incl. December of previous year).
        last_day_prev_month = date_stop.replace(day=1) - timedelta(days=1)
        # Clamp the day of month: a plain replace(month=...) raises
        # ValueError when the previous month is shorter (e.g. Mar 31 -> Feb 31).
        interval_from = last_day_prev_month.replace(
            day=min(date_stop.day, last_day_prev_month.day))
        name_interval = _('Monthly Closing')
    elif frequency == 'annually':
        year_target = date_stop.year - 1
        try:
            interval_from = date_stop.replace(year=year_target)
        except ValueError:
            # Feb 29 has no equivalent in a non-leap target year.
            interval_from = date_stop.replace(year=year_target, day=28)
        name_interval = _('Annual Closing')
    return {
        'interval_from': FieldDateTime.to_string(interval_from),
        'date_stop': FieldDateTime.to_string(date_stop),
        'name_interval': name_interval,
    }
def format_failure_reason(self):
    """Return a human-readable description of this record's failure."""
    self.ensure_one()
    if self.failure_type == 'UNKNOWN':
        # Unknown failures carry a free-text reason instead of a selection value.
        return _("Unknown error") + ": %s" % (self.failure_reason or '')
    # Map the selection value to its label; fall back on a neutral message.
    selection_labels = dict(type(self).failure_type.selection)
    return selection_labels.get(self.failure_type, _('No Error'))
def _parse_date_from_data(self, data, index, name, field_type, options):
    """Parse, in place, the date/datetime column ``index`` of ``data``
    into the server string format, using the user-supplied formats from
    ``options`` (``date_format`` / ``datetime_format``)."""
    strptime = datetime.datetime.strptime
    to_server_string = fields.Date.to_string if field_type == 'date' else fields.Datetime.to_string
    date_format = options.get('date_format')
    datetime_format = options.get('datetime_format')
    for row_number, row in enumerate(data):
        raw = row[index]
        if not raw:
            continue
        stripped = raw.strip()
        try:
            # First attempt the datetime format when the field is a datetime.
            if datetime_format and field_type == 'datetime':
                try:
                    row[index] = to_server_string(strptime(stripped, datetime_format))
                    continue
                except ValueError:
                    pass
            # Otherwise fall back to the plain date format, whether the
            # field is a date or a datetime.
            row[index] = to_server_string(strptime(stripped, date_format))
        except ValueError as e:
            raise ValueError(_("Column %s contains incorrect values. Error in line %d: %s") % (name, row_number + 1, e))
        except Exception as e:
            raise ValueError(_("Error Parsing Date [%s:L%d]: %s") % (name, row_number + 1, e))
def insert_record(self, request, model, values, custom, meta=None):
    """Create a record of ``model`` from website form ``values`` and
    attach any extra (non-field) form data to it, either in the model's
    configured default field or as a posted message."""
    model_name = model.sudo().model
    record = request.env[model_name].sudo().with_context(
        mail_create_nosubscribe=True).create(values)

    if not custom and not meta:
        return record.id

    # Title for custom fields
    if model_name == 'mail.mail':
        _custom_label = "%s\n___________\n\n" % _("This message has been posted on your website!")
    else:
        _custom_label = "%s\n___________\n\n" % _("Other Information:")

    default_field = model.website_form_default_field_id
    default_field_data = values.get(default_field.name, '')
    sections = []
    if default_field_data:
        sections.append(default_field_data + "\n\n")
    if custom:
        sections.append(_custom_label + custom + "\n\n")
    if meta:
        sections.append(self._meta_label + meta)
    custom_content = ''.join(sections)

    # If there is a default field configured for this model, use it.
    # If there isn't, put the custom data in a message instead.
    if default_field.name:
        if default_field.ttype == 'html' or model_name == 'mail.mail':
            custom_content = nl2br(custom_content)
        record.update({default_field.name: custom_content})
    else:
        request.env['mail.message'].sudo().create({
            'body': nl2br(custom_content),
            'model': model_name,
            'message_type': 'comment',
            'no_auto_thread': False,
            'res_id': record.id,
        })
    return record.id
class RecruitmentStage(models.Model):
    """Stage of the recruitment kanban pipeline for applicants."""
    _name = "hr.recruitment.stage"
    _description = "Recruitment Stages"
    _order = 'sequence'

    name = fields.Char("Stage name", required=True, translate=True)
    sequence = fields.Integer(
        "Sequence", default=10,
        help="Gives the sequence order when displaying a list of stages.")
    # When set, the stage is restricted to a single job position.
    job_id = fields.Many2one('hr.job', string='Job Specific',
                             ondelete='cascade',
                             help='Specific job that uses this stage. Other jobs will not use this stage.')
    requirements = fields.Text("Requirements")
    # Optional mail template posted on the applicant upon entering the stage.
    template_id = fields.Many2one(
        'mail.template', "Automated Email",
        help="If set, a message is posted on the applicant using the template when the applicant is set to the stage.")
    fold = fields.Boolean(
        "Folded in Recruitment Pipe",
        help="This stage is folded in the kanban view when there are no records in that stage to display.")
    # Kanban state labels, customizable per stage.
    legend_blocked = fields.Char(
        'Red Kanban Label', default=lambda self: _('Blocked'), translate=True, required=True)
    legend_done = fields.Char(
        'Green Kanban Label', default=lambda self: _('Ready for Next Stage'), translate=True, required=True)
    legend_normal = fields.Char(
        'Grey Kanban Label', default=lambda self: _('In Progress'), translate=True, required=True)

    @api.model
    def default_get(self, fields):
        # Drop 'default_job_id' from the context unless explicitly kept via
        # 'hr_recruitment_stage_mono', so new stages are shared by default.
        ctx = self._context
        if ctx and ctx.get('default_job_id') and not ctx.get('hr_recruitment_stage_mono', False):
            trimmed = dict(ctx)
            trimmed.pop('default_job_id')
            self = self.with_context(trimmed)
        return super(RecruitmentStage, self).default_get(fields)
def name_get(self):
    """Display name: leave type, amount (hours or days) and beneficiary."""
    res = []
    for allocation in self:
        # Resolve the beneficiary according to the allocation mode.
        mode = allocation.holiday_type
        if mode == 'company':
            target = allocation.mode_company_id.name
        elif mode == 'department':
            target = allocation.department_id.name
        elif mode == 'category':
            target = allocation.category_id.name
        else:
            target = allocation.employee_id.name
        if allocation.type_request_unit == 'hour':
            label = _("Allocation of %s : %.2f hour(s) to %s") % (
                allocation.holiday_status_id.name,
                allocation.number_of_hours_display,
                target)
        else:
            label = _("Allocation of %s : %.2f day(s) to %s") % (
                allocation.holiday_status_id.name,
                allocation.number_of_days,
                target)
        res.append((allocation.id, label))
    return res
def write(self, vals):
    """French anti-fraud (CGI 286 I-3 bis) write override: forbid edits to
    posted entries, protect the hash fields, and chain-hash entries when
    they get posted."""
    has_been_posted = False
    for move in self:
        if not move.company_id._is_accounting_unalterable():
            continue
        # Remember that this write posts moves: the hash is computed after super().
        if vals.get('state') == 'posted':
            has_been_posted = True
        # Posted entries may not have their legally-protected fields changed.
        if move.state == "posted" and set(vals).intersection(MOVE_FIELDS):
            raise UserError(
                _("According to the French law, you cannot modify a journal entry in order for its posted data to be updated or deleted. Unauthorized field: %s.") % ', '.join(MOVE_FIELDS))
        # An existing hash / secure sequence number may never be overwritten.
        overwrites_hash = move.l10n_fr_hash and 'l10n_fr_hash' in vals
        overwrites_seq = move.l10n_fr_secure_sequence_number and 'l10n_fr_secure_sequence_number' in vals
        if overwrites_hash or overwrites_seq:
            raise UserError(
                _('You cannot overwrite the values ensuring the inalterability of the accounting.'))
    res = super(AccountMove, self).write(vals)
    # Write the hash and the secure_sequence_number when posting an account.move.
    if has_been_posted:
        needing_hash = self.filtered(
            lambda m: m.company_id._is_accounting_unalterable()
            and not (m.l10n_fr_secure_sequence_number or m.l10n_fr_hash))
        for move in needing_hash:
            new_number = move.company_id.l10n_fr_secure_sequence_id.next_by_id()
            res |= super(AccountMove, move).write({
                'l10n_fr_secure_sequence_number': new_number,
                'l10n_fr_hash': move._get_new_hash(new_number),
            })
    return res
def _check_leave_type_validity(self):
    """Ensure each leave falls inside the validity period of its leave type."""
    for leave in self:
        status = leave.holiday_status_id
        vstart = status.validity_start
        vstop = status.validity_stop
        dfrom = leave.date_from
        dto = leave.date_to
        if vstart and vstop:
            # Bounded on both sides: the leave must fit entirely in the window.
            if dfrom and dto and (dfrom.date() < vstart or dto.date() > vstop):
                raise UserError(
                    _('You can take %s only between %s and %s') % (
                        status.display_name, vstart, vstop))
        elif vstart:
            # Only a lower bound on the validity period.
            if dfrom and dfrom.date() < vstart:
                raise UserError(
                    _('You can take %s from %s') % (status.display_name, vstart))
        elif vstop:
            # Only an upper bound on the validity period.
            if dto and dto.date() > vstop:
                raise UserError(
                    _('You can take %s until %s') % (status.display_name, vstop))
def name_get(self):
    """Display name for leaves: short form (name + amount) when the
    'short_name' context key is set, otherwise target + leave type + amount."""
    res = []
    for leave in self:
        if self.env.context.get('short_name'):
            short_label = leave.name or leave.holiday_status_id.name
            if leave.leave_type_request_unit == 'hour':
                display = _("%s : %.2f hour(s)") % (short_label, leave.number_of_hours_display)
            else:
                display = _("%s : %.2f day(s)") % (short_label, leave.number_of_days)
        else:
            # Resolve who the leave applies to, depending on its mode.
            if leave.holiday_type == 'company':
                target = leave.mode_company_id.name
            elif leave.holiday_type == 'department':
                target = leave.department_id.name
            elif leave.holiday_type == 'category':
                target = leave.category_id.name
            else:
                target = leave.employee_id.name
            if leave.leave_type_request_unit == 'hour':
                display = _("%s on %s : %.2f hour(s)") % (
                    target, leave.holiday_status_id.name, leave.number_of_hours_display)
            else:
                display = _("%s on %s : %.2f day(s)") % (
                    target, leave.holiday_status_id.name, leave.number_of_days)
        res.append((leave.id, display))
    return res
def _notify_get_groups(self, message, groups):
    """ Handle HR users and officers recipients that can validate or refuse holidays
    directly from email. """
    groups = super(HolidaysRequest, self)._notify_get_groups(message, groups)
    self.ensure_one()
    hr_actions = []
    # Approve button only while the request awaits a first validation.
    if self.state == 'confirm':
        hr_actions.append({
            'url': self._notify_get_action_link('controller', controller='/leave/validate'),
            'title': _('Approve'),
        })
    # Refuse button for any not-yet-final state.
    if self.state in ['confirm', 'validate', 'validate1']:
        hr_actions.append({
            'url': self._notify_get_action_link('controller', controller='/leave/refuse'),
            'title': _('Refuse'),
        })
    holiday_user_group_id = self.env.ref('hr_holidays.group_hr_holidays_user').id

    def _is_holiday_user(pdata):
        return pdata['type'] == 'user' and holiday_user_group_id in pdata['groups']

    new_group = ('group_hr_holidays_user', _is_holiday_user, {'actions': hr_actions})
    return [new_group] + groups
def _check_approval_update(self, state):
    """ Check if target state is achievable.

    Raises UserError when the current user lacks the rights required to
    move the given holidays to ``state``.
    """
    current_employee = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)
    is_officer = self.env.user.has_group('hr_holidays.group_hr_holidays_user')
    is_manager = self.env.user.has_group('hr_holidays.group_hr_holidays_manager')
    for holiday in self:
        val_type = holiday.holiday_status_id.validation_type
        if state == 'confirm':
            continue
        if state == 'draft':
            if holiday.employee_id != current_employee and not is_manager:
                raise UserError(_('Only a Leave Manager can reset other people leaves.'))
            continue
        if not is_officer:
            raise UserError(_('Only a Leave Officer or Manager can approve or refuse leave requests.'))
        # Past this point the user is at least an officer — the original's
        # redundant `if is_officer:` nesting after the raise was flattened.
        # use ir.rule based first access check: department, members, ... (see security.xml)
        holiday.check_access_rule('write')
        if holiday.employee_id == current_employee and not is_manager:
            raise UserError(_('Only a Leave Manager can approve its own requests.'))
        if (state == 'validate1' and val_type == 'both') or (state == 'validate' and val_type == 'manager'):
            manager = holiday.employee_id.parent_id or holiday.employee_id.department_id.manager_id
            if (manager and manager != current_employee) and not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):
                raise UserError(_('You must be either %s\'s manager or Leave manager to approve this leave') % (holiday.employee_id.name))
        if state == 'validate' and val_type == 'both':
            if not self.env.user.has_group('hr_holidays.group_hr_holidays_manager'):
                # Grammar fix: was "Only an Leave Manager ..."
                raise UserError(_('Only a Leave Manager can apply the second approval on leave requests.'))
def _compute_duration_display(self):
    """Human-readable amount of the allocation, in hours or days."""
    for allocation in self:
        if allocation.type_request_unit == 'hour':
            amount = float_round(allocation.number_of_hours_display, precision_digits=2)
            unit = _('hours')
        else:
            amount = float_round(allocation.number_of_days_display, precision_digits=2)
            unit = _('days')
        allocation.duration_display = '%g %s' % (amount, unit)
def _compute_duration_display(self):
    """Human-readable duration of the leave, in hour(s) or day(s)."""
    for leave in self:
        if leave.leave_type_request_unit == 'hour':
            amount = float_round(leave.number_of_hours_display, precision_digits=2)
            unit = _('hour(s)')
        else:
            amount = float_round(leave.number_of_days_display, precision_digits=2)
            unit = _('day(s)')
        leave.duration_display = '%g %s' % (amount, unit)
def action_validate(self):
    """Approve the holidays (final validation step).

    For non-employee requests (category / company / department) this also
    creates and validates one child leave per covered employee, after
    checking that none of them already has an overlapping leave.

    :raise UserError: if a holiday is not in a confirmable state
    :raise ValidationError: if a generated leave would overlap an existing one
    :return: True
    """
    current_employee = self.env['hr.employee'].search(
        [('user_id', '=', self.env.uid)], limit=1)
    if any(holiday.state not in ['confirm', 'validate1'] for holiday in self):
        raise UserError(
            _('Leave request must be confirmed in order to approve it.'))
    self.write({'state': 'validate'})
    # Record who performed this approval: the second approver for
    # double-validation leave types, the first approver otherwise.
    self.filtered(lambda holiday: holiday.validation_type == 'both').write(
        {'second_approver_id': current_employee.id})
    self.filtered(lambda holiday: holiday.validation_type != 'both').write(
        {'first_approver_id': current_employee.id})
    # Fan out batch requests (category/company/department) into individual
    # employee leaves.
    for holiday in self.filtered(
            lambda holiday: holiday.holiday_type != 'employee'):
        if holiday.holiday_type == 'category':
            employees = holiday.category_id.employee_ids
        elif holiday.holiday_type == 'company':
            employees = self.env['hr.employee'].search([
                ('company_id', '=', holiday.mode_company_id.id)
            ])
        else:
            employees = holiday.department_id.member_ids
        # Refuse the whole batch if any covered employee already has a
        # leave overlapping this period.
        if self.env['hr.leave'].search_count([
                ('date_from', '<=', holiday.date_to),
                ('date_to', '>', holiday.date_from),
                ('state', 'not in', ['cancel', 'refuse']),
                ('holiday_type', '=', 'employee'),
                ('employee_id', 'in', employees.ids)]):
            raise ValidationError(
                _('You can not have 2 leaves that overlaps on the same day.'))
        values = [
            holiday._prepare_holiday_values(employee)
            for employee in employees
        ]
        # leave_fast_create skips the heavy mail/activity machinery for
        # the generated child leaves.
        leaves = self.env['hr.leave'].with_context(
            tracking_disable=True,
            mail_activity_automation_skip=True,
            leave_fast_create=True,
        ).create(values)
        leaves.action_approve()
        # FIXME RLi: This does not make sense, only the parent should be in validation_type both
        if leaves and leaves[0].validation_type == 'both':
            leaves.action_validate()
    employee_requests = self.filtered(
        lambda hol: hol.holiday_type == 'employee')
    employee_requests._validate_leave_request()
    if not self.env.context.get('leave_fast_create'):
        employee_requests.activity_update()
    return True
def copy_doc(self, res_id, template_id, name_gdocs, res_model):
    """Copy a Google Drive template document and attach the copy to a record.

    :param int res_id: id of the record the new document is attached to
    :param str template_id: Google Drive id of the template to copy
    :param str name_gdocs: title of the new Google document
    :param str res_model: model of the record
    :return: dict with the new attachment id ('id') and document URL ('url'),
        or an empty dict if Google returned no 'alternateLink'
    :raise UserError: if the template cannot be fetched from Google Drive
    """
    google_web_base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
    access_token = self.get_access_token()
    # Copy template in to drive with help of new access token
    request_url = "https://www.googleapis.com/drive/v2/files/%s?fields=parents/id&access_token=%s" % (template_id, access_token)
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    try:
        # Fetch the template's parent folder(s) so the copy lands next to it.
        req = requests.get(request_url, headers=headers, timeout=TIMEOUT)
        req.raise_for_status()
        parents_dict = req.json()
    except requests.HTTPError:
        raise UserError(_("The Google Template cannot be found. Maybe it has been deleted."))
    # Back-link stored in the document description so users can reach the record.
    record_url = "Click on link to open Record in Eagle\n %s/?db=%s#id=%s&model=%s" % (google_web_base_url, self._cr.dbname, res_id, res_model)
    data = {
        "title": name_gdocs,
        "description": record_url,
        "parents": parents_dict['parents']
    }
    request_url = "https://www.googleapis.com/drive/v2/files/%s/copy?access_token=%s" % (template_id, access_token)
    headers = {
        'Content-type': 'application/json',
        'Accept': 'text/plain'
    }
    # resp, content = Http().request(request_url, "POST", data_json, headers)
    req = requests.post(request_url, data=json.dumps(data), headers=headers, timeout=TIMEOUT)
    req.raise_for_status()
    content = req.json()
    res = {}
    if content.get('alternateLink'):
        # Attach the new document to the record as a URL attachment.
        res['id'] = self.env["ir.attachment"].create({
            'res_model': res_model,
            'name': name_gdocs,
            'res_id': res_id,
            'type': 'url',
            'url': content['alternateLink']
        }).id
        # Commit in order to attach the document to the current object instance, even if the permissions has not been written.
        self._cr.commit()
        res['url'] = content['alternateLink']
        key = self._get_key_from_url(res['url'])
        request_url = "https://www.googleapis.com/drive/v2/files/%s/permissions?emailMessage=This+is+a+drive+file+created+by+Eagle&sendNotificationEmails=false&access_token=%s" % (key, access_token)
        # Open the document to anyone with the link (writer role).
        data = {'role': 'writer', 'type': 'anyone', 'value': '', 'withLink': True}
        try:
            req = requests.post(request_url, data=json.dumps(data), headers=headers, timeout=TIMEOUT)
            req.raise_for_status()
        except requests.HTTPError:
            raise self.env['res.config.settings'].get_config_warning(_("The permission 'reader' for 'anyone with the link' has not been written on the document"))
        # Also grant the current user explicit writer access; best-effort only.
        if self.env.user.email:
            data = {'role': 'writer', 'type': 'user', 'value': self.env.user.email}
            try:
                requests.post(request_url, data=json.dumps(data), headers=headers, timeout=TIMEOUT)
            except requests.HTTPError:
                pass
    return res
def from_data(self, fields, rows):
    """Render ``rows`` (with header ``fields``) into an Excel 97-2003
    (.xls) workbook and return the raw file bytes."""
    if len(rows) > 65535:
        raise UserError(
            _('There are too many rows (%s rows, limit: 65535) to export as Excel 97-2003 (.xls) format. Consider splitting the export.') % len(rows))

    workbook = xlwt.Workbook()
    worksheet = workbook.add_sheet('Sheet 1')

    # Header row: one column per exported field.
    for col, fieldname in enumerate(fields):
        worksheet.write(0, col, fieldname)
        worksheet.col(col).width = 8000  # around 220 pixels

    base_style = xlwt.easyxf('align: wrap yes')
    date_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD')
    datetime_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD HH:mm:SS')

    for row_index, row in enumerate(rows):
        for cell_index, cell_value in enumerate(row):
            cell_style = base_style
            if isinstance(cell_value, bytes) and not isinstance(cell_value, pycompat.string_types):
                # because xls uses raw export, we can get a bytes object
                # here. xlwt does not support bytes values in Python 3 ->
                # assume this is base64 and decode to a string, if this
                # fails note that you can't export
                try:
                    cell_value = pycompat.to_text(cell_value)
                except UnicodeDecodeError:
                    raise UserError(
                        _("Binary fields can not be exported to Excel unless their content is base64-encoded. That does not seem to be the case for %s.") % fields[cell_index])
            if isinstance(cell_value, pycompat.string_types):
                cell_value = re.sub("\r", " ", pycompat.to_text(cell_value))
                # Excel supports a maximum of 32767 characters in each cell:
                cell_value = cell_value[:32767]
            elif isinstance(cell_value, datetime.datetime):
                cell_style = datetime_style
            elif isinstance(cell_value, datetime.date):
                cell_style = date_style
            worksheet.write(row_index + 1, cell_index, cell_value, cell_style)

    stream = io.BytesIO()
    workbook.save(stream)
    stream.seek(0)
    data = stream.getvalue()
    stream.close()
    return data
def _import_image_by_url(self, url, session, field, line_number):
    """ Imports an image by URL

    :param str url: the original field value
    :param requests.Session session:
    :param str field: name of the field (for logging/debugging)
    :param int line_number: 0-indexed line number within the imported file (for logging/debugging)
    :return: the replacement value
    :rtype: bytes
    """
    maxsize = int(config.get("import_image_maxbytes", DEFAULT_IMAGE_MAXBYTES))
    try:
        timeout = int(config.get("import_image_timeout", DEFAULT_IMAGE_TIMEOUT))
        response = session.get(url, timeout=timeout)
        response.raise_for_status()

        # Reject early based on the declared size, when the server sends one.
        declared_length = response.headers.get('Content-Length')
        if declared_length and int(declared_length) > maxsize:
            raise ValueError(
                _("File size exceeds configured maximum (%s bytes)") % maxsize)

        # Stream the body and enforce the limit on the actual byte count too.
        content = bytearray()
        for chunk in response.iter_content(DEFAULT_IMAGE_CHUNK_SIZE):
            content += chunk
            if len(content) > maxsize:
                raise ValueError(
                    _("File size exceeds configured maximum (%s bytes)") % maxsize)

        image = Image.open(io.BytesIO(content))
        w, h = image.size
        if w * h > 42e6:  # Nokia Lumia 1020 photo resolution
            raise ValueError(
                u"Image size excessive, imported images must be smaller "
                u"than 42 million pixel")
        return base64.b64encode(content)
    except Exception as e:
        raise ValueError(
            _("Could not retrieve URL: %(url)s [%(field_name)s: L%(line_number)d]: %(error)s") % {
                'url': url,
                'field_name': field,
                'line_number': line_number + 1,
                'error': e,
            })
def _check_hash_integrity(self, company_id):
    """Checks that all posted moves have still the same data as when
    they were posted and raises an error with the result.

    Walks every hashed posted move of ``company_id`` in secure-sequence
    order, recomputes each hash chained on the previous one, and compares
    it to the stored hash. NOTE: this method *always* raises — a UserError
    describing the verified range on success, or an error on any failure.
    """
    def build_move_info(move):
        # Returns [name, "(ref.: ...)"] — the ref part is empty when the
        # move has no reference.
        entry_reference = _('(ref.: %s)')
        move_reference_string = move.ref and entry_reference % move.ref or ''
        return [move.name, move_reference_string]

    # Only moves flagged with a secure sequence number participate in the
    # hash chain (i.e. created after the module was installed).
    moves = self.search([('state', '=', 'posted'),
                         ('company_id', '=', company_id),
                         ('l10n_fr_secure_sequence_number', '!=', 0)],
                        order="l10n_fr_secure_sequence_number ASC")
    if not moves:
        raise UserError(
            _('There isn\'t any journal entry flagged for data inalterability yet for the company %s. This mechanism only runs for journal entries generated after the installation of the module France - Certification CGI 286 I-3 bis.') % self.env.user.company_id.name)
    previous_hash = u''
    start_move_info = []
    for move in moves:
        # Each stored hash must match the hash recomputed from the move's
        # data chained on the previous move's hash.
        if move.l10n_fr_hash != move._compute_hash(previous_hash=previous_hash):
            raise UserError(
                _('Corrupted data on journal entry with id %s.') % move.id)
        if not previous_hash:
            #save the date and sequence number of the first move hashed
            start_move_info = build_move_info(move)
        previous_hash = move.l10n_fr_hash
    end_move_info = build_move_info(move)
    report_dict = {
        'start_move_name': start_move_info[0],
        'start_move_ref': start_move_info[1],
        'end_move_name': end_move_info[0],
        'end_move_ref': end_move_info[1]
    }
    # Raise on success
    raise UserError(
        _('''Successful test ! The journal entries are guaranteed to be in their original and inalterable state From: %(start_move_name)s %(start_move_ref)s To: %(end_move_name)s %(end_move_ref)s For this report to be legally meaningful, please download your certification from your customer account on Eagle.com (Only for Eagle Enterprise users).''') % report_dict)
def _read_file(self, options):
    """ Dispatch to specific method to read file content, according to its mimetype or file type

    Tries, in order: the mimetype guessed from the file content, the
    user-provided mimetype, then the file extension. Raises when nothing
    can read the file.

    :param options : dict of reading options (quoting, separator, ...)
    :raise ImportError: if the format needs a Python module that is missing
    :raise ValueError: if the file format is not supported at all
    """
    self.ensure_one()
    # guess mimetype from file content
    mimetype = guess_mimetype(self.file or b'')
    (file_extension, handler, req) = FILE_TYPE_DICT.get(mimetype, (None, None, None))
    if handler:
        try:
            return getattr(self, '_read_' + file_extension)(options)
        except Exception:
            # logging.Logger.warn is a deprecated alias of warning
            _logger.warning(
                "Failed to read file '%s' (transient id %d) using guessed mimetype %s",
                self.file_name or '<unknown>', self.id, mimetype)

    # try reading with user-provided mimetype
    (file_extension, handler, req) = FILE_TYPE_DICT.get(self.file_type, (None, None, None))
    if handler:
        try:
            return getattr(self, '_read_' + file_extension)(options)
        except Exception:
            _logger.warning(
                "Failed to read file '%s' (transient id %d) using user-provided mimetype %s",
                self.file_name or '<unknown>', self.id, self.file_type)

    # fallback on file extensions as mime types can be unreliable (e.g.
    # software setting incorrect mime types, or non-installed software
    # leading to browser not sending mime types)
    if self.file_name:
        p, ext = os.path.splitext(self.file_name)
        if ext in EXTENSIONS:
            try:
                return getattr(self, '_read_' + ext[1:])(options)
            except Exception:
                _logger.warning(
                    "Failed to read file '%s' (transient id %s) using file extension",
                    self.file_name, self.id)

    if req:
        raise ImportError(
            _("Unable to load \"{extension}\" file: requires Python module \"{modname}\"").format(extension=file_extension, modname=req))
    raise ValueError(
        _("Unsupported file format \"{}\", import only supports CSV, ODS, XLS and XLSX").format(self.file_type))
def create_employee_from_applicant(self):
    """ Create an hr.employee from the hr.applicants

    For each applicant, creates (if needed) a contact partner, then an
    employee linked to the applicant, bumps the job's hired counter and
    posts the 'hired' messages. Returns the employee form action opened
    on the last employee created.
    """
    employee = False
    for applicant in self:
        contact_name = False
        if applicant.partner_id:
            address_id = applicant.partner_id.address_get(['contact'])['contact']
            contact_name = applicant.partner_id.display_name
        else:
            if not applicant.partner_name:
                raise UserError(_('You must define a Contact Name for this applicant.'))
            new_partner_id = self.env['res.partner'].create({
                'is_company': False,
                'name': applicant.partner_name,
                'email': applicant.email_from,
                'phone': applicant.partner_phone,
                'mobile': applicant.partner_mobile
            })
            address_id = new_partner_id.address_get(['contact'])['contact']
        if applicant.partner_name or contact_name:
            employee = self.env['hr.employee'].create({
                'name': applicant.partner_name or contact_name,
                'job_id': applicant.job_id.id or False,
                'job_title': applicant.job_id.name,
                'address_home_id': address_id,
                'department_id': applicant.department_id.id or False,
                'address_id': applicant.company_id and applicant.company_id.partner_id and applicant.company_id.partner_id.id or False,
                'work_email': applicant.department_id and applicant.department_id.company_id and applicant.department_id.company_id.email or False,
                'work_phone': applicant.department_id and applicant.department_id.company_id and applicant.department_id.company_id.phone or False})
            applicant.write({'emp_id': employee.id})
            if applicant.job_id:
                applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})
                # BUGFIX: '%' binds tighter than the conditional expression,
                # so without parentheses the fallback branch posted the bare
                # applicant name instead of the 'New Employee ... Hired' text.
                applicant.job_id.message_post(
                    body=_('New Employee %s Hired') % (applicant.partner_name if applicant.partner_name else applicant.name),
                    subtype="hr_recruitment.mt_job_applicant_hired")
            applicant.message_post_with_view(
                'hr_recruitment.applicant_hired_template',
                values={'applicant': applicant},
                subtype_id=self.env.ref("hr_recruitment.mt_applicant_hired").id)

    employee_action = self.env.ref('hr.open_view_employee_list')
    dict_act_window = employee_action.read([])[0]
    dict_act_window['context'] = {'form_view_initial_mode': 'edit'}
    # Guard against no employee having been created (no crash on False.id).
    dict_act_window['res_id'] = employee.id if employee else False
    return dict_act_window
def open_rating(self, token, rate, **kwargs):
    """Record the customer's rating (reached via an emailed link) and
    render the external feedback submission page."""
    assert rate in (1, 5, 10), "Incorrect rating"
    rating = request.env['rating.rating'].sudo().search([('access_token', '=', token)])
    if not rating:
        return request.not_found()
    # Human-readable label for each allowed rating value.
    rate_names = {
        1: _("highly dissatisfied"),
        5: _("not satisfied"),
        10: _("satisfied"),
    }
    rating.write({'rating': rate, 'consumed': True})
    # Render the page in the rated partner's language when known.
    lang = rating.partner_id.lang or get_lang(request.env).code
    return request.env['ir.ui.view'].with_context(lang=lang).render_template(
        'rating.rating_external_page_submit', {
            'rating': rating,
            'token': token,
            'rate_name': rate_names[rate],
            'rate': rate,
        })
def test_lazy_translation(self):
    """Test the import from a single po file works"""
    with file_open('test_translation_import/i18n/tlh.po', 'rb') as f:
        # base64.encodestring() was deprecated and removed in Python 3.9;
        # encodebytes() is the drop-in replacement with identical output.
        po_file = base64.encodebytes(f.read())
    import_tlh = self.env["base.language.import"].create({
        'name': 'Klingon',
        'code': 'tlh',
        'data': po_file,
        'filename': 'tlh.po',
    })
    with mute_logger('eagle.addons.base.models.res_lang'):
        import_tlh.import_lang()

    context = {'lang': "tlh"}
    self.assertEqual(_("Klingon"), "tlhIngan", "The direct code translation was not applied")
    context = None

    # Comparison of lazy strings must be explicitely casted to string
    with self.assertRaises(NotImplementedError):
        TRANSLATED_TERM == "Klingon"
    self.assertEqual(str(TRANSLATED_TERM), "Klingon", "The translation should not be applied yet")

    context = {'lang': "tlh"}
    self.assertEqual(str(TRANSLATED_TERM), "tlhIngan", "The lazy code translation was not applied")
def _onchange_update_posted(self):
    """Block enabling 'update_posted' on journals of companies subject to
    the French inalterability rules."""
    if not (self.update_posted and self.company_id._is_accounting_unalterable()):
        return
    field_string = self._fields['update_posted'].get_description(self.env)['string']
    raise UserError(
        _("According to the French law, you cannot modify a journal in order for its posted data to be updated or deleted. Unauthorized field: %s.") % field_string)
def _send_email(self):
    """ send notification email to a new portal user """
    if not self.env.user.email:
        raise UserError(
            _('You must have an email address in your User Preferences to send emails.'))

    # determine subject and body in the portal user's language
    template = self.env.ref('portal.mail_template_data_portal_welcome')
    for wizard_line in self:
        lang = wizard_line.user_id.lang
        partner = wizard_line.user_id.partner_id
        portal_url = partner.with_context(
            signup_force_type_in_url='',
            lang=lang)._get_signup_url_for_action()[partner.id]
        partner.signup_prepare()

        if not template:
            _logger.warning("No email template found for sending email to the portal user")
            continue
        template.with_context(
            dbname=self._cr.dbname,
            portal_url=portal_url,
            lang=lang).send_mail(wizard_line.id, force_send=True)

    return True
def test_import_from_csv_file(self):
    """Test the import from a single CSV file works"""
    with file_open('test_translation_import/i18n/dot.csv', 'rb') as f:
        # base64.encodestring() was deprecated and removed in Python 3.9;
        # encodebytes() is the drop-in replacement with identical output.
        po_file = base64.encodebytes(f.read())
    import_tlh = self.env["base.language.import"].create({
        'name': 'Dothraki',
        'code': 'dot',
        'data': po_file,
        'filename': 'dot.csv',
    })
    with mute_logger('eagle.addons.base.models.res_lang'):
        import_tlh.import_lang()

    dot_lang = self.env['res.lang']._lang_get('dot')
    # Fixed assertion-message typo ("was not creates").
    self.assertTrue(dot_lang, "The imported language was not created")

    trans_count = self.env['ir.translation'].search_count([('lang', '=', 'dot')])
    self.assertEqual(trans_count, 1, "The imported translations were not created")

    self.env.context = dict(self.env.context, lang="dot")
    self.assertEqual(_("Accounting"), "samva", "The code translation was not applied")
def _read_xls_book(self, book):
    """Yield the rows of the first sheet of an XLS/XLSX workbook as lists
    of text values, skipping entirely blank rows."""
    sheet = book.sheet_by_index(0)
    # emulate Sheet.get_rows for pre-0.9.4
    for row in pycompat.imap(sheet.row, range(sheet.nrows)):
        row_values = []
        for cell in row:
            ctype = cell.ctype
            if ctype is xlrd.XL_CELL_NUMBER:
                # Integral floats are rendered without a decimal part.
                if cell.value % 1 != 0.0:
                    row_values.append(pycompat.text_type(cell.value))
                else:
                    row_values.append(pycompat.text_type(int(cell.value)))
            elif ctype is xlrd.XL_CELL_DATE:
                is_datetime = cell.value % 1 != 0.0
                # emulate xldate_as_datetime for pre-0.9.3
                dt = datetime.datetime(*xlrd.xldate.xldate_as_tuple(
                    cell.value, book.datemode))
                if is_datetime:
                    row_values.append(dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
                else:
                    row_values.append(dt.strftime(DEFAULT_SERVER_DATE_FORMAT))
            elif ctype is xlrd.XL_CELL_BOOLEAN:
                row_values.append(u'True' if cell.value else u'False')
            elif ctype is xlrd.XL_CELL_ERROR:
                raise ValueError(
                    _("Error cell found while reading XLS/XLSX file: %s") %
                    xlrd.error_text_from_code.get(
                        cell.value, "unknown error code %s" % cell.value))
            else:
                row_values.append(cell.value)
        # Skip rows where every value is empty or whitespace.
        if any(x for x in row_values if x.strip()):
            yield row_values
def _check_validity_dates(self):
    """Ensure the validity period of each leave type is well ordered."""
    for leave_type in self:
        start = leave_type.validity_start
        stop = leave_type.validity_stop
        # Only enforce the ordering when both bounds are set.
        if start and stop and start > stop:
            raise ValidationError(
                _("End of validity period should be greater than start of validity period"))
def get_google_drive_config(self, res_model, res_id):
    '''
    Function called by the js, when no google doc are yet associated with a record, with the aim to create one. It
    will first seek for a google.docs.config associated with the model `res_model` to find out what's the template
    of google doc to copy (this is useful if you want to start with a non-empty document, a type or a name
    different than the default values). If no config is associated with the `res_model`, then a blank text document
    with a default name is created.
      :param res_model: the object for which the google doc is created
      :param ids: the list of ids of the objects for which the google doc is created. This list is supposed to have
        a length of 1 element only (batch processing is not supported in the code, though nothing really prevent it)
      :return: the config id and config name
    '''
    # TO DO in master: fix my signature and my model
    if isinstance(res_model, str):
        res_model = self.env['ir.model'].search([('model', '=', res_model)]).id
    if not res_id:
        raise UserError(_("Creating google drive may only be done by one at a time."))
    # check if a model is configured with a template
    config_values = []
    for config in self.search([('model_id', '=', res_model)]).sudo():
        if not config.filter_id:
            # No filter: the config always applies.
            config_values.append({'id': config.id, 'name': config.name})
            continue
        #Private
        if config.filter_id.user_id and config.filter_id.user_id.id != self.env.user.id:
            continue
        # Keep the config only if the record matches the filter's domain.
        domain = [('id', 'in', [res_id])] + safe_eval(config.filter_id.domain)
        additionnal_context = safe_eval(config.filter_id.context)
        matching_records = self.env[config.filter_id.model_id].with_context(
            **additionnal_context).search(domain)
        if matching_records:
            config_values.append({'id': config.id, 'name': config.name})
    return config_values
def _check_session_timing(self):
    """Raise if this POS session was opened more than 24 hours ago, as
    required by the French anti-fraud regulation."""
    self.ensure_one()
    now = datetime.utcnow()
    opened_at = Datetime.from_string(self.start_at)
    # Equivalent to: not (now - 24h <= opened_at)
    if opened_at < now - timedelta(hours=24):
        raise UserError(_("This session has been opened another day. To comply with the French law, you should close sessions on a daily basis. Please close session %s and open a new one.") % self.name)
    return True