def compute_loan_line(self):
    """Rebuild the payment schedule for every loan in ``self``.

    Deletes any existing ``hr.loan.line`` records linked to these loans,
    then creates ``num_quotas`` equal installments one month apart starting
    at ``date_payment``.  Each installment is mirrored as a confirmed
    ``hr.salary.inputs`` record dated one month after the installment's
    paid date, so payroll picks it up in the following period.

    :return: True (Odoo convention for action methods)
    """
    loan_line = self.env['hr.loan.line']
    input_obj = self.env['hr.salary.inputs']
    # Drop stale schedule lines for every loan being recomputed.  Using
    # ``self.ids`` (instead of ``self.id``) keeps this correct when the
    # method is called on a multi-record recordset.
    loan_line.search([('loan_id', 'in', self.ids)]).unlink()
    for loan in self:
        paid_date = loan.date_payment
        amount_per_time = loan.amount / loan.num_quotas
        for _quota in range(loan.num_quotas):
            line_id = loan_line.create({
                'paid_date': paid_date,
                'paid_amount': amount_per_time,
                'employee_id': loan.employee_id.id,
                'loan_id': loan.id,
            })
            # Mirror the installment in hr_salary_inputs; the input is
            # dated one month after the installment's paid date.
            input_obj.create({
                'employee_id': loan.employee_id.id,
                'center_id': loan.employee_id.center_id.id,
                'name': 'LOAN',
                'amount': amount_per_time,
                'state': 'confirm',
                'loan_line': line_id.id,
                'date': paid_date + relativedelta.relativedelta(months=1),
            })
            paid_date = paid_date + relativedelta.relativedelta(months=1)
    return True
def dateutil_parse(timestr, default, ignoretz=False, tzinfos=None, **kwargs):
    """ lifted from dateutil to get resolution"""
    from dateutil import tz
    import time

    parsed = DEFAULTPARSER._parse(StringIO(str(timestr)), **kwargs)
    # dateutil 2.2 returns a (result, tokens) pair; older releases return
    # the result object alone.
    if isinstance(parsed, tuple):
        parsed = parsed[0]
    if parsed is None:
        raise ValueError("unknown string format")

    overrides = {}
    reso = None
    for field in ("year", "month", "day", "hour",
                  "minute", "second", "microsecond"):
        field_value = getattr(parsed, field)
        if field_value is None:
            continue
        overrides[field] = field_value
        reso = field  # resolution = finest field actually supplied
    if reso is None:
        raise ValueError("Cannot parse date.")

    # Collapse a trailing microsecond field to the coarsest equivalent.
    if reso == 'microsecond':
        micros = overrides['microsecond']
        if micros == 0:
            reso = 'second'
        elif micros % 1000 == 0:
            reso = 'millisecond'

    ret = default.replace(**overrides)
    if parsed.weekday is not None and not parsed.day:
        # No day given but a weekday was: snap onto that weekday.
        ret = ret + relativedelta.relativedelta(weekday=parsed.weekday)

    if not ignoretz:
        if callable(tzinfos) or tzinfos and parsed.tzname in tzinfos:
            if callable(tzinfos):
                tzdata = tzinfos(parsed.tzname, parsed.tzoffset)
            else:
                tzdata = tzinfos.get(parsed.tzname)
            if isinstance(tzdata, datetime.tzinfo):
                tzinfo = tzdata
            elif isinstance(tzdata, compat.string_types):
                tzinfo = tz.tzstr(tzdata)
            elif isinstance(tzdata, int):
                tzinfo = tz.tzoffset(parsed.tzname, tzdata)
            else:
                raise ValueError("offset must be tzinfo subclass, "
                                 "tz string, or int offset")
            ret = ret.replace(tzinfo=tzinfo)
        elif parsed.tzname and parsed.tzname in time.tzname:
            ret = ret.replace(tzinfo=tz.tzlocal())
        elif parsed.tzoffset == 0:
            ret = ret.replace(tzinfo=tz.tzutc())
        elif parsed.tzoffset:
            ret = ret.replace(
                tzinfo=tz.tzoffset(parsed.tzname, parsed.tzoffset))
    return ret, reso
def get_number_of_months_from_dates(date1, date2):
    '''
    Helper function to extract total months of experience from a resume

    NOTE: despite the name, this returns a human-readable string such as
    "total experience: 2 years, 3 months" (or 0 when parsing fails), not
    a month count.

    :param date1: Starting date
    :param date2: Ending date
    :return: months of experience from date1 to date2
    '''
    if date2.lower() == 'present':
        date2 = datetime.now().strftime('%b %Y')

    def _abbreviate(raw):
        # Normalize "January 2020" -> "Jan 2020".  Raises IndexError when
        # the string is empty or has no year token.
        if len(raw.split()[0]) > 3:
            tokens = raw.split()
            raw = tokens[0][:3] + ' ' + tokens[1]
        return raw

    try:
        date1 = _abbreviate(date1)
        date2 = _abbreviate(date2)
    except IndexError:
        return 0

    try:
        start = datetime.strptime(str(date1), '%b %Y')
        end = datetime.strptime(str(date2), '%b %Y')
        delta = relativedelta.relativedelta(end, start)
        months_of_experience = (
            f"total experience: {delta.years} years, {delta.months} months"
        )
    except ValueError:
        return 0
    return months_of_experience
def compute_message(self, vals, manual_comment=None):
    """Render each record's ``comment`` template against ``vals`` and join
    the results with " - ", prefixed by ``manual_comment`` when given.

    Returns False when there are no records and no manual comment.
    """
    if not self.ids and not manual_comment:
        return False
    from jinja2.sandbox import SandboxedEnvironment
    # Sandboxed jinja2 environment configured with Mako-style delimiters.
    env = SandboxedEnvironment(
        block_start_string="<%",
        block_end_string="%>",
        variable_start_string="${",
        variable_end_string="}",
        comment_start_string="<%doc>",
        comment_end_string="</%doc>",
        line_statement_prefix="%",
        line_comment_prefix="##",
        trim_blocks=True,  # do not output newline after
        autoescape=True,  # XML/HTML automatic escaping
    )
    env.globals.update({
        "str": str,
        "datetime": datetime,
        "len": len,
        "abs": abs,
        "min": min,
        "max": max,
        "sum": sum,
        "filter": filter,
        "map": map,
        "round": round,
        # dateutil.relativedelta cannot be instantiated directly within a
        # jinja2 expression, so expose it through a lambda proxy.
        "relativedelta": lambda *a, **kw: relativedelta.relativedelta(*a, **kw),
        # Let templates format monetary values (BRL) like the fiscal
        # observation does.
        "format_amount": (lambda amount, context=self._context:
                          self.format_amount(
                              self.env, amount, self.env.ref("base.BRL"))),
    })
    unescaped_env = copy.copy(env)
    unescaped_env.autoescape = False
    parts = [manual_comment] if manual_comment else []
    for record in self:
        compiled = unescaped_env.from_string(tools.ustr(record.comment))
        parts.append(compiled.render(vals))
    return " - ".join(parts)
def dateutil_parse(timestr, default, ignoretz=False, tzinfos=None, **kwargs):
    """ lifted from dateutil to get resolution"""
    from dateutil import tz
    import time

    res = DEFAULTPARSER._parse(StringIO(str(timestr)), **kwargs)
    if isinstance(res, tuple):  # dateutil 2.2 compat
        res = res[0]
    if res is None:
        raise ValueError("unknown string format")

    attrs = ("year", "month", "day", "hour",
             "minute", "second", "microsecond")
    repl = {name: getattr(res, name)
            for name in attrs if getattr(res, name) is not None}
    # Resolution is the finest-grained field the parser supplied.
    reso = next((name for name in reversed(attrs) if name in repl), None)
    if reso is None:
        raise ValueError("Cannot parse date.")

    if reso == "microsecond":
        if repl["microsecond"] == 0:
            reso = "second"
        elif repl["microsecond"] % 1000 == 0:
            reso = "millisecond"

    ret = default.replace(**repl)
    if res.weekday is not None and not res.day:
        # Weekday without an explicit day: snap to that weekday.
        ret = ret + relativedelta.relativedelta(weekday=res.weekday)

    if ignoretz:
        return ret, reso

    if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
        if callable(tzinfos):
            tzdata = tzinfos(res.tzname, res.tzoffset)
        else:
            tzdata = tzinfos.get(res.tzname)
        if isinstance(tzdata, datetime.tzinfo):
            tzinfo = tzdata
        elif isinstance(tzdata, compat.string_types):
            tzinfo = tz.tzstr(tzdata)
        elif isinstance(tzdata, int):
            tzinfo = tz.tzoffset(res.tzname, tzdata)
        else:
            raise ValueError("offset must be tzinfo subclass, "
                             "tz string, or int offset")
        ret = ret.replace(tzinfo=tzinfo)
    elif res.tzname and res.tzname in time.tzname:
        ret = ret.replace(tzinfo=tz.tzlocal())
    elif res.tzoffset == 0:
        ret = ret.replace(tzinfo=tz.tzutc())
    elif res.tzoffset:
        ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
    return ret, reso
def compute_message(self, vals):
    """Render every record's ``comment`` jinja2 template against ``vals``
    and concatenate the results, one rendered comment per line."""
    from jinja2.sandbox import SandboxedEnvironment
    # Sandboxed jinja2 environment configured with Mako-style delimiters.
    template_env = SandboxedEnvironment(
        block_start_string="<%",
        block_end_string="%>",
        variable_start_string="${",
        variable_end_string="}",
        comment_start_string="<%doc>",
        comment_end_string="</%doc>",
        line_statement_prefix="%",
        line_comment_prefix="##",
        trim_blocks=True,  # do not output newline after
        autoescape=True,  # XML/HTML automatic escaping
    )
    template_env.globals.update({
        'str': str,
        'datetime': datetime,
        'len': len,
        'abs': abs,
        'min': min,
        'max': max,
        'sum': sum,
        'filter': filter,
        'map': map,
        'round': round,
        # dateutil.relativedelta cannot be instantiated directly within a
        # jinja2 expression, so expose it through a lambda proxy.
        'relativedelta': lambda *a, **kw: relativedelta.relativedelta(*a, **kw),
        # Let templates format monetary values (BRL) like the fiscal
        # observation does.
        'format_amount': (lambda amount, context=self._context:
                          self.format_amount(
                              self.env, amount, self.env.ref('base.BRL'))),
    })
    raw_env = copy.copy(template_env)
    raw_env.autoescape = False
    rendered = []
    for record in self:
        compiled = raw_env.from_string(tools.ustr(record.comment))
        rendered.append(compiled.render(vals))
    return ''.join(piece + '\n' for piece in rendered)
def dateutil_parse(timestr, default, ignoretz=False, tzinfos=None, **kwargs):
    """ lifted from dateutil to get resolution

    Parse ``timestr``, filling unspecified fields from ``default``.

    :return: ``(parsed_datetime, reso)`` where ``reso`` names the finest
        field the input actually supplied.
    :raises ValueError: if the string cannot be parsed, or if the parser
        supplied none of the date/time fields.
    """
    from dateutil import tz
    import time
    res = DEFAULTPARSER._parse(StringIO(timestr), **kwargs)
    if res is None:
        raise ValueError("unknown string format")
    repl = {}
    # Start from None so an input where the parser filled in no fields at
    # all (e.g. a bare timezone token) raises a clear ValueError below
    # instead of UnboundLocalError on ``reso``.
    reso = None
    for attr in ["year", "month", "day", "hour",
                 "minute", "second", "microsecond"]:
        value = getattr(res, attr)
        if value is not None:
            repl[attr] = value
            reso = attr  # resolution = finest field actually supplied
    if reso is None:
        raise ValueError("Cannot parse date.")
    if reso == 'microsecond' and repl['microsecond'] == 0:
        reso = 'second'
    ret = default.replace(**repl)
    if res.weekday is not None and not res.day:
        # Weekday without an explicit day: snap to that weekday.
        ret = ret + relativedelta.relativedelta(weekday=res.weekday)
    if not ignoretz:
        if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
            if callable(tzinfos):
                tzdata = tzinfos(res.tzname, res.tzoffset)
            else:
                tzdata = tzinfos.get(res.tzname)
            if isinstance(tzdata, datetime.tzinfo):
                tzinfo = tzdata
            elif isinstance(tzdata, basestring):
                tzinfo = tz.tzstr(tzdata)
            elif isinstance(tzdata, int):
                tzinfo = tz.tzoffset(res.tzname, tzdata)
            else:
                raise ValueError("offset must be tzinfo subclass, "
                                 "tz string, or int offset")
            ret = ret.replace(tzinfo=tzinfo)
        elif res.tzname and res.tzname in time.tzname:
            ret = ret.replace(tzinfo=tz.tzlocal())
        elif res.tzoffset == 0:
            ret = ret.replace(tzinfo=tz.tzutc())
        elif res.tzoffset:
            ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
    return ret, reso
def dateutil_parse(timestr, default, ignoretz=False, tzinfos=None, **kwargs):
    """ lifted from dateutil to get resolution

    Parse ``timestr``, filling unspecified fields from ``default``.

    :return: ``(parsed_datetime, reso)`` where ``reso`` names the finest
        field the input actually supplied.
    :raises ValueError: if the string cannot be parsed, or if the parser
        supplied none of the date/time fields.
    """
    res = DEFAULTPARSER._parse(StringIO(timestr), **kwargs)
    if res is None:
        # Function-call raise form works on both Python 2 and 3; the old
        # ``raise ValueError, msg`` statement is a SyntaxError on 3.
        raise ValueError("unknown string format")
    repl = {}
    # Start from None so an input where the parser filled in no fields at
    # all raises a clear ValueError below instead of NameError on ``reso``.
    reso = None
    for attr in ["year", "month", "day", "hour",
                 "minute", "second", "microsecond"]:
        value = getattr(res, attr)
        if value is not None:
            repl[attr] = value
            reso = attr  # resolution = finest field actually supplied
    if reso is None:
        raise ValueError("Cannot parse date.")
    if reso == 'microsecond' and repl['microsecond'] == 0:
        reso = 'second'
    ret = default.replace(**repl)
    if res.weekday is not None and not res.day:
        # Weekday without an explicit day: snap to that weekday.
        ret = ret + relativedelta.relativedelta(weekday=res.weekday)
    if not ignoretz:
        if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
            if callable(tzinfos):
                tzdata = tzinfos(res.tzname, res.tzoffset)
            else:
                tzdata = tzinfos.get(res.tzname)
            if isinstance(tzdata, datetime.tzinfo):
                tzinfo = tzdata
            elif isinstance(tzdata, basestring):
                tzinfo = tz.tzstr(tzdata)
            elif isinstance(tzdata, int):
                tzinfo = tz.tzoffset(res.tzname, tzdata)
            else:
                raise ValueError("offset must be tzinfo subclass, "
                                 "tz string, or int offset")
            ret = ret.replace(tzinfo=tzinfo)
        elif res.tzname and res.tzname in time.tzname:
            ret = ret.replace(tzinfo=tz.tzlocal())
        elif res.tzoffset == 0:
            ret = ret.replace(tzinfo=tz.tzutc())
        elif res.tzoffset:
            ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
    return ret, reso
def prepare_jinja_template_env(env_kwargs=None, extra_context=None):
    """ Prepare custom jinja2 template environment. """
    options = {
        'trim_blocks': True,  # do not output newline after blocks
        'autoescape': True,  # XML/HTML automatic escaping
    }
    options.update(env_kwargs or {})
    environment = SandboxedEnvironment(**options)

    context = {
        'str': str,
        'quote': urls.url_quote,
        'urlencode': urls.url_encode,
        'datetime': datetime,
        'len': len,
        'abs': abs,
        'min': min,
        'max': max,
        'sum': sum,
        'filter': filter,
        'reduce': functools.reduce,
        'map': map,
        'round': round,
        # dateutil.relativedelta cannot be instantiated directly within a
        # jinja2 expression, so expose it through a lambda proxy.
        # pylint: disable=unnecessary-lambda
        'relativedelta': lambda *a, **kw: (relativedelta.relativedelta(*a, **kw)),
    }
    context.update(extra_context or {})
    environment.globals.update(context)
    return environment
from datetime import datetime from dateutil.relativedelta import relativedelta from copy import copy import itertools from itertools import combinations as itercomb from dateutil import relativedelta import numpy as np import pandas as pd START_DATE = datetime(2016, 1, 1) # first day of the schedule N_CLUSTERS = 6 # number of cluster in the schedule END_DATE = START_DATE + relativedelta.relativedelta(months = N_CLUSTERS) # last day of the schedule MAX_LOAD_L1 = 24. # hours per day MAX_LOAD_L2 = 24. # hours per day OVER_FACTOR = 1.5 # overload acceptance factor MAX_VOLTAGE_L1 = 630. # max voltage for line 1 MAX_AREA_L1 = 150. # max area for line 1 sched_hours = 2 * (END_DATE - START_DATE).total_seconds() / 3600. def first_selection(batches, clusterdays): ''' Arguments --------- batches : a list of dictionaries containing first batches
def plot_baselines_prop(bls, dbfile):
    """Summarise candidate baselines and plot diagnostic figures.

    For every baseline (a list of batch objects) in ``bls`` this
    aggregates revenue/margin/workload, estimates monthly overload
    ("delay"), scatter-plots margin vs. workload, computes a pairwise
    workload-based distance between baselines, and finally histograms how
    often each priority offer appears in the high-margin baselines.

    :param bls: iterable of baselines; each is an iterable of batches
        from the BruggCablesKTI database model.
    :param dbfile: path to the sqlite database file.
    """
    from BruggCablesKTI.db.model import DBHandler
    from BruggCablesKTI.db import utils
    dbh = DBHandler('sqlite:///' + dbfile)
    baselines = []
    batches = []
    import time
    # NOTE(review): time.clock() was removed in Python 3.8, so this code
    # requires an older interpreter; time.perf_counter() is the modern
    # replacement.
    start = time.clock()
    for bl in bls:
        df_bl = []
        brugg_cables_ids = []
        for batch in bl:
            # One summary row per batch; 'delay' and 'n_opp' are filled
            # in later at baseline level.
            attr = {
                'ids': batch.pk,
                'kind': batch.cable.opportunity.kind,
                'description': batch.cable.opportunity.description,
                'delivery': batch.delivery_date,
                'revenue': batch.cable.opportunity.revenue,
                'margin': batch.cable.opportunity.margin,
                'workload': batch.workload,
                'delay': None,
                'n_opp': 0
            }
            if batch.cable.opportunity.kind == 'offer':
                brugg_cables_ids.append(batch.cable.opportunity)
            # Keep an unmodified copy before the in-place edits below.
            batches.append(copy(attr))
            df_bl.append(attr)
            # Wrap scalar fields in lists so the DataFrame .sum() below
            # concatenates them instead of numerically adding them.
            attr['ids'] = [attr['ids']]
            attr['kind'] = [attr['kind']]
            attr['description'] = [attr['description']]
            # Normalise workload to a fraction of the total schedulable
            # hours (two lines, 24 h/day over the schedule horizon).
            attr['workload'] /= (24. * (END_DATE - START_DATE).days * 2.)
        df_bl = pd.DataFrame(df_bl)
        delay = []
        for icl in range(0, N_CLUSTERS):
            # NOTE(review): start_date is reset to START_DATE on every
            # iteration, so all N_CLUSTERS windows cover the same first
            # month and the trailing ``start_date = copy(end_date)`` is
            # dead code. Hoisting the initialisation above the loop looks
            # intended — confirm before changing behavior.
            start_date = copy(START_DATE)
            end_date = start_date + relativedelta.relativedelta(months=1)
            sel = df_bl[
                (df_bl.delivery >= start_date) & \
                (df_bl.delivery < end_date)
            ]
            # Overload = hours delivered in the window minus its capacity.
            aux_wl = sel.workload.sum() - (2 * (end_date - start_date).days * 24.)
            delay += [aux_wl] if aux_wl >= 0 else [0]
            start_date = copy(end_date)
        # NOTE(review): the positional ``axis`` argument to drop() is
        # deprecated in modern pandas (use columns='delivery').
        df_bl = df_bl.drop('delivery', 1)
        df_bl = df_bl.sum()
        df_bl['delay'] = np.sum(delay)
        df_bl['n_opp'] = len(list(set(brugg_cables_ids)))
        baselines.append(df_bl.to_dict())
    print(time.clock() - start)
    df = pd.DataFrame(baselines)
    #####################################
    ### SCATTER PLOT OF THE BASELINES ###
    #####################################
    import matplotlib.pyplot as plt
    plt.scatter(df.workload.values, df.margin.values, c=df.n_opp.values, s=50)
    plt.title('Baselines')
    plt.xlabel('Workload (%)')
    plt.ylabel('Profit Margin (CHF)')
    # for label, x, y, z in zip( df.n_opp.values,
    #                            df.workload.values,
    #                            df.margin.values,
    #                            df.n_opp.values):
    #     plt.annotate( label, xy = (x, y), xytext = (-10, 10),
    #         textcoords = 'offset points', ha = 'right', va = 'bottom',
    #         bbox = dict(boxstyle = 'round,pad=0.1', fc = 'white', alpha = 0.5),
    #         arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
    cbar = plt.colorbar(orientation='horizontal')
    cbar.set_label('Number of (Priority) Offers')
    plt.grid()
    plt.savefig('Baselines_plot.png')
    plt.show()
    #######################################
    ### DISTANCE MEASURE IMPLEMENTATION ###
    #######################################
    distances = []
    df_batch = pd.DataFrame(batches)
    import time
    start = time.clock()
    #for index in range(len(df)):
    for index in range(0, 1):
        d_4_off = []
        for internal in range(len(df)):
            # Symmetric difference of the two baselines' batch ids: the
            # distance is the total workload of the non-shared batches.
            # NOTE(review): DataFrame.ix was removed in pandas 1.0; this
            # needs an old pandas (use .iloc/.loc on modern versions).
            set_dif = list(set(df.ix[internal].ids) - set(df.ix[index].ids)) + \
                list(set(df.ix[index].ids) - set(df.ix[internal].ids))
            dist = 0
            for pk in set_dif:
                dist += dbh.find_batch(pk).workload
            attr = {
                'dist': dist,
                'profit': df.ix[internal].margin - df.ix[index].margin,
                'number': df.ix[internal].n_opp
            }
            d_4_off.append(attr)
        distances.append(pd.DataFrame(d_4_off))
    print(time.clock() - start)
    plt.scatter(distances[index].dist, distances[index].profit,
                c=distances[index].number)
    plt.xlabel('Distance (hours)')
    plt.ylabel('Profit Margin (CHF)')
    # cbar = plt.colorbar(orientation='horizontal')
    # cbar.set_label('Number of (Priority) Offers')
    plt.show()
    #import pdb; pdb.set_trace()
    #############################################
    ### HISTOGRAM FOR THE BASELINES SELECTION ###
    #############################################
    # Keep only the baselines whose margin clears the cutoff.
    CUTOFF_PARAM = 7.4e7
    df = df[df.margin > CUTOFF_PARAM]
    print(df)
    priority = []
    ids = df.description.tolist()
    kind = df.kind.tolist()
    bg_id = df.ids.tolist()
    for index in range(0, len(ids)):
        pk = ids[index]
        kn = kind[index]
        # Unique descriptions of the 'offer'-kind batches per baseline.
        priority.append(list(set([pk[i] for i in range(0, len(pk)) \
            if kn[i] == 'offer'])))
    import collections
    counter = collections.Counter(
        [item for sublist in priority for item in sublist])
    names = list(counter.elements())
    fig = plt.figure(figsize=(15, 10))
    plt.tick_params(axis='y', which='minor', labelsize=4)
    for i in range(0, len(counter)):
        # One horizontal bar per offer, length = its occurrence count.
        rects1 = plt.hlines(i + 1, 0, list(counter.values())[i],
                            linewidth=10, colors='red', linestyle='solid')
    plt.yticks(range(1, len(set(names)) + 1), set(names), fontsize=10)
    plt.ylim([0, len(set(names)) + 1])
    plt.xlim([0, np.max(list(counter.values())) + 1])
    plt.xlabel('Occurrencies')
    plt.savefig('Frequencies.png')
    plt.show()
    import pdb
    pdb.set_trace()
class HrHistoricWizard(models.TransientModel):
    """Wizard that uploads payslip data for a period into the payroll
    historic (``hr.payroll.historic``), or downloads a stored historic
    back onto the employees."""

    _name = 'hr.historic.wizard'
    _description = 'Historico'

    @api.depends('date_from', 'date_to')
    def _get_period(self):
        # Build the human-readable period label: "MM-YYYY" when both
        # dates fall in the same month, "MM-YYYY al MM-YYYY" otherwise.
        for val in self:
            date_from = fields.Date.from_string(val.date_from)
            date_to = fields.Date.from_string(val.date_to)
            if date_from.month == date_to.month:
                val.period = '%.2d-%s' % (date_from.month, date_from.year)
            else:
                val.period = '%.2d-%s al %.2d-%s' % (
                    date_from.month, date_from.year,
                    date_to.month, date_to.year)

    # Whether to push data into the historic ('upload') or apply a stored
    # historic back to employees ('download').
    operation = fields.Selection([
        ('upload', 'Carga'),
        ('download', 'Descarga'),
    ], string="Operación", required=True)
    # Download scope: every employee or a single one.
    type = fields.Selection([
        ('all', 'Todos los empleados'),
        ('one', 'Un empleado'),
    ], string="Seleccione")
    period = fields.Char(string='Período', readonly=True,
                         compute="_get_period")
    # Defaults to the first day of the current month.
    # NOTE(review): this default calls ``relativedelta(...)`` directly
    # while date_to uses ``relativedelta.relativedelta(...)`` — only one
    # of the two can match this module's actual import; confirm which.
    date_from = fields.Date(
        string='Desde', required=True,
        default=lambda self: fields.Date.today() + relativedelta(day=1))
    # Defaults to the last day of the current month.
    date_to = fields.Date(
        string='Hasta', required=True,
        default=lambda self: fields.Date.today() + relativedelta.relativedelta(
            months=1, day=1, days=-1))
    employee_id = fields.Many2one('hr.employee', string="Empleado")
    company_id = fields.Many2one('res.company', string="Company",
                                 required=True,
                                 default=lambda self: self.env.company)

    @api.constrains('date_from', 'date_to')
    def _check_dates(self):
        # Reject any record whose start date is after its end date.
        if any(
                self.filtered(
                    lambda payslip: payslip.date_from > payslip.date_to)):
            raise ValidationError(
                _("La fecha final no puede ser menor que la fecha inicial'."))

    def ask_operation(self):
        # Dispatch to the requested operation.
        if self.operation == 'upload':
            self.upload_historic()
        else:
            self.download_historic()

    def upload_historic(self, payslip_process=False):
        """Snapshot the period's payslips into ``hr.payroll.historic``.

        When ``payslip_process`` is True an existing historic for the
        period is replaced and True is returned; otherwise an act_window
        on the created historic is returned.
        """
        self.company_id.hr_period = self.period
        context = dict(self._context or {})
        historic = self.env['hr.payroll.historic'].search([('name', '=',
                                                            self.period)])
        if payslip_process and historic:
            historic.unlink()
        tree_id = self.env.ref(
            'l10n_cl_hr_payroll.hr_payroll_historic_view_tree')
        # Look up the payslips produced in the selected period.
        payslip = self.env['hr.payslip'].search([
            ('date_from', '>=', self.date_from),
            ('date_to', '<=', self.date_to)
        ])
        if payslip:
            #contracts = []
            details = []
            values = {
                'name': self.period,
                'date_from': self.date_from,
                'date_to': self.date_to
            }
            for data in payslip:
                # ~ for contract in data.worked_days_line_ids:
                # ~     if contract.contract_id:
                #~          contracts.append(contract.id)
                # ~ for contract in data.input_line_ids:
                # ~     if contract.contract_id:
                #~          contracts.append(contract.id)
                # One historic line per payslip, capturing the employee's
                # payroll configuration at this point in time.
                vals = {
                    'origin': data.number or data.name,
                    'employee_id': data.employee_id.id,
                    'afp_id': data.employee_id.afp_id.id,
                    'job_id': data.employee_id.job_id.id,
                    'struct_id': data.struct_id.id,
                    'centro_costo_id': data.employee_id.centro_costo_id.id,
                    'contract_id': data.contract_id.id,
                    # 'contract_ids': [(6, 0, contracts)],
                }
                details.append((0, 0, vals))
                vals = {}
            values.update({'line_ids': details})
            historic_id = self.env['hr.payroll.historic'].create(values)
            if not payslip_process:
                return {
                    'name': ('Historico'),
                    'res_model': 'hr.payroll.historic',
                    'view_mode': 'tree,form',
                    'view_id': historic_id.id,
                    'context': context,
                    'views': [(tree_id.id, 'tree')],
                    'type': 'ir.actions.act_window',
                    'target': 'current',
                    # 'domain': [('id','in',[historic_id.id])],
                    'nodestroy': True
                }
            else:
                return True

    def download_historic(self):
        """Write the stored historic configuration back onto the
        employees (all of them, or only ``employee_id`` when the wizard
        type is 'one'), then open the historic in a tree view."""
        employee_obj = self.env['hr.employee']
        context = dict(self._context or {})
        tree_id = self.env.ref(
            'l10n_cl_hr_payroll.hr_payroll_historic_view_tree')
        self.company_id.hr_period = self.period
        # Look up the payslips produced in the selected period.
        historic = self.env['hr.payroll.historic'].search([('name', '=',
                                                            self.period)])
        if len(historic) > 1:
            raise ValidationError(
                _("El período de nómina se encuentra duplicado en el histórico."
                  ))
        if historic:
            for employee in historic.line_ids:
                vals = {
                    'afp_id': employee.afp_id and employee.afp_id.id or False,
                    'job_id': employee.job_id and employee.job_id.id or False,
                    'struct_id':
                        employee.struct_id and employee.struct_id.id or False,
                    'centro_costo_id':
                        employee.centro_costo_id and
                        employee.centro_costo_id.id or False,
                    'contract_id':
                        employee.contract_id and
                        employee.contract_id.id or False,
                }
                if self.type == 'one':
                    # Restrict the write to the selected employee.
                    if employee.employee_id.id == self.employee_id.id:
                        employee.employee_id.write(vals)
                else:
                    employee.employee_id.write(vals)
        return {
            'name': ('Historico'),
            'res_model': 'hr.payroll.historic',
            'view_mode': 'tree,form',
            # 'view_id': historic_id.id,
            'context': context,
            'views': [(tree_id.id, 'tree')],
            'type': 'ir.actions.act_window',
            'target': 'current',
            # NOTE(review): ``historic.ids`` is already a list, so this
            # domain nests it as [[...]]; ``('id', 'in', historic.ids)``
            # looks intended — confirm before changing.
            'domain': [('id', 'in', [historic.ids])],
            'nodestroy': True
        }
'min': min, 'max': max, 'sum': sum, 'filter': filter, 'reduce': reduce, 'map': map, 'round': round, 'relativedelta': lambda *a, **kw: relativedelta.relativedelta(*a, **kw), }) except ImportError: _logger.warning("jinja2 not available, templating features will not work!") class SendSMS(models.Model): _name = "send_sms" _description = "Send SMS" def reminder_sms_cron(self): templates = self.env['send_sms'].search([('remind_sms', '=', True)]) if templates: for single in templates: pos_order = self.env['cybill.sms.queue'].search([ ('sms_sent', '=', 'not_send')