Example #1
class HeaderImage(osv.osv):
    """Logo allows you to define multiple logo per company"""
    _name = "ir.header_img"
    _columns = {
        'company_id': fields.many2one('res.company', 'Company'),
        'img': fields.binary('Image', attachment=True),
        'name': fields.char('Name', required=True, help="Name of Image"),
        'type': fields.char('Type',
                            required=True,
                            help="Image type(png,gif,jpeg)")
    }
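
A minimal usage sketch for this model through the legacy osv API; the helper name, file path, and encoding step are assumptions, not part of the example above:

import base64

def install_company_logo(self, cr, uid, company_id, context=None):
    # hypothetical helper: attach a PNG logo to a company
    with open('/path/to/logo.png', 'rb') as f:  # illustrative path
        img_data = base64.encodestring(f.read())  # binary columns store base64
    return self.pool.get('ir.header_img').create(cr, uid, {
        'company_id': company_id,
        'img': img_data,
        'name': 'Main logo',
        'type': 'png',
    }, context=context)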
Example #2
class base_language_import(osv.osv_memory):
    """ Language Import """

    _name = "base.language.import"
    _description = "Language Import"
    _columns = {
        'name':
        fields.char('Language Name', required=True),
        'code':
        fields.char('ISO Code',
                    size=5,
                    help="ISO Language and Country code, e.g. en_US",
                    required=True),
        'data':
        fields.binary('File', required=True),
        'filename':
        fields.char('File Name', required=True),
        'overwrite':
        fields.boolean(
            'Overwrite Existing Terms',
            help=
            "If you enable this option, existing translations (including custom ones) "
            "will be overwritten and replaced by those in this file"),
    }

    def import_lang(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        this = self.browse(cr, uid, ids[0])
        if this.overwrite:
            context = dict(context, overwrite=True)
        fileobj = TemporaryFile('w+')
        try:
            fileobj.write(base64.decodestring(this.data))

            # now we determine the file format
            fileobj.seek(0)
            fileformat = os.path.splitext(this.filename)[-1][1:].lower()

            tools.trans_load_data(cr,
                                  fileobj,
                                  fileformat,
                                  this.code,
                                  lang_name=this.name,
                                  context=context)
        except Exception, e:
            _logger.exception(
                'File could not be imported due to a format mismatch.')
            raise UserError(
                _('File not imported due to format mismatch or a malformed file. (Valid formats are .csv, .po, .pot)\n\nTechnical Details:\n%s'
                  ) % tools.ustr(e))
        finally:
            fileobj.close()
Example #3
class Country(osv.osv):
    _name = 'res.country'
    _description = 'Country'
    _columns = {
        'name': fields.char('Country Name',
                            help='The full name of the country.', required=True, translate=True),
        'code': fields.char('Country Code', size=2,
                            help='The ISO country code in two chars.\n'
                            'You can use this field for quick search.'),
        'address_format': fields.text('Address Format', help="""You can state here the usual format to use for the \
addresses belonging to this country.\n\nYou can use the python-style string pattern with all the fields of the address \
(for example, use '%(street)s' to display the field 'street') plus
            \n%(state_name)s: the name of the state
            \n%(state_code)s: the code of the state
            \n%(country_name)s: the name of the country
            \n%(country_code)s: the code of the country"""),
        'currency_id': fields.many2one('res.currency', 'Currency'),
        'image': fields.binary("Image", attachment=True),
        'phone_code': fields.integer('Country Calling Code'),
        'country_group_ids': fields.many2many('res.country.group', 'res_country_res_country_group_rel', 'res_country_id', 'res_country_group_id', string='Country Groups'),
        'state_ids': fields.one2many('res.country.state', 'country_id', string='States'),
    }
    _sql_constraints = [
        ('name_uniq', 'unique (name)',
            'The name of the country must be unique !'),
        ('code_uniq', 'unique (code)',
            'The code of the country must be unique !')
    ]
    _defaults = {
        'address_format': "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s",
    }
    _order = 'name'

    name_search = location_name_search

    def create(self, cursor, user, vals, context=None):
        if vals.get('code'):
            vals['code'] = vals['code'].upper()
        return super(Country, self).create(cursor, user, vals, context=context)

    def write(self, cursor, user, ids, vals, context=None):
        if vals.get('code'):
            vals['code'] = vals['code'].upper()
        return super(Country, self).write(cursor, user, ids, vals, context=context)

    def get_address_fields(self, cr, uid, ids, context=None):
        res = {}
        for country in self.browse(cr, uid, ids, context=context):
            res[country.id] = re.findall(r'\((.+?)\)', country.address_format)
        return res
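
Applied to the default `address_format` above, the regex simply pulls out the placeholder names; a standalone sketch, runnable outside the ORM:

import re

fmt = "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s"
print re.findall(r'\((.+?)\)', fmt)
# ['street', 'street2', 'city', 'state_code', 'zip', 'country_name']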
Example #4
File: test_models.py Project: ecoreos/hz
class test_converter(orm.Model):
    _name = 'web_editor.converter.test'

    # disable translation export for those brilliant field labels and values
    _translate = False

    _columns = {
        'char':
        fields.char(),
        'integer':
        fields.integer(),
        'float':
        fields.float(),
        'numeric':
        fields.float(digits=(16, 2)),
        'many2one':
        fields.many2one('web_editor.converter.test.sub'),
        'binary':
        fields.binary(),
        'date':
        fields.date(),
        'datetime':
        fields.datetime(),
        'selection':
        fields.selection([
            (1, "réponse A"),
            (2, "réponse B"),
            (3, "réponse C"),
            (4, "réponse D"),
        ]),
        'selection_str':
        fields.selection(
            [
                ('A', "Qu'il n'est pas arrivé à Toronto"),
                ('B', "Qu'il était supposé arriver à Toronto"),
                ('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
                ('D', "La réponse D"),
            ],
            string=
            u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
            u"qu'il fait une escale technique à St Claude, on dit:"),
        'html':
        fields.html(),
        'text':
        fields.text(),
    }
Example #5
class restaurant_floor(osv.osv):
    _name = 'restaurant.floor'
    _columns = {
        'name':
        fields.char('Floor Name',
                    required=True,
                    help='An internal identification of the restaurant floor'),
        'pos_config_id':
        fields.many2one('pos.config', 'Point of Sale'),
        'background_image':
        fields.binary(
            'Background Image',
            attachment=True,
            help=
            'A background image used to display a floor layout in the point of sale interface'
        ),
        'background_color':
        fields.char(
            'Background Color',
            help=
            'The background color of the floor layout (must be specified in a html-compatible format)'
        ),
        'table_ids':
        fields.one2many('restaurant.table',
                        'floor_id',
                        'Tables',
                        help='The list of tables in this floor'),
        'sequence':
        fields.integer('Sequence', help='Used to sort Floors'),
    }

    _defaults = {
        'sequence': 1,
        'background_color': 'rgb(210, 210, 210)',
    }

    def set_background_color(self, cr, uid, id, background, context=None):
        self.write(cr,
                   uid, [id], {'background_color': background},
                   context=context)
Example #6
File: badge.py Project: LiberTang0/5
class gamification_badge(osv.Model):
    """Badge object that users can send and receive"""

    CAN_GRANT = 1
    NOBODY_CAN_GRANT = 2
    USER_NOT_VIP = 3
    BADGE_REQUIRED = 4
    TOO_MANY = 5

    _name = 'gamification.badge'
    _description = 'Gamification badge'
    _inherit = ['mail.thread']

    def _get_owners_info(self, cr, uid, ids, name, args, context=None):
        """Return:
            the list of unique res.users ids having received this badge
            the total number of times this badge was granted
            the total number of users this badge was granted to
        """
        result = dict((res_id, {'stat_count': 0, 'stat_count_distinct': 0, 'unique_owner_ids': []}) for res_id in ids)

        cr.execute("""
            SELECT badge_id, count(user_id) as stat_count,
                count(distinct(user_id)) as stat_count_distinct,
                array_agg(distinct(user_id)) as unique_owner_ids
            FROM gamification_badge_user
            WHERE badge_id in %s
            GROUP BY badge_id
            """, (tuple(ids),))
        for (badge_id, stat_count, stat_count_distinct, unique_owner_ids) in cr.fetchall():
            result[badge_id] = {
                'stat_count': stat_count,
                'stat_count_distinct': stat_count_distinct,
                'unique_owner_ids': unique_owner_ids,
            }
        return result

    def _get_badge_user_stats(self, cr, uid, ids, name, args, context=None):
        """Return stats related to badge users"""
        result = dict.fromkeys(ids, False)
        badge_user_obj = self.pool.get('gamification.badge.user')
        first_month_day = date.today().replace(day=1).strftime(DF)
        for bid in ids:
            result[bid] = {
                'stat_my': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid)], context=context, count=True),
                'stat_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_date', '>=', first_month_day)], context=context, count=True),
                'stat_my_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True),
                'stat_my_monthly_sending': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_uid', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True)
            }
        return result

    def _remaining_sending_calc(self, cr, uid, ids, name, args, context=None):
        """Computes the number of badges remaining the user can send

        0 if not allowed or no remaining
        integer if limited sending
        -1 if infinite (should not be displayed)
        """
        result = dict.fromkeys(ids, False)
        for badge in self.browse(cr, uid, ids, context=context):
            if self._can_grant_badge(cr, uid, badge.id, context) != self.CAN_GRANT:
                # if the user cannot grant this badge at all, result is 0
                result[badge.id] = 0
            elif not badge.rule_max:
                # if there is no limitation, -1 is returned which means 'infinite'
                result[badge.id] = -1
            else:
                result[badge.id] = badge.rule_max_number - badge.stat_my_monthly_sending
        return result
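
    # A client consuming 'remaining_sending' has to special-case the two
    # sentinel values described above; a sketch of the interpretation:
    #   -1 -> unlimited (no monthly limit configured)
    #    0 -> cannot grant at all, or the monthly quota is exhausted
    #    n -> n sendings left this month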

    _columns = {
        'name': fields.char('Badge', required=True, translate=True),
        'description': fields.text('Description', translate=True),
        'image': fields.binary("Image", attachment=True,
            help="This field holds the image used for the badge, limited to 256x256"),
        'rule_auth': fields.selection([
                ('everyone', 'Everyone'),
                ('users', 'A selected list of users'),
                ('having', 'People having some badges'),
                ('nobody', 'No one, assigned through challenges'),
            ],
            string="Allowance to Grant",
            help="Who can grant this badge",
            required=True),
        'rule_auth_user_ids': fields.many2many('res.users', 'rel_badge_auth_users',
            string='Authorized Users',
            help="Only these people can give this badge"),
        'rule_auth_badge_ids': fields.many2many('gamification.badge',
            'gamification_badge_rule_badge_rel', 'badge1_id', 'badge2_id',
            string='Required Badges',
            help="Only the people having these badges can give this badge"),

        'rule_max': fields.boolean('Monthly Limited Sending',
            help="Check to set a monthly limit per person of sending this badge"),
        'rule_max_number': fields.integer('Limitation Number',
            help="The maximum number of time this badge can be sent per month per person."),
        'stat_my_monthly_sending': fields.function(_get_badge_user_stats,
            type="integer",
            string='My Monthly Sending Total',
            multi='badge_users',
            help="The number of time the current user has sent this badge this month."),
        'remaining_sending': fields.function(_remaining_sending_calc, type='integer',
            string='Remaining Sending Allowed', help="If a maximum is set"),

        'challenge_ids': fields.one2many('gamification.challenge', 'reward_id',
            string="Reward of Challenges"),

        'goal_definition_ids': fields.many2many('gamification.goal.definition', 'badge_unlocked_definition_rel',
            string='Rewarded by',
            help="The users that have succeeded theses goals will receive automatically the badge."),

        'owner_ids': fields.one2many('gamification.badge.user', 'badge_id',
            string='Owners', help='The list of instances of this badge granted to users'),
        'active': fields.boolean('Active'),
        'unique_owner_ids': fields.function(_get_owners_info,
            string='Unique Owners',
            help="The list of unique users having received this badge.",
            multi='unique_users',
            type="many2many", relation="res.users"),

        'stat_count': fields.function(_get_owners_info, string='Total',
            type="integer",
            multi='unique_users',
            help="The number of time this badge has been received."),
        'stat_count_distinct': fields.function(_get_owners_info,
            type="integer",
            string='Number of users',
            multi='unique_users',
            help="The number of time this badge has been received by unique users."),
        'stat_this_month': fields.function(_get_badge_user_stats,
            type="integer",
            string='Monthly total',
            multi='badge_users',
            help="The number of time this badge has been received this month."),
        'stat_my': fields.function(_get_badge_user_stats, string='My Total',
            type="integer",
            multi='badge_users',
            help="The number of time the current user has received this badge."),
        'stat_my_this_month': fields.function(_get_badge_user_stats,
            type="integer",
            string='My Monthly Total',
            multi='badge_users',
            help="The number of time the current user has received this badge this month."),
    }

    _defaults = {
        'rule_auth': 'everyone',
        'active': True,
    }

    def check_granting(self, cr, uid, badge_id, context=None):
        """Check the user 'uid' can grant the badge 'badge_id' and raise the appropriate exception
        if not

        Do not check for SUPERUSER_ID
        """
        status_code = self._can_grant_badge(cr, uid, badge_id, context=context)
        if status_code == self.CAN_GRANT:
            return True
        elif status_code == self.NOBODY_CAN_GRANT:
            raise UserError(_('This badge can not be sent by users.'))
        elif status_code == self.USER_NOT_VIP:
            raise UserError(_('You are not in the user allowed list.'))
        elif status_code == self.BADGE_REQUIRED:
            raise UserError(_('You do not have the required badges.'))
        elif status_code == self.TOO_MANY:
            raise UserError(_('You have already sent this badge too many times this month.'))
        else:
            _logger.exception("Unknown badge status code: %d" % int(status_code))
        return False

    def _can_grant_badge(self, cr, uid, badge_id, context=None):
        """Check if a user can grant a badge to another user

        :param uid: the id of the res.users trying to send the badge
        :param badge_id: the granted badge id
        :return: integer representing the permission.
        """
        if uid == SUPERUSER_ID:
            return self.CAN_GRANT

        badge = self.browse(cr, uid, badge_id, context=context)

        if badge.rule_auth == 'nobody':
            return self.NOBODY_CAN_GRANT

        elif badge.rule_auth == 'users' and uid not in [user.id for user in badge.rule_auth_user_ids]:
            return self.USER_NOT_VIP

        elif badge.rule_auth == 'having':
            all_user_badges = self.pool.get('gamification.badge.user').search(cr, uid, [('user_id', '=', uid)], context=context)
            for required_badge in badge.rule_auth_badge_ids:
                if required_badge.id not in all_user_badges:
                    return self.BADGE_REQUIRED

        if badge.rule_max and badge.stat_my_monthly_sending >= badge.rule_max_number:
            return self.TOO_MANY

        # badge.rule_auth == 'everyone' -> no check
        return self.CAN_GRANT

    def check_progress(self, cr, uid, context=None):
        try:
            model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'badge_hidden')
        except ValueError:
            return True
        badge_user_obj = self.pool.get('gamification.badge.user')
        if not badge_user_obj.search(cr, uid, [('user_id', '=', uid), ('badge_id', '=', res_id)], context=context):
            values = {
                'user_id': uid,
                'badge_id': res_id,
            }
            badge_user_obj.create(cr, SUPERUSER_ID, values, context=context)
        return True
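
A sketch of how a caller might guard granting with `check_granting` before creating the corresponding `gamification.badge.user` record; the wrapper method and its arguments are assumptions, not part of the module above:

def grant_badge(self, cr, uid, badge_id, user_id, context=None):
    # check_granting raises a UserError with the precise reason on refusal
    badge_obj = self.pool.get('gamification.badge')
    if badge_obj.check_granting(cr, uid, badge_id, context=context):
        return self.pool.get('gamification.badge.user').create(cr, uid, {
            'badge_id': badge_id,
            'user_id': user_id,
        }, context=context)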
Example #7
class ir_import(orm.TransientModel):
    _name = 'base_import.import'
    # allow imports to survive for 12h in case user is slow
    _transient_max_hours = 12.0

    _columns = {
        'res_model': fields.char('Model'),
        'file': fields.binary(
            'File', help="File to check and/or import, raw binary (not base64)"),
        'file_name': fields.char('File Name'),
        'file_type': fields.char(string='File Type'),
    }

    def get_fields(self, cr, uid, model, context=None,
                   depth=FIELDS_RECURSION_LIMIT):
        """ Recursively get fields for the provided model (through
        fields_get) and filter them according to importability

        The output format is a list of ``Field``, with ``Field``
        defined as:

        .. class:: Field

            .. attribute:: id (str)

                A non-unique identifier for the field, used to compute
                the span of the ``required`` attribute: if multiple
                ``required`` fields have the same id, only one of them
                is necessary.

            .. attribute:: name (str)

                The field's logical (eCore) name within the scope of
                its parent.

            .. attribute:: string (str)

                The field's human-readable name (``@string``)

            .. attribute:: required (bool)

                Whether the field is marked as required in the
                model. Clients must provide non-empty import values
                for all required fields or the import will error out.

            .. attribute:: fields (list(Field))

                The current field's subfields. The database and
                external identifiers for m2o and m2m fields; a
                filtered and transformed fields_get for o2m fields (to
                a variable depth defined by ``depth``).

                Fields with no sub-fields will have an empty list of
                sub-fields.

        :param str model: name of the model to get fields from
        :param int depth: depth of recursion into o2m fields
        """
        model_obj = self.pool[model]
        fields = [{
            'id': 'id',
            'name': 'id',
            'string': _("External ID"),
            'required': False,
            'fields': [],
        }]
        fields_got = model_obj.fields_get(cr, uid, context=context)
        blacklist = orm.MAGIC_COLUMNS + [model_obj.CONCURRENCY_CHECK_FIELD]
        for name, field in fields_got.iteritems():
            if name in blacklist:
                continue
            # an empty string means the field is deprecated, @deprecated must
            # be absent or False to mean not-deprecated
            if field.get('deprecated', False) is not False:
                continue
            if field.get('readonly'):
                states = field.get('states')
                if not states:
                    continue
                # states = {state: [(attr, value), (attr2, value2)], state2:...}
                if not any(attr == 'readonly' and value is False
                           for attr, value in itertools.chain.from_iterable(
                                states.itervalues())):
                    continue

            f = {
                'id': name,
                'name': name,
                'string': field['string'],
                # 'required' is not always present in fields_get output, hence the bool()
                'required': bool(field.get('required')),
                'fields': [],
            }

            if field['type'] in ('many2many', 'many2one'):
                f['fields'] = [
                    dict(f, name='id', string=_("External ID")),
                    dict(f, name='.id', string=_("Database ID")),
                ]
            elif field['type'] == 'one2many' and depth:
                f['fields'] = self.get_fields(
                    cr, uid, field['relation'], context=context, depth=depth-1)
                if self.user_has_groups(cr, uid, 'base.group_no_one'):
                    f['fields'].append({'id' : '.id', 'name': '.id', 'string': _("Database ID"), 'required': False, 'fields': []})

            fields.append(f)

        # TODO: cache on model?
        return fields
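
    # To illustrate the ``Field`` structure documented above, a trimmed
    # sample of what get_fields may return for a model with a required char
    # field and a many2one (names and values are illustrative):
    #
    # [{'id': 'id', 'name': 'id', 'string': 'External ID',
    #   'required': False, 'fields': []},
    #  {'id': 'name', 'name': 'name', 'string': 'Name',
    #   'required': True, 'fields': []},
    #  {'id': 'partner_id', 'name': 'partner_id', 'string': 'Partner',
    #   'required': False, 'fields': [
    #      {'id': 'partner_id', 'name': 'id', 'string': 'External ID', ...},
    #      {'id': 'partner_id', 'name': '.id', 'string': 'Database ID', ...},
    #  ]}]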

    def _read_file(self, file_type, record, options):
        (file_extension, handler, req) = FILE_TYPE_DICT.get(file_type, (None, None, None))
        if handler:
            return getattr(self, '_read_' + file_extension)(record, options)
        # fallback on file extensions as mime types can be unreliable (e.g.
        # software setting incorrect mime types, or non-installed software
        # leading to browser not sending mime types)
        if record.file_name:
            p, ext = os.path.splitext(record.file_name)
            if ext and EXTENSIONS.get(ext):
                return getattr(self, '_read_' + ext[1:])(record, options)

        if req:
            raise ImportError(_("Unable to load \"{extension}\" file: requires Python module \"{modname}\"").format(extension=file_extension, modname=req))
        raise ValueError(_("Unsupported file format \"{}\", import only supports CSV, ODS, XLS and XLSX").format(file_type))

    def _read_xls(self, record, options):
        book = xlrd.open_workbook(file_contents=record.file)
        sheet = book.sheet_by_index(0)
        # emulate Sheet.get_rows for pre-0.9.4
        for row in itertools.imap(sheet.row, range(sheet.nrows)):
            values = []
            for cell in row:
                if cell.ctype is xlrd.XL_CELL_NUMBER:
                    is_float = cell.value % 1 != 0.0
                    values.append(
                        unicode(cell.value)
                        if is_float
                        else unicode(int(cell.value))
                    )
                elif cell.ctype is xlrd.XL_CELL_DATE:
                    is_datetime = cell.value % 1 != 0.0
                    # emulate xldate_as_datetime for pre-0.9.3
                    dt = datetime.datetime(*xlrd.xldate.xldate_as_tuple(
                        cell.value, book.datemode))
                    values.append(
                        dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                        if is_datetime
                        else dt.strftime(DEFAULT_SERVER_DATE_FORMAT)
                    )
                elif cell.ctype is xlrd.XL_CELL_BOOLEAN:
                    values.append(u'True' if cell.value else u'False')
                elif cell.ctype is xlrd.XL_CELL_ERROR:
                    raise ValueError(
                        _("Error cell found while reading XLS/XLSX file: %s") %
                        xlrd.error_text_from_code.get(
                            cell.value, "unknown error code %s" % cell.value)
                    )
                else:
                    values.append(cell.value)
            if any(x for x in values if x.strip()):
                yield values
    _read_xlsx = _read_xls

    def _read_ods(self, record, options):
        doc = odf_ods_reader.ODSReader(file=io.BytesIO(record.file))

        return (
            row
            for row in doc.getFirstSheet()
            if any(x for x in row if x.strip())
        )

    def _read_csv(self, record, options):
        """ Returns a CSV-parsed iterator of all empty lines in the file

        :throws csv.Error: if an error is detected during CSV parsing
        :throws UnicodeDecodeError: if ``options.encoding`` is incorrect
        """
        csv_iterator = csv.reader(
            StringIO(record.file),
            quotechar=str(options['quoting']),
            delimiter=str(options['separator']))

        # TODO: guess encoding with chardet? Or https://github.com/aadsm/jschardet
        encoding = options.get('encoding', 'utf-8')
        return (
            [item.decode(encoding) for item in row]
            for row in csv_iterator
            if any(x for x in row if x.strip())
        )
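
    # The reader relies on the format-specific options documented in
    # parse_preview below; a sketch of a typical options dict for CSV
    # (values are illustrative):
    #
    #   options = {
    #       'quoting': '"',       # quotechar handed to csv.reader
    #       'separator': ',',     # delimiter
    #       'encoding': 'utf-8',  # used to decode each cell
    #       'headers': True,      # first row holds the column titles
    #   }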

    def _match_header(self, header, fields, options):
        """ Attempts to match a given header to a field of the
        imported model.

        :param str header: header name from the CSV file
        :param fields:
        :param dict options:
        :returns: an empty list if the header couldn't be matched, or
                  all the fields to traverse
        :rtype: list(Field)
        """
        string_match = None
        for field in fields:
            # FIXME: should match all translations & original
            # TODO: use string distance (levenshtein? hamming?)
            if header.lower() == field['name'].lower():
                return [field]
            if header.lower() == field['string'].lower():
                # matching by string is not reliable because
                # strings have no unique constraint
                string_match = field
        if string_match:
            # this behavior is only applied if there is no matching field['name']
            return [string_match]

        if '/' not in header:
            return []

        # relational field path
        traversal = []
        subfields = fields
        # Iteratively dive into fields tree
        for section in header.split('/'):
            # Strip section in case spaces are added around '/' for
            # readability of paths
            match = self._match_header(section.strip(), subfields, options)
            # Any match failure, exit
            if not match: return []
            # prep subfields for next iteration within match[0]
            field = match[0]
            subfields = field['fields']
            traversal.append(field)
        return traversal
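
    # For example, a header such as 'country_id / name' is split on '/' and
    # matched level by level: assuming a many2one 'country_id' whose
    # subfields include 'name', the traversal returned is
    # [<field country_id>, <field name>]; any unknown segment yields [].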

    def _match_headers(self, rows, fields, options):
        """ Attempts to match the imported model's fields to the
        titles of the parsed CSV file, if the file is supposed to have
        headers.

        Will consume the first line of the ``rows`` iterator.

        Returns a pair of (None, None) if headers were not requested
        or the list of headers and a dict mapping cell indices
        to key paths in the ``fields`` tree

        :param Iterator rows:
        :param dict fields:
        :param dict options:
        :rtype: (None, None) | (list(str), dict(int: list(str)))
        """
        if not options.get('headers'):
            return None, None

        headers = next(rows)
        return headers, {
            index: [field['name'] for field in self._match_header(header, fields, options)] or None
            for index, header in enumerate(headers)
        }

    def parse_preview(self, cr, uid, id, options, count=10, context=None):
        """ Generates a preview of the uploaded files, and performs
        fields-matching between the import's file data and the model's
        columns.

        If the headers are not requested (not options.headers),
        ``matches`` and ``headers`` are both ``False``.

        :param id: identifier of the import
        :param int count: number of preview lines to generate
        :param options: format-specific options.
                        CSV: {encoding, quoting, separator, headers}
        :type options: {str, str, str, bool}
        :returns: {fields, matches, headers, preview} | {error, preview}
        :rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str}
        """
        (record,) = self.browse(cr, uid, [id], context=context)
        fields = self.get_fields(cr, uid, record.res_model, context=context)

        try:
            rows = self._read_file(record.file_type, record, options)

            headers, matches = self._match_headers(rows, fields, options)
            # Match should have consumed the first row (iff headers), get
            # the ``count`` next rows for preview
            preview = list(itertools.islice(rows, count))
            assert preview, "CSV file seems to have no content"
            return {
                'fields': fields,
                'matches': matches or False,
                'headers': headers or False,
                'preview': preview,
            }
        except Exception, e:
            # Due to lazy generators, UnicodeDecodeError (for
            # instance) may only be raised when serializing the
            # preview to a list in the return.
            _logger.debug("Error during parsing preview", exc_info=True)
            preview = None
            if record.file_type == 'text/csv':
                preview = record.file[:ERROR_PREVIEW_BYTES].decode('iso-8859-1')
            return {
                'error': str(e),
                # iso-8859-1 ensures decoding will always succeed,
                # even if it yields non-printable characters. This is
                # in case of UnicodeDecodeError (or csv.Error
                # compounded with UnicodeDecodeError)
                'preview': preview,
            }
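
A sketch of a successful `parse_preview` payload for a two-column CSV with headers; the values are illustrative:

{
    'fields': [],  # abridged: the full output of get_fields()
    'headers': ['name', 'phone_code'],
    'matches': {0: ['name'], 1: ['phone_code']},
    'preview': [['Belgium', '32'], ['France', '33']],
}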
Example #8
class base_language_export(osv.osv_memory):
    _name = "base.language.export"

    def _get_languages(self, cr, uid, context):
        lang_obj = self.pool.get('res.lang')
        ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
        langs = lang_obj.browse(cr, uid, ids)
        return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))
                ] + [(lang.code, lang.name) for lang in langs]

    _columns = {
        'name':
        fields.char('File Name', readonly=True),
        'lang':
        fields.selection(_get_languages, 'Language', required=True),
        'format':
        fields.selection([('csv', 'CSV File'), ('po', 'PO File'),
                          ('tgz', 'TGZ Archive')],
                         'File Format',
                         required=True),
        'modules':
        fields.many2many('ir.module.module',
                         'rel_modules_langexport',
                         'wiz_id',
                         'module_id',
                         'Apps To Export',
                         domain=[('state', '=', 'installed')]),
        'data':
        fields.binary('File', readonly=True),
        'state':
        fields.selection([
            ('choose', 'choose'),  # choose language
            ('get', 'get')
        ])  # get the file
    }
    _defaults = {
        'state': 'choose',
        'lang': NEW_LANG_KEY,
        'format': 'csv',
    }

    def act_getfile(self, cr, uid, ids, context=None):
        this = self.browse(cr, uid, ids, context=context)[0]
        lang = this.lang if this.lang != NEW_LANG_KEY else False
        mods = sorted(map(lambda m: m.name, this.modules)) or ['all']

        with contextlib.closing(cStringIO.StringIO()) as buf:
            tools.trans_export(lang, mods, buf, this.format, cr)
            out = base64.encodestring(buf.getvalue())

        filename = 'new'
        if lang:
            filename = get_iso_codes(lang)
        elif len(mods) == 1:
            filename = mods[0]
        extension = this.format
        if not lang and extension == 'po':
            extension = 'pot'
        name = "%s.%s" % (filename, extension)
        this.write({'state': 'get', 'data': out, 'name': name})
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'base.language.export',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': this.id,
            'views': [(False, 'form')],
            'target': 'new',
        }
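
The naming logic in `act_getfile` gives three cases; a sketch, assuming `get_iso_codes` collapses a country suffix that merely repeats the language code:

# lang='fr_FR', format='po'                     -> 'fr.po'
# lang=False, single module 'sale', format='po' -> 'sale.pot'
# lang=False, several modules, format='csv'     -> 'new.csv'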
Example #9
class consult_export_csv_excel_reportcustomer(osv.osv_memory):
    _name = 'consult.export.csv.excel.reportcustomer'
    _description = 'Exportar Reporte a Excel o CSV'
    _columns = {
        'datas_fname':
        fields.char('File Name', size=256),
        'file':
        fields.binary('Layout'),
        'download_file':
        fields.boolean('Descargar Archivo'),
        'cadena_decoding':
        fields.text('Binario sin encoding'),
        'type':
        fields.selection(
            [('csv', 'CSV')],
            'Tipo Exportacion',
            required=False,
        ),
    }

    _defaults = {
        'download_file': False,
        'type': 'csv',
    }

    def export_csv_file(self, cr, uid, ids, context=None):
        document_csv = ""
        active_ids = context['active_ids']
        consult_obj = self.pool.get('stock.reportcustomer.model')
        if active_ids:
            for active in active_ids:
                da_list = []
                for rec in self.browse(cr, uid, ids, context=None):
                    consult_br = consult_obj.browse(cr,
                                                    uid,
                                                    active,
                                                    context=None)
                    da_list.append(consult_br.date)
                    salto_line = "\n"
                    cabeceras_p = "Cliente" + "," + "Fecha Inicio" + "," + "Fecha Fin"
                    document_csv = document_csv + cabeceras_p

                    linea_1 = consult_br.name.name+","+\
                    consult_br.date+","+consult_br.date_end

                    document_csv = document_csv + salto_line + linea_1 + salto_line

                    cabeceras_l = "Factura" + "," + "Fecha" + "," + "Monto"

                    texto_x = "Facturas del Cliente" + "," + ","
                    document_csv = document_csv + salto_line + texto_x

                    document_csv = document_csv + salto_line + cabeceras_l

                    detalle_lineas = ""
                    for linea in consult_br.reportcustomer_invoice_lines:
                        linea_str = ""
                        if linea.invoice_id:
                            linea_str = str(linea.invoice_id.number)+","+str(linea.invoice_id.date_invoice)+\
                            ","+str(linea.invoice_id.amount_total)

                        detalle_lineas = detalle_lineas + salto_line + linea_str
                    document_csv = document_csv + detalle_lineas + salto_line + salto_line

                    cabeceras_l = "Producto"+","+"Cantidad"+","+\
                    "Unidad de Medida"+","+"Total Facturado"

                    texto_x = "Detalle Productos Facturados" + "," + "," + ","
                    document_csv = document_csv + salto_line + texto_x

                    document_csv = document_csv + salto_line + cabeceras_l

                    detalle_lineas = ""
                    for linea in consult_br.reportcustomer_lines:
                        linea_str = ""
                        if linea.product_id:
                            linea_str = str(linea.product_id.name)+\
                            ","+str(linea.qty)+","+str(linea.uom_id.name)+","+str(linea.amount_total)

                        detalle_lineas = detalle_lineas + salto_line + linea_str
                    document_csv = document_csv + detalle_lineas + salto_line + salto_line

                date = datetime.now().strftime('%d-%m-%Y')
                if len(da_list) > 1:
                    datas_fname = "Reporte Facturacion Client " + str(
                        date) + ".csv"  # Nombre del Archivo
                else:
                    datas_fname = "Reporte Facturacion Client " + consult_br.date + " - " + consult_br.date_end + ".csv"  # Nombre del Archivo
                rec.write({
                    'cadena_decoding': document_csv,
                    'datas_fname': datas_fname,
                    'file': base64.encodestring(document_csv),
                    'download_file': True
                })
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'consult.export.csv.excel.reportcustomer',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': ids[0],
            'views': [(False, 'form')],
            'target': 'new',
        }

    def process_export(self, cr, uid, ids, context=None):
        for rec in self.browse(cr, uid, ids, context=context):
            if rec.type == 'csv':
                result = self.export_csv_file(cr, uid, ids, context=context)
                return result
        return True
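
The wizard builds its CSV by plain string concatenation, which breaks as soon as a value contains a comma or newline; a hedged alternative sketch using the stdlib csv module (Python 2, record names reused from above):

import cStringIO
import csv

buf = cStringIO.StringIO()
writer = csv.writer(buf)

# the Python 2 csv module wants byte strings, so encode unicode values first
def _enc(val):
    return val.encode('utf-8') if isinstance(val, unicode) else str(val)

writer.writerow([_enc(v) for v in ("Cliente", "Fecha Inicio", "Fecha Fin")])
writer.writerow([_enc(v) for v in (consult_br.name.name, consult_br.date,
                                   consult_br.date_end)])
document_csv = buf.getvalue()  # embedded commas/newlines are quoted correctly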
Example #10
class base_import_module(osv.TransientModel):
    """ Import Module """
    _name = "base.import.module"
    _description = "Import Module"

    _columns = {
        'module_file':
        fields.binary('Module .ZIP file', required=True),
        'state':
        fields.selection([('init', 'init'), ('done', 'done')],
                         'Status',
                         readonly=True),
        'import_message':
        fields.char('Import message'),
        'force':
        fields.boolean(
            'Force init',
            help=
            "Force init mode even if installed. (will update `noupdate='1'` records)"
        ),
    }

    _defaults = {
        'state': 'init',
        'force': False,
    }

    def import_module(self, cr, uid, ids, context=None):
        module_obj = self.pool.get('ir.module.module')
        data = self.browse(cr, uid, ids[0], context=context)
        zip_data = base64.decodestring(data.module_file)
        fp = BytesIO()
        fp.write(zip_data)
        res = module_obj.import_zipfile(cr,
                                        uid,
                                        fp,
                                        force=data.force,
                                        context=context)
        self.write(cr,
                   uid,
                   ids, {
                       'state': 'done',
                       'import_message': res[0]
                   },
                   context=context)
        context = dict(context, module_name=res[1])
        # Return the wizard; otherwise it closes and the result message is not shown to the user.
        return {
            'name': 'Import Module',
            'view_type': 'form',
            'view_mode': 'form',
            'target': 'new',
            'res_id': ids[0],
            'res_model': 'base.import.module',
            'type': 'ir.actions.act_window',
            'context': context,
        }

    def action_module_open(self, cr, uid, ids, context):
        return {
            'domain': [('name', 'in', context.get('module_name', []))],
            'name': 'Modules',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'ir.module.module',
            'view_id': False,
            'type': 'ir.actions.act_window',
        }
Example #11
File: xml_decl.py Project: ecoreos/hz
class xml_decl(osv.TransientModel):
    """
    Intrastat XML Declaration
    """
    _name = "l10n_be_intrastat_xml.xml_decl"
    _description = 'Intrastat XML Declaration'

    def _get_company_id(self, cr, uid, context=None):
        return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id

    def _get_def_monthyear(self, cr, uid, context=None):
        td = datetime.strptime(fields.date.context_today(self, cr, uid, context=context),
                               tools.DEFAULT_SERVER_DATE_FORMAT).date()
        return td.strftime('%Y'), td.strftime('%m')

    def _get_def_month(self, cr, uid, context=None):
        return self._get_def_monthyear(cr, uid, context=context)[1]

    def _get_def_year(self, cr, uid, context=None):
        return self._get_def_monthyear(cr, uid, context=context)[0]

    _columns = {
        'name': fields.char('File Name'),
        'month': fields.selection([('01','January'), ('02','February'), ('03','March'),
                                   ('04','April'), ('05','May'), ('06','June'), ('07','July'),
                                   ('08','August'), ('09','September'), ('10','October'),
                                   ('11','November'), ('12','December')], 'Month', required=True),
        'year': fields.char('Year', size=4, required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'arrivals': fields.selection([('be-exempt', 'Exempt'),
                                      ('be-standard', 'Standard'),
                                      ('be-extended', 'Extended')],
                                     'Arrivals', required=True),
        'dispatches': fields.selection([('be-exempt', 'Exempt'),
                                      ('be-standard', 'Standard'),
                                      ('be-extended', 'Extended')],
                                       'Dispatches', required=True),
        'file_save': fields.binary('Intrastat Report File', readonly=True),
        'state': fields.selection([('draft', 'Draft'), ('download', 'Download')], string="State"),
    }

    _defaults = {
        'arrivals': 'be-standard',
        'dispatches': 'be-standard',
        'name': 'intrastat.xml',
        'company_id': _get_company_id,
        'month': _get_def_month,
        'year': _get_def_year,
        'state': 'draft',
    }

    def _company_warning(self, cr, uid, translated_msg, context=None):
        """ Raise a error with custom message, asking user to configure company settings """
        xmlid_mod = self.pool['ir.model.data']
        action_id = xmlid_mod.xmlid_to_res_id(cr, uid, 'base.action_res_company_form')
        raise exceptions.RedirectWarning(
            translated_msg, action_id, _('Go to company configuration screen'))

    def create_xml(self, cr, uid, ids, context=None):
        """Creates xml that is to be exported and sent to estate for partner vat intra.
        :return: Value for next action.
        :rtype: dict
        """
        decl_datas = self.browse(cr, uid, ids[0])
        company = decl_datas.company_id
        if not (company.partner_id and company.partner_id.country_id and
                company.partner_id.country_id.id):
            self._company_warning(
                cr, uid,
                _('The country of your company is not set, '
                  'please make sure to configure it first.'),
                context=context)
        kbo = company.company_registry
        if not kbo:
            self._company_warning(
                cr, uid,
                _('The registry number of your company is not set, '
                  'please make sure to configure it first.'),
                context=context)
        if len(decl_datas.year) != 4:
            raise exceptions.Warning(_('Year must be a 4-digit number (YYYY)'))

        #Create root declaration
        decl = ET.Element('DeclarationReport')
        decl.set('xmlns', INTRASTAT_XMLNS)

        #Add Administration elements
        admin = ET.SubElement(decl, 'Administration')
        fromtag = ET.SubElement(admin, 'From')
        fromtag.text = kbo
        fromtag.set('declarerType', 'KBO')
        ET.SubElement(admin, 'To').text = "NBB"
        ET.SubElement(admin, 'Domain').text = "SXX"
        if decl_datas.arrivals == 'be-standard':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=False, extendedmode=False, context=context))
        elif decl_datas.arrivals == 'be-extended':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=False, extendedmode=True, context=context))
        if decl_datas.dispatches == 'be-standard':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=True, extendedmode=False, context=context))
        elif decl_datas.dispatches == 'be-extended':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=True, extendedmode=True, context=context))

        #Get xml string with declaration
        data_file = ET.tostring(decl, encoding='UTF-8', method='xml')

        #change state of the wizard
        self.write(cr, uid, ids,
                   {'name': 'intrastat_%s%s.xml' % (decl_datas.year, decl_datas.month),
                    'file_save': base64.encodestring(data_file),
                    'state': 'download'},
                   context=context)
        return {
            'name': _('Save'),
            'context': context,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'l10n_be_intrastat_xml.xml_decl',
            'type': 'ir.actions.act_window',
            'target': 'new',
            'res_id': ids[0],
        }

    def _get_lines(self, cr, uid, ids, decl_datas, company, dispatchmode=False,
                   extendedmode=False, context=None):
        intrastatcode_mod = self.pool['report.intrastat.code']
        invoiceline_mod = self.pool['account.invoice.line']
        product_mod = self.pool['product.product']
        region_mod = self.pool['l10n_be_intrastat.region']
        warehouse_mod = self.pool['stock.warehouse']

        if dispatchmode:
            mode1 = 'out_invoice'
            mode2 = 'in_refund'
            declcode = "29"
        else:
            mode1 = 'in_invoice'
            mode2 = 'out_refund'
            declcode = "19"

        decl = ET.Element('Report')
        if not extendedmode:
            decl.set('code', 'EX%sS' % declcode)
        else:
            decl.set('code', 'EX%sE' % declcode)
        decl.set('date', '%s-%s' % (decl_datas.year, decl_datas.month))
        datas = ET.SubElement(decl, 'Data')
        if not extendedmode:
            datas.set('form', 'EXF%sS' % declcode)
        else:
            datas.set('form', 'EXF%sE' % declcode)
        datas.set('close', 'true')
        intrastatkey = namedtuple("intrastatkey",
                                  ['EXTRF', 'EXCNT', 'EXTTA', 'EXREG',
                                   'EXGO', 'EXTPC', 'EXDELTRM'])
        entries = {}

        sqlreq = """
            select
                inv_line.id
            from
                account_invoice_line inv_line
                join account_invoice inv on inv_line.invoice_id=inv.id
                left join res_country on res_country.id = inv.intrastat_country_id
                left join res_partner on res_partner.id = inv.partner_id
                left join res_country countrypartner on countrypartner.id = res_partner.country_id
                join product_product on inv_line.product_id=product_product.id
                join product_template on product_product.product_tmpl_id=product_template.id
            where
                inv.state in ('open','paid')
                and inv.company_id=%s
                and not product_template.type='service'
                and (res_country.intrastat=true or (inv.intrastat_country_id is null
                                                    and countrypartner.intrastat=true))
                and ((res_country.code is not null and not res_country.code=%s)
                     or (res_country.code is null and countrypartner.code is not null
                     and not countrypartner.code=%s))
                and inv.type in (%s, %s)
                and to_char(inv.date_invoice, 'YYYY')=%s
                and to_char(inv.date_invoice, 'MM')=%s
            """

        cr.execute(sqlreq, (company.id, company.partner_id.country_id.code,
                            company.partner_id.country_id.code, mode1, mode2,
                            decl_datas.year, decl_datas.month))
        lines = cr.fetchall()
        invoicelines_ids = [rec[0] for rec in lines]
        invoicelines = invoiceline_mod.browse(cr, uid, invoicelines_ids, context=context)
        for inv_line in invoicelines:

            #Check type of transaction
            if inv_line.intrastat_transaction_id:
                extta = inv_line.intrastat_transaction_id.code
            else:
                extta = "1"
            #Check country
            if inv_line.invoice_id.intrastat_country_id:
                excnt = inv_line.invoice_id.intrastat_country_id.code
            else:
                excnt = inv_line.invoice_id.partner_id.country_id.code

            #Check region
            #If purchase, comes from purchase order, linked to a location,
            #which is linked to the warehouse
            #if sales, the sale order is linked to the warehouse
            #if sales, from a delivery order, linked to a location,
            #which is linked to the warehouse
            #If none found, get the company one.
            exreg = None
            if inv_line.invoice_id.type in ('in_invoice', 'in_refund'):
                #comes from purchase
                POL = self.pool['purchase.order.line']
                poline_ids = POL.search(
                    cr, uid, [('invoice_lines', 'in', inv_line.id)], context=context)
                if poline_ids:
                    purchaseorder = POL.browse(cr, uid, poline_ids[0], context=context).order_id
                    region_id = warehouse_mod.get_regionid_from_locationid(
                        cr, uid, purchaseorder.location_id.id, context=context)
                    if region_id:
                        exreg = region_mod.browse(cr, uid, region_id).code
            elif inv_line.invoice_id.type in ('out_invoice', 'out_refund'):
                #comes from sales
                soline_ids = self.pool['sale.order.line'].search(
                    cr, uid, [('invoice_lines', 'in', inv_line.id)], context=context)
                if soline_ids:
                    saleorder = self.pool['sale.order.line'].browse(
                        cr, uid, soline_ids[0], context=context).order_id
                    if saleorder and saleorder.warehouse_id and saleorder.warehouse_id.region_id:
                        exreg = region_mod.browse(
                            cr, uid, saleorder.warehouse_id.region_id.id, context=context).code

            if not exreg:
                if company.region_id:
                    exreg = company.region_id.code
                else:
                    self._company_warning(
                        cr, uid,
                        _('The Intrastat Region of the selected company is not set, '
                          'please make sure to configure it first.'),
                        context=context)

            #Check commodity codes
            intrastat_id = product_mod.get_intrastat_recursively(
                cr, uid, inv_line.product_id.id, context=context)
            if intrastat_id:
                exgo = intrastatcode_mod.browse(cr, uid, intrastat_id, context=context).name
            else:
                raise exceptions.Warning(
                    _('Product "%s" has no intrastat code, please configure it') %
                        inv_line.product_id.display_name)

            #In extended mode, 2 more fields required
            if extendedmode:
                #Check means of transport
                if inv_line.invoice_id.transport_mode_id:
                    extpc = inv_line.invoice_id.transport_mode_id.code
                elif company.transport_mode_id:
                    extpc = company.transport_mode_id.code
                else:
                    self._company_warning(
                        cr, uid,
                        _('The default Intrastat transport mode of your company '
                          'is not set, please make sure to configure it first.'),
                        context=context)

                #Check incoterm
                if inv_line.invoice_id.incoterm_id:
                    exdeltrm = inv_line.invoice_id.incoterm_id.code
                elif company.incoterm_id:
                    exdeltrm = company.incoterm_id.code
                else:
                    self._company_warning(
                        cr, uid,
                        _('The default Incoterm of your company is not set, '
                          'please make sure to configure it first.'),
                        context=context)
            else:
                extpc = ""
                exdeltrm = ""
            linekey = intrastatkey(EXTRF=declcode, EXCNT=excnt,
                                   EXTTA=extta, EXREG=exreg, EXGO=exgo,
                                   EXTPC=extpc, EXDELTRM=exdeltrm)
            #We have the key
            #calculate amounts
            if inv_line.price_unit and inv_line.quantity:
                amount = inv_line.price_unit * inv_line.quantity
            else:
                amount = 0
            if (not inv_line.uom_id.category_id
                    or not inv_line.product_id.uom_id.category_id
                    or inv_line.uos_id.category_id.id != inv_line.product_id.uom_id.category_id.id):
                weight = inv_line.product_id.weight * inv_line.quantity
            else:
                weight = (inv_line.product_id.weight *
                          inv_line.quantity * inv_line.uos_id.factor)
            if (not inv_line.uos_id.category_id or not inv_line.product_id.uom_id.category_id
                    or inv_line.uos_id.category_id.id != inv_line.product_id.uom_id.category_id.id):
                supply_units = inv_line.quantity
            else:
                supply_units = inv_line.quantity * inv_line.uom_id.factor
            amounts = entries.setdefault(linekey, (0, 0, 0))
            amounts = (amounts[0] + amount, amounts[1] + weight, amounts[2] + supply_units)
            entries[linekey] = amounts

        numlgn = 0
        for linekey in entries:
            numlgn += 1
            amounts = entries[linekey]
            item = ET.SubElement(datas, 'Item')
            self._set_Dim(item, 'EXSEQCODE', unicode(numlgn))
            self._set_Dim(item, 'EXTRF', unicode(linekey.EXTRF))
            self._set_Dim(item, 'EXCNT', unicode(linekey.EXCNT))
            self._set_Dim(item, 'EXTTA', unicode(linekey.EXTTA))
            self._set_Dim(item, 'EXREG', unicode(linekey.EXREG))
            self._set_Dim(item, 'EXTGO', unicode(linekey.EXGO))
            if extendedmode:
                self._set_Dim(item, 'EXTPC', unicode(linekey.EXTPC))
                self._set_Dim(item, 'EXDELTRM', unicode(linekey.EXDELTRM))
            self._set_Dim(item, 'EXTXVAL', unicode(round(amounts[0], 0)).replace(".", ","))
            self._set_Dim(item, 'EXWEIGHT', unicode(round(amounts[1], 0)).replace(".", ","))
            self._set_Dim(item, 'EXUNITS', unicode(round(amounts[2], 0)).replace(".", ","))

        if numlgn == 0:
            # no data
            datas.set('action', 'nihil')
        return decl

    def _set_Dim(self, item, prop, value):
        dim = ET.SubElement(item, 'Dim')
        dim.set('prop', prop)
        dim.text = value
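
Put together, each aggregated invoice-line key ends up as an Item of Dim elements in the declaration; a sketch of the XML produced for one standard arrivals line (values illustrative):

<Item>
    <Dim prop="EXSEQCODE">1</Dim>
    <Dim prop="EXTRF">19</Dim>
    <Dim prop="EXCNT">FR</Dim>
    <Dim prop="EXTTA">1</Dim>
    <Dim prop="EXREG">2</Dim>
    <Dim prop="EXTGO">84119900</Dim>
    <Dim prop="EXTXVAL">1250,0</Dim>
    <Dim prop="EXWEIGHT">40,0</Dim>
    <Dim prop="EXUNITS">5,0</Dim>
</Item>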
Example #12
File: models.py Project: ecoreos/hz
class test_model(orm.Model):
    _name = 'test_converter.test_model'

    _columns = {
        'char':
        fields.char(),
        'integer':
        fields.integer(),
        'float':
        fields.float(),
        'numeric':
        fields.float(digits=(16, 2)),
        'many2one':
        fields.many2one('test_converter.test_model.sub'),
        'binary':
        fields.binary(),
        'date':
        fields.date(),
        'datetime':
        fields.datetime(),
        'selection':
        fields.selection([
            (1, "réponse A"),
            (2, "réponse B"),
            (3, "réponse C"),
            (4, "réponse D"),
        ]),
        'selection_str':
        fields.selection(
            [
                ('A', "Qu'il n'est pas arrivé à Toronto"),
                ('B', "Qu'il était supposé arriver à Toronto"),
                ('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
                ('D', "La réponse D"),
            ],
            string="Lorsqu'un pancake prend l'avion à destination de Toronto et "
            "qu'il fait une escale technique à St Claude, on dit:"),
        'html':
        fields.html(),
        'text':
        fields.text(),
    }

    # The `base` module does not contain any model that implements the
    # `_group_by_full` functionality, so we test this feature here.

    def _gbf_m2o(self, cr, uid, ids, domain, read_group_order,
                 access_rights_uid, context):
        Sub = self.pool['test_converter.test_model.sub']
        all_ids = Sub._search(cr,
                              uid, [],
                              access_rights_uid=access_rights_uid,
                              context=context)
        result = Sub.name_get(cr,
                              access_rights_uid or uid,
                              all_ids,
                              context=context)
        folds = {i: i not in ids for i, _ in result}
        return result, folds

    _group_by_full = {
        'many2one': _gbf_m2o,
    }
Example #13
class ir_attachment(osv.osv):
    """Attachments are used to link binary files or url to any ecore document.

    External attachment storage
    ---------------------------

    The 'data' function field (_data_get,data_set) is implemented using
    _file_read, _file_write and _file_delete which can be overridden to
    implement other storage engines, such methods should check for other
    location pseudo uri (example: hdfs://hadoppserver)

    The default implementation is the file:dirname location that stores files
    on the local filesystem using name based on their sha1 hash
    """
    _order = 'id desc'

    def _name_get_resname(self, cr, uid, ids, object, method, context):
        data = {}
        for attachment in self.browse(cr, uid, ids, context=context):
            model_object = attachment.res_model
            res_id = attachment.res_id
            if model_object and res_id:
                model_pool = self.pool[model_object]
                res = model_pool.name_get(cr, uid, [res_id], context)
                res_name = res and res[0][1] or None
                if res_name:
                    field = self._columns.get('res_name', False)
                    if field and len(res_name) > field.size:
                        res_name = res_name[:field.size - 3] + '...'
                data[attachment.id] = res_name or False
            else:
                data[attachment.id] = False
        return data

    def _storage(self, cr, uid, context=None):
        return self.pool['ir.config_parameter'].get_param(
            cr, SUPERUSER_ID, 'ir_attachment.location', 'file')

    def _filestore(self, cr, uid, context=None):
        return tools.config.filestore(cr.dbname)

    def force_storage(self, cr, uid, context=None):
        """Force all attachments to be stored in the currently configured storage"""
        if not self.pool['res.users']._is_admin(cr, uid, [uid]):
            raise AccessError(
                _('Only administrators can execute this action.'))

        location = self._storage(cr, uid, context)
        domain = {
            'db': [('store_fname', '!=', False)],
            'file': [('db_datas', '!=', False)],
        }[location]

        ids = self.search(cr, uid, domain, context=context)
        for attach in self.browse(cr, uid, ids, context=context):
            attach.write({'datas': attach.datas})
        return True

    # 'data' field implementation
    def _full_path(self, cr, uid, path):
        # sanitize path: strip dots and leading separators so the result
        # cannot escape the filestore directory
        path = re.sub('[.]', '', path)
        path = path.strip('/\\')
        return os.path.join(self._filestore(cr, uid), path)

    def _get_path(self, cr, uid, bin_data, sha):
        # retro compatibility
        fname = sha[:3] + '/' + sha
        full_path = self._full_path(cr, uid, fname)
        if os.path.isfile(full_path):
            return fname, full_path  # keep existing path

        # scatter files across 256 dirs
        # we use '/' in the db (even on windows)
        fname = sha[:2] + '/' + sha
        full_path = self._full_path(cr, uid, fname)
        dirname = os.path.dirname(full_path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        return fname, full_path

    def _file_read(self, cr, uid, fname, bin_size=False):
        full_path = self._full_path(cr, uid, fname)
        r = ''
        try:
            if bin_size:
                r = os.path.getsize(full_path)
            else:
                r = open(full_path, 'rb').read().encode('base64')
        except (IOError, OSError):
            _logger.info("_read_file reading %s", full_path, exc_info=True)
        return r

    def _file_write(self, cr, uid, value, checksum):
        bin_value = value.decode('base64')
        fname, full_path = self._get_path(cr, uid, bin_value, checksum)
        if not os.path.exists(full_path):
            try:
                with open(full_path, 'wb') as fp:
                    fp.write(bin_value)
            except IOError:
                _logger.info("_file_write writing %s",
                             full_path,
                             exc_info=True)
        return fname

    def _file_delete(self, cr, uid, fname):
        # using SQL to include files hidden through unlink or due to record rules
        cr.execute("SELECT COUNT(*) FROM ir_attachment WHERE store_fname = %s",
                   (fname, ))
        count = cr.fetchone()[0]
        full_path = self._full_path(cr, uid, fname)
        if not count and os.path.exists(full_path):
            try:
                os.unlink(full_path)
            except OSError:
                _logger.info("_file_delete could not unlink %s",
                             full_path,
                             exc_info=True)
            except IOError:
                # Harmless and needed for race conditions
                _logger.info("_file_delete could not unlink %s",
                             full_path,
                             exc_info=True)

    def _data_get(self, cr, uid, ids, name, arg, context=None):
        if context is None:
            context = {}
        result = {}
        bin_size = context.get('bin_size')
        for attach in self.browse(cr, uid, ids, context=context):
            if attach.store_fname:
                result[attach.id] = self._file_read(cr, uid,
                                                    attach.store_fname,
                                                    bin_size)
            else:
                result[attach.id] = attach.db_datas
        return result

    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        # compute the fields that depend on datas, supporting an empty/None datas
        bin_data = value and value.decode(
            'base64') or ''  # an empty string still gets a hash computed
        checksum = self._compute_checksum(bin_data)
        vals = {
            'file_size': len(bin_data),
            'checksum': checksum,
        }
        # We don't handle setting datas to null here:
        # datas is false, but file_size and checksum are not (they are
        # computed from datas as an empty string)
        if not value:
            # reset computed fields
            super(ir_attachment, self).write(cr,
                                             SUPERUSER_ID, [id],
                                             vals,
                                             context=context)
            return True
        if context is None:
            context = {}
        # browse the attachment and get the file to delete
        attach = self.browse(cr, uid, id, context=context)
        fname_to_delete = attach.store_fname
        location = self._storage(cr, uid, context)
        # compute the index_content field
        vals['index_content'] = self._index(cr, SUPERUSER_ID, bin_data,
                                            attach.datas_fname,
                                            attach.mimetype)
        if location != 'db':
            # create the file
            fname = self._file_write(cr, uid, value, checksum)
            vals.update({'store_fname': fname, 'db_datas': False})
        else:
            vals.update({'store_fname': False, 'db_datas': value})
        # write as SUPERUSER_ID: the user probably does not have write access
        # on the attachment itself; this path is also triggered during create
        super(ir_attachment, self).write(cr,
                                         SUPERUSER_ID, [id],
                                         vals,
                                         context=context)

        # After de-referencing the file in the database, check whether we need
        # to garbage-collect it on the filesystem
        if fname_to_delete:
            self._file_delete(cr, uid, fname_to_delete)
        return True

    def _compute_checksum(self, bin_data):
        """ compute the checksum for the given datas
            :param bin_data : datas in its binary form
        """
        # an empty file has a checksum too (for caching)
        return hashlib.sha1(bin_data or '').hexdigest()

    def _compute_mimetype(self, values):
        """ compute the mimetype of the given values
            :param values : dict of values to create or write an ir_attachment
            :return mime : string indicating the mimetype, or application/octet-stream by default
        """
        mimetype = 'application/octet-stream'
        if values.get('datas_fname'):
            mimetype = mimetypes.guess_type(values['datas_fname'])[0]
        if values.get('datas'):
            mimetype = guess_mimetype(values['datas'].decode('base64'))
        return mimetype

    def _index(self, cr, uid, bin_data, datas_fname, file_type):
        """ compute the index content of the given filename, or binary data.
            This is a python implementation of the unix command 'strings'.
            :param bin_data : datas in binary form
            :return index_content : string containing all the printable characters of the binary data
        """
        index_content = False
        if file_type:
            index_content = file_type.split('/')[0]
            if index_content == 'text':  # compute index_content only for text type
                words = re.findall("[^\x00-\x1F\x7F-\xFF]{4,}", bin_data)
                index_content = ustr("\n".join(words))
        return index_content

    _name = 'ir.attachment'
    _columns = {
        'name':
        fields.char('Attachment Name', required=True),
        'datas_fname':
        fields.char('File Name'),
        'description':
        fields.text('Description'),
        'res_name':
        fields.function(_name_get_resname,
                        type='char',
                        string='Resource Name',
                        store=True),
        'res_model':
        fields.char(
            'Resource Model',
            readonly=True,
            help="The database object this attachment will be attached to"),
        'res_field':
        fields.char('Resource Field', readonly=True),
        'res_id':
        fields.integer('Resource ID',
                       readonly=True,
                       help="The record id this is attached to"),
        'create_date':
        fields.datetime('Date Created', readonly=True),
        'create_uid':
        fields.many2one('res.users', 'Owner', readonly=True),
        'company_id':
        fields.many2one('res.company', 'Company', change_default=True),
        'type':
        fields.selection(
            [
                ('url', 'URL'),
                ('binary', 'File'),
            ],
            'Type',
            help=
            "You can either upload a file from your computer or copy/paste an internet link to your file",
            required=True,
            change_default=True),
        'url':
        fields.char('Url', size=1024),
        # al: we keep these legacy field names for backward compatibility with the document module
        'datas':
        fields.function(_data_get,
                        fnct_inv=_data_set,
                        string='File Content',
                        type="binary",
                        nodrop=True),
        'store_fname':
        fields.char('Stored Filename'),
        'db_datas':
        fields.binary('Database Data'),
        # computed fields depending on datas
        'file_size':
        fields.integer('File Size', readonly=True),
        'checksum':
        fields.char("Checksum/SHA1", size=40, select=True, readonly=True),
        'mimetype':
        fields.char('Mime Type', readonly=True),
        'index_content':
        fields.text('Indexed Content', readonly=True),
        'public':
        fields.boolean('Is public document'),
    }

    _defaults = {
        'type':
        'binary',
        'file_size':
        0,
        'mimetype':
        False,
        'company_id':
        lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(
            cr, uid, 'ir.attachment', context=c),
    }

    def _auto_init(self, cr, context=None):
        super(ir_attachment, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s',
                   ('ir_attachment_res_idx', ))
        if not cr.fetchone():
            cr.execute(
                'CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)'
            )
            cr.commit()

    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Restricts the access to an ir.attachment, according to referred model
        In the 'document' module, it is overriden to relax this hard rule, since
        more complex ones apply there.
        """
        res_ids = {}
        require_employee = False
        if ids:
            if isinstance(ids, (int, long)):
                ids = [ids]
            cr.execute(
                'SELECT res_model, res_id, create_uid, public FROM ir_attachment WHERE id = ANY (%s)',
                (ids, ))
            for rmod, rid, create_uid, public in cr.fetchall():
                if public and mode == 'read':
                    continue
                if not (rmod and rid):
                    if create_uid != uid:
                        require_employee = True
                    continue
                res_ids.setdefault(rmod, set()).add(rid)
        if values:
            if values.get('res_model') and values.get('res_id'):
                res_ids.setdefault(values['res_model'],
                                   set()).add(values['res_id'])

        ima = self.pool.get('ir.model.access')
        for model, mids in res_ids.items():
            # ignore attachments that are not attached to a resource anymore when checking access rights
            # (resource was deleted but attachment was not)
            if not self.pool.get(model):
                require_employee = True
                continue
            existing_ids = self.pool[model].exists(cr, uid, mids)
            if len(existing_ids) != len(mids):
                require_employee = True
            # For related models, check if we can write to the model, as unlinking
            # and creating attachments can be seen as an update to the model
            if (mode in ['unlink', 'create']):
                ima.check(cr, uid, model, 'write')
            else:
                ima.check(cr, uid, model, mode)
            self.pool[model].check_access_rule(cr,
                                               uid,
                                               existing_ids,
                                               mode,
                                               context=context)
        if require_employee:
            if not uid == SUPERUSER_ID and not self.pool[
                    'res.users'].has_group(cr, uid, 'base.group_user'):
                raise AccessError(
                    _("Sorry, you are not allowed to access this document."))

    def _search(self,
                cr,
                uid,
                args,
                offset=0,
                limit=None,
                order=None,
                context=None,
                count=False,
                access_rights_uid=None):
        # add res_field=False in domain if not present; the arg[0] trick below
        # works for domain items and '&'/'|'/'!' operators too
        if not any(arg[0] in ('id', 'res_field') for arg in args):
            args.insert(0, ('res_field', '=', False))

        ids = super(ir_attachment,
                    self)._search(cr,
                                  uid,
                                  args,
                                  offset=offset,
                                  limit=limit,
                                  order=order,
                                  context=context,
                                  count=False,
                                  access_rights_uid=access_rights_uid)

        if uid == SUPERUSER_ID:
            # rules do not apply for the superuser
            return len(ids) if count else ids

        if not ids:
            return 0 if count else []

        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        orig_ids = ids
        ids = set(ids)

        # For attachments, the permissions of the document they are attached to
        # apply, so we must remove attachments for which the user cannot access
        # the linked document.
        # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
        # and the permissions are checked in super() and below anyway.
        cr.execute(
            """SELECT id, res_model, res_id, public FROM ir_attachment WHERE id = ANY(%s)""",
            (list(ids), ))
        targets = cr.dictfetchall()
        model_attachments = {}
        for target_dict in targets:
            if not target_dict['res_model'] or target_dict['public']:
                continue
            # model_attachments = { 'model': { 'res_id': [id1,id2] } }
            model_attachments.setdefault(target_dict['res_model'],
                                         {}).setdefault(
                                             target_dict['res_id'] or 0,
                                             set()).add(target_dict['id'])

        # To avoid multiple queries for each attachment found, checks are
        # performed in batch as much as possible.
        ima = self.pool.get('ir.model.access')
        for model, targets in model_attachments.iteritems():
            if model not in self.pool:
                continue
            if not ima.check(cr, uid, model, 'read', False):
                # remove all corresponding attachment ids
                for attach_id in itertools.chain(*targets.values()):
                    ids.remove(attach_id)
                continue  # skip ir.rule processing, these ones are out already

            # filter ids according to what access rules permit
            target_ids = targets.keys()
            allowed_ids = [0] + self.pool[model].search(
                cr, uid, [('id', 'in', target_ids)], context=context)
            disallowed_ids = set(target_ids).difference(allowed_ids)
            for res_id in disallowed_ids:
                for attach_id in targets[res_id]:
                    ids.remove(attach_id)

        # sort result according to the original sort ordering
        result = [id for id in orig_ids if id in ids]
        return len(result) if count else list(result)

    def read(self,
             cr,
             uid,
             ids,
             fields_to_read=None,
             context=None,
             load='_classic_read'):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'read', context=context)
        return super(ir_attachment, self).read(cr,
                                               uid,
                                               ids,
                                               fields_to_read,
                                               context=context,
                                               load=load)

    def write(self, cr, uid, ids, vals, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'write', context=context, values=vals)
        # remove computed field depending of datas
        for field in ['file_size', 'checksum']:
            vals.pop(field, False)
        return super(ir_attachment, self).write(cr, uid, ids, vals, context)

    def copy(self, cr, uid, id, default=None, context=None):
        self.check(cr, uid, [id], 'write', context=context)
        return super(ir_attachment, self).copy(cr, uid, id, default, context)

    def unlink(self, cr, uid, ids, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'unlink', context=context)

        # First delete in the database, *then* in the filesystem if the
        # database allowed it. Helps avoid errors when concurrent transactions
        # are deleting the same file, and some of the transactions are
        # rolled back by PostgreSQL (due to concurrent updates detection).
        to_delete = [
            a.store_fname for a in self.browse(cr, uid, ids, context=context)
            if a.store_fname
        ]
        res = super(ir_attachment, self).unlink(cr, uid, ids, context)
        for file_path in to_delete:
            self._file_delete(cr, uid, file_path)

        return res

    def create(self, cr, uid, values, context=None):
        # remove computed field depending of datas
        for field in ['file_size', 'checksum']:
            values.pop(field, False)
        # if mimetype not given, compute it !
        if 'mimetype' not in values:
            values['mimetype'] = self._compute_mimetype(values)
        self.check(cr, uid, [], mode='write', context=context, values=values)
        return super(ir_attachment, self).create(cr, uid, values, context)

    def action_get(self, cr, uid, context=None):
        return self.pool.get('ir.actions.act_window').for_xml_id(
            cr, uid, 'base', 'action_attachment', context=context)
Example #14
class ir_model_fields_anonymize_wizard(osv.osv_memory):
    _name = 'ir.model.fields.anonymize.wizard'

    def _get_state(self, cr, uid, ids, name, arg, context=None):
        res = {}

        state = self._get_state_value(cr, uid, context=context)
        for id in ids:
            res[id] = state

        return res

    def _get_summary(self, cr, uid, ids, name, arg, context=None):
        res = {}
        summary = self._get_summary_value(cr, uid, context)
        for id in ids:
            res[id] = summary

        return res

    _columns = {
        'name':
        fields.char(string='File Name'),
        'summary':
        fields.function(_get_summary, type='text', string='Summary'),
        'file_export':
        fields.binary(string='Export'),
        'file_import':
        fields.binary(
            string='Import',
            help=
            "This is the file created by the anonymization process. It should have the '.pickle' extention."
        ),
        'state':
        fields.function(_get_state,
                        string='Status',
                        type='selection',
                        selection=WIZARD_ANONYMIZATION_STATES,
                        readonly=False),
        'msg':
        fields.text(string='Message'),
    }

    def _get_state_value(self, cr, uid, context=None):
        state = self.pool.get(
            'ir.model.fields.anonymization')._get_global_state(cr,
                                                               uid,
                                                               context=context)
        return state

    def _get_summary_value(self, cr, uid, context=None):
        summary = u''
        anon_field_obj = self.pool.get('ir.model.fields.anonymization')
        ir_model_fields_obj = self.pool.get('ir.model.fields')

        anon_field_ids = anon_field_obj.search(
            cr, uid, [('state', '<>', 'not_existing')], context=context)
        anon_fields = anon_field_obj.browse(cr,
                                            uid,
                                            anon_field_ids,
                                            context=context)

        field_ids = [
            anon_field.field_id.id for anon_field in anon_fields
            if anon_field.field_id
        ]
        fields = ir_model_fields_obj.browse(cr,
                                            uid,
                                            field_ids,
                                            context=context)

        fields_by_id = dict([(f.id, f) for f in fields])

        for anon_field in anon_fields:
            field = fields_by_id.get(anon_field.field_id.id)

            values = {
                'model_name': field.model_id.name,
                'model_code': field.model_id.model,
                'field_code': field.name,
                'field_name': field.field_description,
                'state': anon_field.state,
            }
            summary += u" * %(model_name)s (%(model_code)s) -> %(field_name)s (%(field_code)s): state: (%(state)s)\n" % values

        return summary

    def default_get(self, cr, uid, fields_list, context=None):
        res = {}
        res['name'] = '.pickle'
        res['summary'] = self._get_summary_value(cr, uid, context)
        res['state'] = self._get_state_value(cr, uid, context)
        res['msg'] = _(
            """Before executing the anonymization process, you should make a backup of your database."""
        )

        return res

    def fields_view_get(self,
                        cr,
                        uid,
                        view_id=None,
                        view_type='form',
                        context=None,
                        toolbar=False,
                        submenu=False):
        state = self.pool.get(
            'ir.model.fields.anonymization')._get_global_state(cr,
                                                               uid,
                                                               context=context)

        if context is None:
            context = {}

        step = context.get('step', 'new_window')

        res = super(ir_model_fields_anonymize_wizard,
                    self).fields_view_get(cr,
                                          uid,
                                          view_id=view_id,
                                          view_type=view_type,
                                          context=context,
                                          toolbar=toolbar,
                                          submenu=submenu)

        eview = etree.fromstring(res['arch'])
        placeholder = eview.xpath("group[@name='placeholder1']")
        if len(placeholder):
            placeholder = placeholder[0]
            if step == 'new_window' and state == 'clear':
                # clicked in the menu and the fields are not anonymized: warn the admin that backing up the db is very important
                placeholder.addnext(
                    etree.Element('field', {
                        'name': 'msg',
                        'colspan': '4',
                        'nolabel': '1'
                    }))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(
                    etree.Element('label', {'string': 'Warning'}))
                eview.remove(placeholder)
            elif step == 'new_window' and state == 'anonymized':
                # clicked in the menu and the fields are already anonymized
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(
                    etree.Element('field', {
                        'name': 'file_import',
                        'required': "1"
                    }))
                placeholder.addnext(
                    etree.Element('label', {'string': 'Anonymization file'}))
                eview.remove(placeholder)
            elif step == 'just_anonymized':
                # we just ran the anonymization process, we need the file export field
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(
                    etree.Element('field', {'name': 'file_export'}))
                # we need to remove the button:
                buttons = eview.xpath("button")
                for button in buttons:
                    eview.remove(button)
                # and add a message:
                placeholder.addnext(
                    etree.Element('field', {
                        'name': 'msg',
                        'colspan': '4',
                        'nolabel': '1'
                    }))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(
                    etree.Element('label', {'string': 'Result'}))
                # remove the placeholder:
                eview.remove(placeholder)
            elif step == 'just_desanonymized':
                # we just reversed the anonymization process, we don't need any field
                # we need to remove the button
                buttons = eview.xpath("button")
                for button in buttons:
                    eview.remove(button)
                # and add a message:
                placeholder.addnext(
                    etree.Element('field', {
                        'name': 'msg',
                        'colspan': '4',
                        'nolabel': '1'
                    }))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(
                    etree.Element('label', {'string': 'Result'}))
                # remove the placeholder:
                eview.remove(placeholder)
            else:
                msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                  " while some fields are not anonymized. You should try to solve this problem before trying to do anything else.")
                raise UserError(msg)

            res['arch'] = etree.tostring(eview)

        return res

    def _raise_after_history_update(self, cr, uid, history_id, error_type,
                                    error_msg):
        self.pool.get('ir.model.fields.anonymization.history').write(
            cr, uid, history_id, {
                'state': 'in_exception',
                'msg': error_msg,
            })
        raise UserError('%s: %s' % (error_type, error_msg))

    def anonymize_database(self, cr, uid, ids, context=None):
        """Sets the 'anonymized' state to defined fields"""

        # create a new history record:
        anonymization_history_model = self.pool.get(
            'ir.model.fields.anonymization.history')

        vals = {
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'state': 'started',
            'direction': 'clear -> anonymized',
        }
        history_id = anonymization_history_model.create(cr, uid, vals)

        # check that all the defined fields are in the 'clear' state
        state = self.pool.get(
            'ir.model.fields.anonymization')._get_global_state(cr,
                                                               uid,
                                                               context=context)
        if state == 'anonymized':
            self._raise_after_history_update(
                cr, uid, history_id, _('Error !'),
                _("The database is currently anonymized, you cannot anonymize it again."
                  ))
        elif state == 'unstable':
            msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                  " while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
            self._raise_after_history_update(cr, uid, history_id, 'Error !',
                                             msg)

        # do the anonymization:
        dirpath = os.environ.get('HOME') or os.getcwd()
        rel_filepath = 'field_anonymization_%s_%s.pickle' % (cr.dbname,
                                                             history_id)
        abs_filepath = os.path.abspath(os.path.join(dirpath, rel_filepath))

        ir_model_fields_anonymization_model = self.pool.get(
            'ir.model.fields.anonymization')
        field_ids = ir_model_fields_anonymization_model.search(
            cr, uid, [('state', '<>', 'not_existing')], context=context)
        fields = ir_model_fields_anonymization_model.browse(cr,
                                                            uid,
                                                            field_ids,
                                                            context=context)

        if not fields:
            msg = "No fields are going to be anonymized."
            self._raise_after_history_update(cr, uid, history_id, 'Error !',
                                             msg)

        data = []

        for field in fields:
            model_name = field.model_id.model
            field_name = field.field_id.name
            field_type = field.field_id.ttype
            table_name = self.pool[model_name]._table

            # get the current value
            sql = "select id, %s from %s" % (field_name, table_name)
            cr.execute(sql)
            records = cr.dictfetchall()
            for record in records:
                data.append({
                    "model_id": model_name,
                    "field_id": field_name,
                    "id": record['id'],
                    "value": record[field_name]
                })

                # anonymize the value:
                anonymized_value = None

                sid = str(record['id'])
                if field_type == 'char':
                    anonymized_value = 'xxx' + sid
                elif field_type == 'selection':
                    anonymized_value = 'xxx' + sid
                elif field_type == 'text':
                    anonymized_value = 'xxx' + sid
                elif field_type == 'boolean':
                    anonymized_value = random.choice([True, False])
                elif field_type == 'date':
                    anonymized_value = '2011-11-11'
                elif field_type == 'datetime':
                    anonymized_value = '2011-11-11 11:11:11'
                elif field_type == 'float':
                    anonymized_value = 0.0
                elif field_type == 'integer':
                    anonymized_value = 0
                elif field_type in [
                        'binary', 'many2many', 'many2one', 'one2many',
                        'reference'
                ]:  # cannot anonymize these kind of fields
                    msg = _(
                        "Cannot anonymize fields of these types: binary, many2many, many2one, one2many, reference."
                    )
                    self._raise_after_history_update(cr, uid, history_id,
                                                     'Error !', msg)

                if anonymized_value is None:
                    self._raise_after_history_update(
                        cr, uid, history_id, _('Error !'),
                        _("Anonymized value can not be empty."))

                sql = "update %(table)s set %(field)s = %%(anonymized_value)s where id = %%(id)s" % {
                    'table': table_name,
                    'field': field_name,
                }
                cr.execute(sql, {
                    'anonymized_value': anonymized_value,
                    'id': record['id']
                })

        # save pickle:
        fn = open(abs_filepath, 'wb')  # binary mode: HIGHEST_PROTOCOL is a binary pickle format
        pickle.dump(data, fn, pickle.HIGHEST_PROTOCOL)
        fn.close()  # flush and close before the file is reopened for export below

        # update the anonymization fields:
        values = {
            'state': 'anonymized',
        }
        ir_model_fields_anonymization_model.write(cr,
                                                  uid,
                                                  field_ids,
                                                  values,
                                                  context=context)

        # add a result message in the wizard:
        msgs = [
            "Anonymization successful.",
            "",
            "Donot forget to save the resulting file to a safe place because you will not be able to revert the anonymization without this file.",
            "",
            "This file is also stored in the %s directory. The absolute file path is: %s.",
        ]
        msg = '\n'.join(msgs) % (dirpath, abs_filepath)

        fn = open(abs_filepath, 'rb')

        self.write(cr, uid, ids, {
            'msg': msg,
            'file_export': base64.encodestring(fn.read()),
        })
        fn.close()

        # update the history record:
        anonymization_history_model.write(
            cr, uid, history_id, {
                'field_ids': [[6, 0, field_ids]],
                'msg': msg,
                'filepath': abs_filepath,
                'state': 'done',
            })

        # handle the view:
        view_id = self.pool['ir.model.data'].xmlid_to_res_id(
            cr, uid,
            'anonymization.view_ir_model_fields_anonymize_wizard_form')

        return {
            'res_id': ids[0],
            'view_id': [view_id],
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'ir.model.fields.anonymize.wizard',
            'type': 'ir.actions.act_window',
            'context': {
                'step': 'just_anonymized'
            },
            'target': 'new',
        }

    def reverse_anonymize_database(self, cr, uid, ids, context=None):
        """Set the 'clear' state to defined fields"""
        ir_model_fields_anonymization_model = self.pool.get(
            'ir.model.fields.anonymization')
        anonymization_history_model = self.pool.get(
            'ir.model.fields.anonymization.history')

        # create a new history record:
        vals = {
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'state': 'started',
            'direction': 'anonymized -> clear',
        }
        history_id = anonymization_history_model.create(cr, uid, vals)

        # check that all the defined fields are in the 'anonymized' state
        state = ir_model_fields_anonymization_model._get_global_state(
            cr, uid, context=context)
        if state == 'clear':
            raise UserError(
                _("The database is not currently anonymized, you cannot reverse the anonymization."
                  ))
        elif state == 'unstable':
            msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                  " while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
            raise UserError(msg)

        wizards = self.browse(cr, uid, ids, context=context)
        for wizard in wizards:
            if not wizard.file_import:
                msg = _(
                    "It is not possible to reverse the anonymization process without supplying the anonymization export file."
                )
                self._raise_after_history_update(cr, uid, history_id,
                                                 'Error !', msg)

            # reverse the anonymization:
            # load the pickle file content into a data structure:
            data = pickle.loads(base64.decodestring(wizard.file_import))

            migration_fix_obj = self.pool.get(
                'ir.model.fields.anonymization.migration.fix')
            fix_ids = migration_fix_obj.search(
                cr, uid, [('target_version', '=', '8.0')])
            fixes = migration_fix_obj.read(cr, uid, fix_ids, [
                'model_name', 'field_name', 'query', 'query_type', 'sequence'
            ])
            fixes = group(fixes, ('model_name', 'field_name'))

            for line in data:
                queries = []
                table_name = self.pool[line['model_id']]._table if line[
                    'model_id'] in self.pool else None

                # check if custom sql exists:
                key = (line['model_id'], line['field_id'])
                custom_updates = fixes.get(key)
                if custom_updates:
                    custom_updates.sort(key=itemgetter('sequence'))
                    queries = [(record['query'], record['query_type'])
                               for record in custom_updates
                               if record['query_type']]
                elif table_name:
                    queries = [(
                        "update %(table)s set %(field)s = %%(value)s where id = %%(id)s"
                        % {
                            'table': table_name,
                            'field': line['field_id'],
                        }, 'sql')]

                for query in queries:
                    if query[1] == 'sql':
                        sql = query[0]
                        cr.execute(sql, {
                            'value': line['value'],
                            'id': line['id']
                        })
                    elif query[1] == 'python':
                        raw_code = query[0]
                        code = raw_code % line
                        eval(code)
                    else:
                        raise Exception(
                            "Unknown query type '%s'. Valid types are: sql, python."
                            % (query[1], ))

            # update the anonymization fields:
            ir_model_fields_anonymization_model = self.pool.get(
                'ir.model.fields.anonymization')
            field_ids = ir_model_fields_anonymization_model.search(
                cr, uid, [('state', '<>', 'not_existing')], context=context)
            values = {
                'state': 'clear',
            }
            ir_model_fields_anonymization_model.write(cr,
                                                      uid,
                                                      field_ids,
                                                      values,
                                                      context=context)

            # add a result message in the wizard:
            msg = '\n'.join([
                "Successfully reversed the anonymization.",
                "",
            ])

            self.write(cr, uid, ids, {'msg': msg})

            # update the history record:
            anonymization_history_model.write(
                cr, uid, history_id, {
                    'field_ids': [[6, 0, field_ids]],
                    'msg': msg,
                    'filepath': False,
                    'state': 'done',
                })

            # handle the view:
            view_id = self.pool['ir.model.data'].xmlid_to_res_id(
                cr, uid,
                'anonymization.view_ir_model_fields_anonymize_wizard_form')

            return {
                'res_id': ids[0],
                'view_id': [view_id],
                'view_type': 'form',
                "view_mode": 'form',
                'res_model': 'ir.model.fields.anonymize.wizard',
                'type': 'ir.actions.act_window',
                'context': {
                    'step': 'just_desanonymized'
                },
                'target': 'new',
            }

    def _id_get(self, cr, uid, model, id_str, mod):
        if '.' in id_str:
            mod, id_str = id_str.split('.')
        try:
            idn = self.pool.get('ir.model.data')._get_id(cr, uid, mod, id_str)
            res = int(
                self.pool.get('ir.model.data').read(cr, uid, [idn],
                                                    ['res_id'])[0]['res_id'])
        except Exception:
            res = None
        return res
Example #15
class ir_property(osv.osv):
    _name = 'ir.property'

    _columns = {
        'name':
        fields.char('Name', select=1),
        'res_id':
        fields.char(
            'Resource',
            help="If not set, acts as a default value for new resources",
            select=1),
        'company_id':
        fields.many2one('res.company', 'Company', select=1),
        'fields_id':
        fields.many2one('ir.model.fields',
                        'Field',
                        ondelete='cascade',
                        required=True,
                        select=1),
        'value_float':
        fields.float('Value'),
        'value_integer':
        fields.integer('Value'),
        'value_text':
        fields.text('Value'),  # will contain (char, text)
        'value_binary':
        fields.binary('Value'),
        'value_reference':
        fields.char('Value'),
        'value_datetime':
        fields.datetime('Value'),
        'type':
        fields.selection([
            ('char', 'Char'),
            ('float', 'Float'),
            ('boolean', 'Boolean'),
            ('integer', 'Integer'),
            ('text', 'Text'),
            ('binary', 'Binary'),
            ('many2one', 'Many2One'),
            ('date', 'Date'),
            ('datetime', 'DateTime'),
            ('selection', 'Selection'),
        ],
                         'Type',
                         required=True,
                         select=1),
    }

    _defaults = {
        'type': 'many2one',
    }

    def _update_values(self, cr, uid, ids, values):
        value = values.pop('value', None)
        if not value:
            return values

        prop = None
        type_ = values.get('type')
        if not type_:
            if ids:
                prop = self.browse(cr, uid, ids[0])
                type_ = prop.type
            else:
                type_ = self._defaults['type']

        field = TYPE2FIELD.get(type_)
        if not field:
            raise UserError(_('Invalid type'))

        if field == 'value_reference':
            if isinstance(value, orm.BaseModel):
                value = '%s,%d' % (value._name, value.id)
            elif isinstance(value, (int, long)):
                field_id = values.get('fields_id')
                if not field_id:
                    if not prop:
                        raise ValueError()
                    field_id = prop.fields_id
                else:
                    field_id = self.pool.get('ir.model.fields').browse(
                        cr, uid, field_id)

                value = '%s,%d' % (field_id.relation, value)

        values[field] = value
        return values

    def write(self, cr, uid, ids, values, context=None):
        return super(ir_property,
                     self).write(cr,
                                 uid,
                                 ids,
                                 self._update_values(cr, uid, ids, values),
                                 context=context)

    def create(self, cr, uid, values, context=None):
        return super(ir_property,
                     self).create(cr,
                                  uid,
                                  self._update_values(cr, uid, None, values),
                                  context=context)

    def get_by_record(self, cr, uid, record, context=None):
        if record.type in ('char', 'text', 'selection'):
            return record.value_text
        elif record.type == 'float':
            return record.value_float
        elif record.type == 'boolean':
            return bool(record.value_integer)
        elif record.type == 'integer':
            return record.value_integer
        elif record.type == 'binary':
            return record.value_binary
        elif record.type == 'many2one':
            if not record.value_reference:
                return False
            model, resource_id = record.value_reference.split(',')
            value = self.pool[model].browse(cr,
                                            uid,
                                            int(resource_id),
                                            context=context)
            return value.exists()
        elif record.type == 'datetime':
            return record.value_datetime
        elif record.type == 'date':
            if not record.value_datetime:
                return False
            return time.strftime(
                '%Y-%m-%d',
                time.strptime(record.value_datetime, '%Y-%m-%d %H:%M:%S'))
        return False

    def get(self, cr, uid, name, model, res_id=False, context=None):
        domain = self._get_domain(cr, uid, name, model, context=context)
        if domain is not None:
            domain = [('res_id', '=', res_id)] + domain
            # search with company_id asc to make sure that properties specific
            # to a company are returned first
            nid = self.search(cr,
                              uid,
                              domain,
                              limit=1,
                              order='company_id asc',
                              context=context)
            if not nid:
                return False
            record = self.browse(cr, uid, nid[0], context=context)
            return self.get_by_record(cr, uid, record, context=context)
        return False

    def _get_domain(self, cr, uid, prop_name, model, context=None):
        context = context or {}
        cr.execute('select id from ir_model_fields where name=%s and model=%s',
                   (prop_name, model))
        res = cr.fetchone()
        if not res:
            return None

        cid = context.get('force_company')
        if not cid:
            company = self.pool.get('res.company')
            cid = company._company_default_get(cr,
                                               uid,
                                               model,
                                               res[0],
                                               context=context)

        return [('fields_id', '=', res[0]), ('company_id', 'in', [cid, False])]

    @api.model
    def get_multi(self, name, model, ids):
        """ Read the property field `name` for the records of model `model` with
            the given `ids`, and return a dictionary mapping `ids` to their
            corresponding value.
        """
        if not ids:
            return {}

        domain = self._get_domain(name, model)
        if domain is None:
            return dict.fromkeys(ids, False)

        # retrieve the values for the given ids and the default value, too
        refs = {('%s,%s' % (model, id)): id for id in ids}
        refs[False] = False
        domain += [('res_id', 'in', list(refs))]

        # note: order by 'company_id asc' will return non-null values first
        props = self.search(domain, order='company_id asc')
        result = {}
        for prop in props:
            # for a given res_id, take the first property only
            id = refs.pop(prop.res_id, None)
            if id is not None:
                result[id] = self.get_by_record(prop)

        # set the default value to the ids that are not in result
        default_value = result.pop(False, False)
        for id in ids:
            result.setdefault(id, default_value)

        return result

    @api.model
    def set_multi(self, name, model, values):
        """ Assign the property field `name` for the records of model `model`
            with `values` (dictionary mapping record ids to their value).
        """
        def clean(value):
            return value.id if isinstance(value, models.BaseModel) else value

        if not values:
            return

        domain = self._get_domain(name, model)
        if domain is None:
            raise Exception()

        # retrieve the default value for the field
        default_value = clean(self.get(name, model))

        # retrieve the properties corresponding to the given record ids
        self._cr.execute(
            "SELECT id FROM ir_model_fields WHERE name=%s AND model=%s",
            (name, model))
        field_id = self._cr.fetchone()[0]
        company_id = self.env.context.get(
            'force_company') or self.env['res.company']._company_default_get(
                model, field_id).id
        refs = {('%s,%s' % (model, id)): id for id in values}
        props = self.search([
            ('fields_id', '=', field_id),
            ('company_id', '=', company_id),
            ('res_id', 'in', list(refs)),
        ])

        # modify existing properties
        for prop in props:
            id = refs.pop(prop.res_id)
            value = clean(values[id])
            if value == default_value:
                prop.unlink()
            elif value != clean(prop.get_by_record(prop)):
                prop.write({'value': value})

        # create new properties for records that do not have one yet
        for ref, id in refs.iteritems():
            value = clean(values[id])
            if value != default_value:
                self.create({
                    'fields_id': field_id,
                    'company_id': company_id,
                    'res_id': ref,
                    'name': name,
                    'value': value,
                    'type': self.env[model]._fields[name].type,
                })

    @api.model
    def search_multi(self, name, model, operator, value):
        """ Return a domain for the records that match the given condition. """
        default_matches = False
        include_zero = False

        field = self.env[model]._fields[name]
        if field.type == 'many2one':
            comodel = field.comodel_name

            def makeref(value):
                return value and '%s,%s' % (comodel, value)

            if operator == "=":
                value = makeref(value)
                # if searching properties not set, search those not in those set
                if value is False:
                    default_matches = True
            elif operator in ('!=', '<=', '<', '>', '>='):
                value = makeref(value)
            elif operator in ('in', 'not in'):
                value = map(makeref, value)
            elif operator in ('=like', '=ilike', 'like', 'not like', 'ilike',
                              'not ilike'):
                # most probably inefficient... but correct
                target = self.env[comodel]
                target_names = target.name_search(value,
                                                  operator=operator,
                                                  limit=None)
                target_ids = map(itemgetter(0), target_names)
                operator, value = 'in', map(makeref, target_ids)
        elif field.type in ('integer', 'float'):
            # No ir.property record is created when a float or integer field is
            # set to 0. To match the records linked to a property equal to 0,
            # we therefore search with the negated operator and return the
            # opposite domain ('id' not in the ids that matched the negation).
            if value == 0 and operator == '=':
                operator = '!='
                include_zero = True
            elif value <= 0 and operator == '>=':
                operator = '<'
                include_zero = True
            elif value <= 0 and operator == '>':
                operator = '<='
                include_zero = True
            elif value >= 0 and operator == '<=':
                operator = '>'
                include_zero = True
            elif value >= 0 and operator == '<':
                operator = '>='
                include_zero = True

        # retrieve the properties that match the condition
        domain = self._get_domain(name, model)
        if domain is None:
            raise Exception()
        props = self.search(domain +
                            [(TYPE2FIELD[field.type], operator, value)])

        # retrieve the records corresponding to the properties that match
        good_ids = []
        for prop in props:
            if prop.res_id:
                res_model, res_id = prop.res_id.split(',')
                good_ids.append(int(res_id))
            else:
                default_matches = True

        if include_zero:
            return [('id', 'not in', good_ids)]
        elif default_matches:
            # exclude all records with a property that does not match
            all_ids = []
            props = self.search(domain + [('res_id', '!=', False)])
            for prop in props:
                res_model, res_id = prop.res_id.split(',')
                all_ids.append(int(res_id))
            bad_ids = list(set(all_ids) - set(good_ids))
            return [('id', 'not in', bad_ids)]
        else:
            return [('id', 'in', good_ids)]