odoo13 source code - 004: models_2

Continuing from the previous post. There is too much to cover in one go, so this entry is a placeholder for now; the gaps will be filled in when I have time.

    def write(self, vals):
        """ write(vals)

        Updates all records in the current set with the provided values.

        :param dict vals: fields to update and the value to set on them e.g::

                {'foo': 1, 'bar': "Qux"}

            will set the field ``foo`` to ``1`` and the field ``bar`` to
            ``"Qux"`` if those are valid (otherwise it will trigger an error).

        :raise AccessError: * if the user has no write rights on the requested object
                            * if the user tries to bypass access rules for write on the requested object
        :raise ValidationError: if the user tries to enter an invalid value for a field that is not in the selection
        :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)

        * For numeric fields (:class:`~odoo.fields.Integer`,
          :class:`~odoo.fields.Float`) the value should be of the
          corresponding type
        * For :class:`~odoo.fields.Boolean`, the value should be a
          :class:`python:bool`
        * For :class:`~odoo.fields.Selection`, the value should match the
          selection values (generally :class:`python:str`, sometimes
          :class:`python:int`)
        * For :class:`~odoo.fields.Many2one`, the value should be the
          database identifier of the record to set
        * Other non-relational fields use a string for value

          .. danger::

              for historical and compatibility reasons,
              :class:`~odoo.fields.Date` and
              :class:`~odoo.fields.Datetime` fields use strings as values
              (written and read) rather than :class:`~python:datetime.date` or
              :class:`~python:datetime.datetime`. These date strings are
              UTC-only and formatted according to
              :const:`odoo.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
              :const:`odoo.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`
        * .. _openerp/models/relationals/format:

          :class:`~odoo.fields.One2many` and
          :class:`~odoo.fields.Many2many` use a special "commands" format to
          manipulate the set of records stored in/associated with the field.

          This format is a list of triplets executed sequentially, where each
          triplet is a command to execute on the set of records. Not all
          commands apply in all situations. Possible commands are:

          ``(0, 0, values)``
              adds a new record created from the provided ``values`` dict.
          ``(1, id, values)``
              updates an existing record of id ``id`` with the values in
              ``values``. Can not be used in :meth:`~.create`.
          ``(2, id, 0)``
              removes the record of id ``id`` from the set, then deletes it
              (from the database). Can not be used in :meth:`~.create`.
          ``(3, id, 0)``
              removes the record of id ``id`` from the set, but does not
              delete it. Can not be used in
              :meth:`~.create`.
          ``(4, id, 0)``
              adds an existing record of id ``id`` to the set.
          ``(5, 0, 0)``
              removes all records from the set, equivalent to using the
              command ``3`` on every record explicitly. Can not be used in
              :meth:`~.create`.
          ``(6, 0, ids)``
              replaces all existing records in the set by the ``ids`` list,
              equivalent to using the command ``5`` followed by a command
              ``4`` for each ``id`` in ``ids``.
        """
        if not self:
            return True

        self.check_access_rights('write')
        self.check_field_access_rights('write', vals.keys())
        self.check_access_rule('write')
        env = self.env

        bad_names = {'id', 'parent_path'}
        if self._log_access:
            # the superuser can set log_access fields while loading registry
            if not(self.env.uid == SUPERUSER_ID and not self.pool.ready):
                bad_names.update(LOG_ACCESS_COLUMNS)

        determine_inverses = defaultdict(list)      # {inverse: fields}
        records_to_inverse = {}                     # {field: records}
        relational_names = []
        protected = set()
        check_company = False
        for fname in vals:
            field = self._fields.get(fname)
            if not field:
                raise ValueError("Invalid field %r on model %r" % (fname, self._name))
            if field.inverse:
                if field.type in ('one2many', 'many2many'):
                    # The written value is a list of commands that must be applied
                    # on the field's current value. Because the field is
                    # protected while being written, the field's current value
                    # will not be computed and default to an empty recordset. So
                    # make sure the field's value is in cache before writing, in
                    # order to avoid an inconsistent update.
                    self[fname]
                determine_inverses[field.inverse].append(field)
                # DLE P150: `test_cancel_propagation`, `test_manufacturing_3_steps`, `test_manufacturing_flow`
                # TODO: check whether still necessary
                records_to_inverse[field] = self.filtered('id')
            if field.relational or self._field_inverses[field]:
                relational_names.append(fname)
            if field.inverse or (field.compute and not field.readonly):
                if field.store or field.type not in ('one2many', 'many2many'):
                    # Protect the field from being recomputed while being
                    # inversed. In the case of non-stored x2many fields, the
                    # field's value may contain unexpected new records (created
                    # by command 0). Those new records are necessary for
                    # inversing the field, but should no longer appear if the
                    # field is recomputed afterwards. Not protecting the field
                    # will automatically invalidate the field from the cache,
                    # forcing its value to be recomputed once dependencies are
                    # up-to-date.
                    protected.update(self._field_computed.get(field, [field]))
            if fname == 'company_id' or (field.relational and field.check_company):
                check_company = True

        # protect fields being written against recomputation
        with env.protecting(protected, self):
            # determine records depending on values
            self.modified(relational_names)

            real_recs = self.filtered('id')

            # If there are only fields that do not trigger _write (e.g. only
            # determine inverse), the below ensures that `write_date` and
            # `write_uid` are updated (`test_orm.py`, `test_write_date`)
            if self._log_access and self.ids:
                towrite = env.all.towrite[self._name]
                for record in real_recs:
                    towrite[record.id]['write_uid'] = self.env.uid
                    towrite[record.id]['write_date'] = False
                self.env.cache.invalidate([
                    (self._fields['write_date'], self.ids),
                    (self._fields['write_uid'], self.ids),
                ])

            # for monetary fields, their related currency field must be cached
            # before the amount so it can be rounded correctly
            for fname in sorted(vals, key=lambda x: self._fields[x].type=='monetary'):
                if fname in bad_names:
                    continue
                field = self._fields[fname]
                field.write(self, vals[fname])

            # determine records depending on new values
            #
            # Call modified after write, because modified() can trigger a
            # search, which can trigger a flush, which can trigger a recompute
            # that removes the field from the recompute list while not all the
            # values required for the computation are in cache yet.
            # e.g. writing on `name` of `res.partner` triggers the recompute of
            # `display_name`, which triggers a search on child_ids to find the
            # children whose display_name must be recomputed, which triggers
            # the flush of `display_name` because the _order of res.partner
            # includes display_name. The computation of display_name would then
            # be done too soon, because parent_id is not written yet.
            # (`test_01_website_reset_password_tour`)
            self.modified(vals)

            if self._parent_store and self._parent_name in vals:
                self.flush([self._parent_name])

            # validate non-inversed fields first
            inverse_fields = [f.name for fs in determine_inverses.values() for f in fs]
            real_recs._validate_fields(set(vals) - set(inverse_fields))

            for fields in determine_inverses.values():
                # inverse records that are not being computed
                try:
                    fields[0].determine_inverse(real_recs)
                except AccessError as e:
                    if fields[0].inherited:
                        description = self.env['ir.model']._get(self._name).name
                        raise AccessError(
                            _("%(previous_message)s\n\nImplicitly accessed through '%(document_kind)s' (%(document_model)s).") % {
                                'previous_message': e.args[0],
                                'document_kind': description,
                                'document_model': self._name,
                            }
                        )
                    raise

            # validate inversed fields
            real_recs._validate_fields(inverse_fields)

        if check_company and self._check_company_auto:
            self._check_company()
        return True
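
The command triplets documented in the docstring above are easiest to read in a concrete call. A hedged usage sketch, assuming a `res.partner`-like record with a many2many `category_id` and a one2many `child_ids` (the variable names are made up):

    # usage sketch, not part of models.py; records and fields are assumptions
    partner.write({
        'name': 'Acme',                                  # plain scalar field
        'category_id': [(6, 0, [tag_a.id, tag_b.id])],   # replace the whole set
        'child_ids': [
            (0, 0, {'name': 'New contact'}),             # create and link
            (1, child.id, {'name': 'Renamed contact'}),  # update a linked record
            (3, other_child.id, 0),                      # unlink without deleting
        ],
    })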

    def _write(self, vals):
        # low-level implementation of write()
        if not self:
            return True

        self._check_concurrency()
        cr = self._cr

        # determine records that require updating parent_path
        parent_records = self._parent_store_update_prepare(vals)

        # determine SQL values
        columns = []                    # list of (column_name, format, value)

        for name, val in vals.items():
            if self._log_access and name in LOG_ACCESS_COLUMNS and not val:
                continue
            field = self._fields[name]
            assert field.store

            if field.deprecated:
                _logger.warning('Field %s is deprecated: %s', field, field.deprecated)

            assert field.column_type
            columns.append((name, field.column_format, val))

        if self._log_access:
            if not vals.get('write_uid'):
                columns.append(('write_uid', '%s', self._uid))
            if not vals.get('write_date'):
                columns.append(('write_date', '%s', AsIs("(now() at time zone 'UTC')")))

        # update columns
        if columns:
            query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
                self._table, ','.join('"%s"=%s' % (column[0], column[1]) for column in columns),
            )
            params = [column[2] for column in columns]
            for sub_ids in cr.split_for_in_conditions(set(self.ids)):
                cr.execute(query, params + [sub_ids])
                if cr.rowcount != len(sub_ids):
                    raise MissingError(
                        _('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description
                        + '\n\n({} {}, {} {})'.format(_('Records:'), sub_ids[:6], _('User:'), self._uid)
                    )

        # update parent_path
        if parent_records:
            parent_records._parent_store_update()

        return True
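
Stripped of the ORM context, the UPDATE assembly above is plain string formatting over (column, format, value) triples. A standalone sketch (table and values are illustrative only):

    # what the query/params pair looks like for two columns
    columns = [('name', '%s', 'Acme'), ('sequence', '%s', 10)]
    query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
        'res_partner', ','.join('"%s"=%s' % (c[0], c[1]) for c in columns))
    params = [c[2] for c in columns]
    # query:  UPDATE "res_partner" SET "name"=%s,"sequence"=%s WHERE id IN %s
    # params: ['Acme', 10]; executed once per chunk from split_for_in_conditions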

    @api.model_create_multi
    @api.returns('self', lambda value: value.id)
    def create(self, vals_list):
        """ create(vals_list) -> records

        Creates new records for the model.

        The new records are initialized using the values from the list of dicts
        ``vals_list``, and if necessary those from :meth:`~.default_get`.

        :param list vals_list:
            values for the model's fields, as a list of dictionaries::

                [{'field_name': field_value, ...}, ...]

            For backward compatibility, ``vals_list`` may be a dictionary.
            It is treated as a singleton list ``[vals]``, and a single record
            is returned.

            see :meth:`~.write` for details

        :return: the created records
        :raise AccessError: * if the user has no create rights on the requested object
                            * if the user tries to bypass access rules for create on the requested object
        :raise ValidationError: if the user tries to enter an invalid value for a field that is not in the selection
        :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
        """
        if not vals_list:
            return self.browse()

        self = self.browse()
        self.check_access_rights('create')

        bad_names = {'id', 'parent_path'}
        if self._log_access:
            # the superuser can set log_access fields while loading registry
            if not(self.env.uid == SUPERUSER_ID and not self.pool.ready):
                bad_names.update(LOG_ACCESS_COLUMNS)

        # classify fields for each record
        data_list = []
        inversed_fields = set()

        for vals in vals_list:
            # add missing defaults
            vals = self._add_missing_default_values(vals)

            # distribute fields into sets for various purposes
            data = {}
            data['stored'] = stored = {}
            data['inversed'] = inversed = {}
            data['inherited'] = inherited = defaultdict(dict)
            data['protected'] = protected = set()
            for key, val in vals.items():
                if key in bad_names:
                    continue
                field = self._fields.get(key)
                if not field:
                    raise ValueError("Invalid field %r on model %r" % (key, self._name))
                if field.company_dependent:
                    irprop_def = self.env['ir.property'].get(key, self._name)
                    cached_def = field.convert_to_cache(irprop_def, self)
                    cached_val = field.convert_to_cache(val, self)
                    if cached_val == cached_def:
                        # val is the same as the default value defined in
                        # 'ir.property'; by design, 'ir.property' will not
                        # create entries specific to these records; skipping the
                        # field inverse saves 4 SQL queries
                        continue
                if field.store:
                    stored[key] = val
                if field.inherited:
                    inherited[field.related_field.model_name][key] = val
                elif field.inverse:
                    inversed[key] = val
                    inversed_fields.add(field)
                # protect non-readonly computed fields against (re)computation
                if field.compute and not field.readonly:
                    protected.update(self._field_computed.get(field, [field]))

            data_list.append(data)

        # create or update parent records
        for model_name, parent_name in self._inherits.items():
            parent_data_list = []
            for data in data_list:
                if not data['stored'].get(parent_name):
                    parent_data_list.append(data)
                elif data['inherited'][model_name]:
                    parent = self.env[model_name].browse(data['stored'][parent_name])
                    parent.write(data['inherited'][model_name])

            if parent_data_list:
                parents = self.env[model_name].create([
                    data['inherited'][model_name]
                    for data in parent_data_list
                ])
                for parent, data in zip(parents, parent_data_list):
                    data['stored'][parent_name] = parent.id

        # create records with stored fields
        records = self._create(data_list)

        # protect fields being written against recomputation
        protected = [(data['protected'], data['record']) for data in data_list]
        with self.env.protecting(protected):
            # group fields by inverse method (to call it once), and order groups
            # by dependence (in case they depend on each other)
            field_groups = (fields for _inv, fields in groupby(inversed_fields, attrgetter('inverse')))
            for fields in field_groups:
                # determine which records to inverse for those fields
                inv_names = {field.name for field in fields}
                rec_vals = [
                    (data['record'], {
                        name: data['inversed'][name]
                        for name in inv_names
                        if name in data['inversed']
                    })
                    for data in data_list
                    if not inv_names.isdisjoint(data['inversed'])
                ]

                # If a field is not stored, its inverse method will probably
                # write on its dependencies, which will invalidate the field on
                # all records. We therefore inverse the field record by record.
                if all(field.store or field.company_dependent for field in fields):
                    batches = [rec_vals]
                else:
                    batches = [[rec_data] for rec_data in rec_vals]

                for batch in batches:
                    for record, vals in batch:
                        record._update_cache(vals)
                    batch_recs = self.concat(*(record for record, vals in batch))
                    fields[0].determine_inverse(batch_recs)

        # check Python constraints for non-stored inversed fields
        for data in data_list:
            data['record']._validate_fields(set(data['inversed']) - set(data['stored']))

        if self._check_company_auto:
            records._check_company()
        return records
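
Since the method is decorated with @api.model_create_multi, a list of dicts creates a batch in one call, while a single dict is still accepted for backward compatibility. A hedged usage sketch (env is assumed to be an available Environment):

    partners = env['res.partner'].create([
        {'name': 'Alpha'},
        {'name': 'Beta', 'child_ids': [(0, 0, {'name': 'Beta contact'})]},
    ])
    single = env['res.partner'].create({'name': 'Gamma'})   # legacy single-dict form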

    @api.model
    def _create(self, data_list):
        """ Create records from the stored field values in ``data_list``. """
        assert data_list
        cr = self.env.cr
        quote = '"{}"'.format

        # insert rows
        ids = []                        # ids of created records
        other_fields = set()            # non-column fields
        translated_fields = set()       # translated fields

        # column names, formats and values (for common fields)
        columns0 = [('id', "nextval(%s)", self._sequence)]
        if self._log_access:
            columns0.append(('create_uid', "%s", self._uid))
            columns0.append(('create_date', "%s", AsIs("(now() at time zone 'UTC')")))
            columns0.append(('write_uid', "%s", self._uid))
            columns0.append(('write_date', "%s", AsIs("(now() at time zone 'UTC')")))

        for data in data_list:
            # determine column values
            stored = data['stored']
            columns = [column for column in columns0 if column[0] not in stored]
            for name, val in sorted(stored.items()):
                field = self._fields[name]
                assert field.store

                if field.column_type:
                    col_val = field.convert_to_column(val, self, stored)
                    columns.append((name, field.column_format, col_val))
                    if field.translate is True:
                        translated_fields.add(field)
                else:
                    other_fields.add(field)

            # insert a row with the given columns
            query = "INSERT INTO {} ({}) VALUES ({}) RETURNING id".format(
                quote(self._table),
                ", ".join(quote(name) for name, fmt, val in columns),
                ", ".join(fmt for name, fmt, val in columns),
            )
            params = [val for name, fmt, val in columns]
            cr.execute(query, params)
            ids.append(cr.fetchone()[0])

        # put the new records in cache, and update inverse fields, for many2one
        #
        # cachetoclear is an optimization to avoid modified()'s cost until other_fields are processed
        cachetoclear = []
        records = self.browse(ids)
        inverses_update = defaultdict(list)     # {(field, value): ids}
        for data, record in zip(data_list, records):
            data['record'] = record
            # DLE P104: test_inherit.py, test_50_search_one2many
            vals = dict({k: v for d in data['inherited'].values() for k, v in d.items()}, **data['stored'])
            set_vals = list(vals) + LOG_ACCESS_COLUMNS + [self.CONCURRENCY_CHECK_FIELD, 'id', 'parent_path']
            for field in self._fields.values():
                if field.type in ('one2many', 'many2many'):
                    self.env.cache.set(record, field, ())
                elif field.related and not field.column_type:
                    self.env.cache.set(record, field, field.convert_to_cache(None, record))
                # DLE P123: `test_adv_activity`, `test_message_assignation_inbox`, `test_message_log`, `test_create_mail_simple`, ...
                # Set `mail.message.parent_id` to False in cache so it doesn't do the useless SELECT when computing the modified of `child_ids`
                # in other words, if `parent_id` is not set, no other message `child_ids` are impacted.
                # + avoid the fetch of fields which are False. e.g. if a boolean field is not passed in vals and has no default set in the field attributes,
                # then we know it can be set to False in the cache in the case of a create.
                elif field.name not in set_vals and not field.compute:
                    self.env.cache.set(record, field, field.convert_to_cache(None, record))
            for fname, value in vals.items():
                field = self._fields[fname]
                if field.type in ('one2many', 'many2many'):
                    cachetoclear.append((record, field))
                else:
                    cache_value = field.convert_to_cache(value, record)
                    self.env.cache.set(record, field, cache_value)
                    if field.type in ('many2one', 'many2one_reference') and record._field_inverses[field]:
                        inverses_update[(field, cache_value)].append(record.id)

        for (field, value), record_ids in inverses_update.items():
            field._update_inverses(self.browse(record_ids), value)

        # update parent_path
        records._parent_store_create()

        # protect fields being written against recomputation
        protected = [(data['protected'], data['record']) for data in data_list]
        with self.env.protecting(protected):
            # mark computed fields as todo
            records.modified(self._fields, create=True)

            if other_fields:
                # discard default values from context for other fields
                others = records.with_context(clean_context(self._context))
                for field in sorted(other_fields, key=attrgetter('_sequence')):
                    field.create([
                        (other, data['stored'][field.name])
                        for other, data in zip(others, data_list)
                        if field.name in data['stored']
                    ])

                # mark fields to recompute
                records.modified([field.name for field in other_fields], create=True)

            # if value in cache has not been updated by other_fields, remove it
            for record, field in cachetoclear:
                if self.env.cache.contains(record, field) and not self.env.cache.get(record, field):
                    self.env.cache.remove(record, field)

        # check Python constraints for stored fields
        records._validate_fields(name for data in data_list for name in data['stored'])
        records.check_access_rule('create')

        # add translations
        if self.env.lang and self.env.lang != 'en_US':
            Translations = self.env['ir.translation']
            for field in translated_fields:
                tname = "%s,%s" % (field.model_name, field.name)
                for data in data_list:
                    if field.name in data['stored']:
                        record = data['record']
                        val = data['stored'][field.name]
                        Translations._set_ids(tname, 'model', self.env.lang, record.ids, val, val)

        return records
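
The per-record INSERT above follows the same formatting pattern as _write. A standalone sketch of the query it produces (table, sequence and values are illustrative only):

    columns = [('id', 'nextval(%s)', 'res_partner_id_seq'), ('name', '%s', 'Acme')]
    query = 'INSERT INTO "res_partner" ({}) VALUES ({}) RETURNING id'.format(
        ', '.join('"%s"' % name for name, fmt, val in columns),
        ', '.join(fmt for name, fmt, val in columns))
    params = [val for name, fmt, val in columns]
    # INSERT INTO "res_partner" ("id", "name") VALUES (nextval(%s), %s) RETURNING id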

    def _compute_field_value(self, field):
        # This is for base automation, to have something to override to catch
        # the changes of values for stored compute fields.
        if isinstance(field.compute, str):
            getattr(self, field.compute)()
        else:
            field.compute(self)

        if field.store and any(self._ids):
            # check constraints of the fields that have been computed
            fnames = [f.name for f in self._field_computed[field]]
            self.filtered('id')._validate_fields(fnames)

    def _parent_store_create(self):
        """ Set the parent_path field on ``self`` after its creation. """
        if not self._parent_store:
            return

        query = """
            UPDATE {0} node
            SET parent_path=concat((SELECT parent.parent_path FROM {0} parent
                                    WHERE parent.id=node.{1}), node.id, '/')
            WHERE node.id IN %s
        """.format(self._table, self._parent_name)
        self._cr.execute(query, [tuple(self.ids)])
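
parent_path is a materialized path: the '/'-terminated chain of ancestor ids, which is exactly what the concat(...) in the UPDATE builds. A toy illustration in plain Python:

    # toy data, not Odoo: node 7 under node 3 under root node 1
    parent_path = {1: '1/', 3: '1/3/', 7: '1/3/7/'}
    # the UPDATE computes concat(parent.parent_path, node.id, '/'):
    assert parent_path[7] == parent_path[3] + str(7) + '/'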

    def _parent_store_update_prepare(self, vals):
        """ Return the records in ``self`` that must update their parent_path
            field. This must be called before updating the parent field.
        """
        if not self._parent_store or self._parent_name not in vals:
            return self.browse()

        # No need to recompute the values if the parent is the same.
        parent_val = vals[self._parent_name]
        if parent_val:
            query = """ SELECT id FROM {0}
                        WHERE id IN %s AND ({1} != %s OR {1} IS NULL) """
            params = [tuple(self.ids), parent_val]
        else:
            query = """ SELECT id FROM {0}
                        WHERE id IN %s AND {1} IS NOT NULL """
            params = [tuple(self.ids)]
        query = query.format(self._table, self._parent_name)
        self._cr.execute(query, params)
        return self.browse([row[0] for row in self._cr.fetchall()])

    def _parent_store_update(self):
        """ Update the parent_path field of ``self``. """
        cr = self.env.cr

        # determine new prefix of parent_path
        query = """
            SELECT parent.parent_path FROM {0} node, {0} parent
            WHERE node.id = %s AND parent.id = node.{1}
        """
        cr.execute(query.format(self._table, self._parent_name), [self.ids[0]])
        prefix = cr.fetchone()[0] if cr.rowcount else ''

        # check for recursion
        if prefix:
            parent_ids = {int(label) for label in prefix.split('/')[:-1]}
            if not parent_ids.isdisjoint(self._ids):
                raise UserError(_("Recursion Detected."))

        # update parent_path of all records and their descendants
        query = """
            UPDATE {0} child
            SET parent_path = concat(%s, substr(child.parent_path,
                    length(node.parent_path) - length(node.id || '/') + 1))
            FROM {0} node
            WHERE node.id IN %s
            AND child.parent_path LIKE concat(node.parent_path, '%%')
            RETURNING child.id
        """
        cr.execute(query.format(self._table), [prefix, tuple(self.ids)])
        modified_ids = {row[0] for row in cr.fetchall()}
        self.browse(modified_ids).modified(['parent_path'])

    def _load_records_write(self, values):
        self.write(values)

    def _load_records_create(self, values):
        return self.create(values)

    def _load_records(self, data_list, update=False):
        """ Create or update records of this model, and assign XMLIDs.

            :param data_list: list of dicts with keys `xml_id` (XMLID to
                assign), `noupdate` (flag on XMLID), `values` (field values)
            :param update: should be ``True`` when upgrading a module

            :return: the records corresponding to ``data_list``
        """
        original_self = self.browse()
        # records created during installation should not display messages
        self = self.with_context(install_mode=True)
        imd = self.env['ir.model.data'].sudo()

        # The algorithm below partitions 'data_list' into three sets: the ones
        # to create, the ones to update, and the others. For each set, we assign
        # data['record'] for each data. All those records are then retrieved for
        # the result.

        # determine existing xml_ids
        xml_ids = [data['xml_id'] for data in data_list if data.get('xml_id')]
        existing = {
            ("%s.%s" % row[1:3]): row
            for row in imd._lookup_xmlids(xml_ids, self)
        }

        # determine which records to create and update
        to_create = []                  # list of data
        to_update = []                  # list of data

        for data in data_list:
            xml_id = data.get('xml_id')
            if not xml_id:
                vals = data['values']
                if vals.get('id'):
                    data['record'] = self.browse(vals['id'])
                    to_update.append(data)
                elif not update:
                    to_create.append(data)
                continue
            row = existing.get(xml_id)
            if not row:
                to_create.append(data)
                continue
            d_id, d_module, d_name, d_model, d_res_id, d_noupdate, r_id = row
            record = self.browse(d_res_id)
            if update and d_noupdate:
                data['record'] = record
            elif r_id:
                data['record'] = record
                to_update.append(data)
            else:
                imd.browse(d_id).unlink()
                to_create.append(data)

        # update existing records
        for data in to_update:
            data['record']._load_records_write(data['values'])

        # determine existing parents for new records
        for parent_model, parent_field in self._inherits.items():
            suffix = '_' + parent_model.replace('.', '_')
            xml_ids_vals = {
                (data['xml_id'] + suffix): data['values']
                for data in to_create
                if data.get('xml_id')
            }
            for row in imd._lookup_xmlids(xml_ids_vals, self.env[parent_model]):
                d_id, d_module, d_name, d_model, d_res_id, d_noupdate, r_id = row
                if r_id:
                    xml_id = '%s.%s' % (d_module, d_name)
                    xml_ids_vals[xml_id][parent_field] = r_id
                else:
                    imd.browse(d_id).unlink()

        # check for records to create with an XMLID from another module
        module = self.env.context.get('install_module')
        if module:
            prefix = module + "."
            for data in to_create:
                if data.get('xml_id') and not data['xml_id'].startswith(prefix):
                    _logger.warning("Creating record %s in module %s.", data['xml_id'], module)

        # create records
        records = self._load_records_create([data['values'] for data in to_create])
        for data, record in zip(to_create, records):
            data['record'] = record

        # create or update XMLIDs
        if to_create or to_update:
            imd_data_list = [data for data in data_list if data.get('xml_id')]
            imd._update_xmlids(imd_data_list, update)

        return original_self.concat(*(data['record'] for data in data_list))
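
The data_list format is the one described in the docstring: dicts with `xml_id`, `noupdate` and `values` keys. A hedged sketch of a call (the xml_id and values are made up):

    data_list = [
        {'xml_id': 'my_module.partner_acme', 'noupdate': False,
         'values': {'name': 'Acme'}},
        {'values': {'id': 42, 'name': 'Renamed'}},   # no xml_id: direct update by id
    ]
    records = env['res.partner']._load_records(data_list, update=False)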

    # TODO: improve this with NULL
    @api.model
    def _where_calc(self, domain, active_test=True):
        """Computes the WHERE clause needed to implement an OpenERP domain.
        :param domain: the domain to compute
        :type domain: list
        :param active_test: whether the default filtering of records with ``active``
                            field set to ``False`` should be applied.
        :return: the query expressing the given domain as provided in domain
        :rtype: osv.query.Query
        """
        # if the object has a field named 'active', filter out all inactive
        # records unless they were explicitly asked for
        if 'active' in self._fields and active_test and self._context.get('active_test', True):
            # the item[0] trick below works for domain items and '&'/'|'/'!'
            # operators too
            if not any(item[0] == 'active' for item in domain):
                domain = [('active', '=', 1)] + domain

        if domain:
            e = expression.expression(domain, self)
            tables = e.get_tables()
            where_clause, where_params = e.to_sql()
            where_clause = [where_clause] if where_clause else []
        else:
            where_clause, where_params, tables = [], [], ['"%s"' % self._table]

        return Query(tables, where_clause, where_params)
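
The implicit active filter is the main subtlety here; in isolation it is just a domain prepend, as the sketch below shows:

    # standalone sketch of the active_test behaviour above
    domain = [('name', 'ilike', 'acme')]
    if not any(item[0] == 'active' for item in domain):
        domain = [('active', '=', 1)] + domain
    # domain is now [('active', '=', 1), ('name', 'ilike', 'acme')]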

    def _check_qorder(self, word):
        if not regex_order.match(word):
            raise UserError(_('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
        return True

    @api.model
    def _apply_ir_rules(self, query, mode='read'):
        """Add what's missing in ``query`` to implement all appropriate ir.rules
          (using the current model's rules, and those of its ``_inherits`` parents)

           :param query: the current query object
        """
        if self.env.su:
            return

        def apply_rule(clauses, params, tables, parent_model=None):
            """ :param parent_model: name of the parent model, if the added
                    clause comes from a parent model
            """
            if clauses:
                if parent_model:
                    # as inherited rules are being applied, we need to add the
                    # missing JOIN to reach the parent table (if not JOINed yet)
                    parent_table = '"%s"' % self.env[parent_model]._table
                    parent_alias = '"%s"' % self._inherits_join_add(self, parent_model, query)
                    # inherited rules are applied on the external table, replace
                    # parent_table by parent_alias
                    clauses = [clause.replace(parent_table, parent_alias) for clause in clauses]
                    # replace parent_table by parent_alias, and introduce
                    # parent_alias if needed
                    tables = [
                        (parent_table + ' as ' + parent_alias) if table == parent_table \
                            else table.replace(parent_table, parent_alias)
                        for table in tables
                    ]
                query.where_clause += clauses
                query.where_clause_params += params
                for table in tables:
                    if table not in query.tables:
                        query.tables.append(table)

        if self._transient:
            # One single implicit access rule for transient models: owner only!
            # This is ok because we assert that TransientModels always have
            # log_access enabled, so that 'create_uid' is always there.
            domain = [('create_uid', '=', self._uid)]
            tquery = self._where_calc(domain, active_test=False)
            apply_rule(tquery.where_clause, tquery.where_clause_params, tquery.tables)
            return

        # apply main rules on the object
        Rule = self.env['ir.rule']
        where_clause, where_params, tables = Rule.domain_get(self._name, mode)
        apply_rule(where_clause, where_params, tables)

        # apply ir.rules from the parents (through _inherits)
        for parent_model in self._inherits:
            where_clause, where_params, tables = Rule.domain_get(parent_model, mode)
            apply_rule(where_clause, where_params, tables, parent_model)

    @api.model
    def _generate_translated_field(self, table_alias, field, query):
        """
        Add possibly missing JOIN with translations table to ``query`` and
        generate the expression for the translated field.

        :return: the qualified field name (or expression) to use for ``field``
        """
        if self.env.lang:
            alias, alias_statement = query.add_join(
                (table_alias, 'ir_translation', 'id', 'res_id', field),
                implicit=False,
                outer=True,
                extra='"{rhs}"."type" = \'model\' AND "{rhs}"."name" = %s AND "{rhs}"."lang" = %s AND "{rhs}"."value" != %s',
                extra_params=["%s,%s" % (self._name, field), self.env.lang, ""],
            )
            return 'COALESCE("%s"."%s", "%s"."%s")' % (alias, 'value', table_alias, field)
        else:
            return '"%s"."%s"' % (table_alias, field)

    @api.model
    def _generate_m2o_order_by(self, alias, order_field, query, reverse_direction, seen):
        """
        Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
        either native m2o fields or function/related fields that are stored, including
        intermediate JOINs for inheritance if required.

        :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
        """
        field = self._fields[order_field]
        if field.inherited:
            # also add missing joins for reaching the table containing the m2o field
            qualified_field = self._inherits_join_calc(alias, order_field, query)
            alias, order_field = qualified_field.replace('"', '').split('.', 1)
            field = field.base_field

        assert field.type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
        if not field.store:
            _logger.debug("Many2one function/related fields must be stored "
                          "to be used as ordering fields! Ignoring sorting for %s.%s",
                          self._name, order_field)
            return []

        # figure out the applicable order_by for the m2o
        dest_model = self.env[field.comodel_name]
        m2o_order = dest_model._order
        if not regex_order.match(m2o_order):
            # _order is complex, can't use it here, so we default to _rec_name
            m2o_order = dest_model._rec_name

        # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
        # as we don't want to exclude results that have NULL values for the m2o
        join = (alias, dest_model._table, order_field, 'id', order_field)
        dest_alias, _ = query.add_join(join, implicit=False, outer=True)
        return dest_model._generate_order_by_inner(dest_alias, m2o_order, query,
                                                   reverse_direction, seen)

    @api.model
    def _generate_order_by_inner(self, alias, order_spec, query, reverse_direction=False, seen=None):
        if seen is None:
            seen = set()
        self._check_qorder(order_spec)

        order_by_elements = []
        for order_part in order_spec.split(','):
            order_split = order_part.strip().split(' ')
            order_field = order_split[0].strip()
            order_direction = order_split[1].strip().upper() if len(order_split) == 2 else ''
            if reverse_direction:
                order_direction = 'ASC' if order_direction == 'DESC' else 'DESC'
            do_reverse = order_direction == 'DESC'

            field = self._fields.get(order_field)
            if not field:
                raise ValueError("Invalid field %r on model %r" % (order_field, self._name))

            if order_field == 'id':
                order_by_elements.append('"%s"."%s" %s' % (alias, order_field, order_direction))
            else:
                if field.inherited:
                    field = field.base_field
                if field.store and field.type == 'many2one':
                    key = (field.model_name, field.comodel_name, order_field)
                    if key not in seen:
                        seen.add(key)
                        order_by_elements += self._generate_m2o_order_by(alias, order_field, query, do_reverse, seen)
                elif field.store and field.column_type:
                    qualified_name = self._inherits_join_calc(alias, order_field, query, implicit=False, outer=True)
                    if field.type == 'boolean':
                        qualified_name = "COALESCE(%s, false)" % qualified_name
                    order_by_elements.append("%s %s" % (qualified_name, order_direction))
                else:
                    _logger.warning("Model %r cannot be sorted on field %r (not a column)", self._name, order_field)
                    continue  # ignore non-readable or "non-joinable" fields

        return order_by_elements

    @api.model
    def _generate_order_by(self, order_spec, query):
        """
        Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
        a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.

        :raise ValueError in case order_spec is malformed
        """
        order_by_clause = ''
        order_spec = order_spec or self._order
        if order_spec:
            order_by_elements = self._generate_order_by_inner(self._table, order_spec, query)
            if order_by_elements:
                order_by_clause = ",".join(order_by_elements)

        return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
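
The parsing step inside _generate_order_by_inner splits the spec on commas, then on spaces. In isolation:

    # standalone sketch of the order_spec parsing used above
    order_spec = 'name desc, id'
    for order_part in order_spec.split(','):
        order_split = order_part.strip().split(' ')
        order_field = order_split[0].strip()
        order_direction = order_split[1].strip().upper() if len(order_split) == 2 else ''
        # yields ('name', 'DESC'), then ('id', '')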

    @api.model
    def _flush_search(self, domain, fields=None, order=None):
        """ Flush all the fields appearing in `domain`, `fields` and `order`. """
        to_flush = defaultdict(set)             # {model_name: field_names}
        if fields:
            to_flush[self._name].update(fields)
        # also take into account the fields in the record rules
        domain = list(domain) + (self.env['ir.rule']._compute_domain(self._name, 'read') or [])
        for arg in domain:
            if isinstance(arg, str):
                continue
            if not isinstance(arg[0], str):
                continue
            model_name = self._name
            for fname in arg[0].split('.'):
                field = self.env[model_name]._fields.get(fname)
                if not field:
                    break
                to_flush[model_name].add(fname)
                # DLE P111: `test_message_process_email_partner_find`
                # Search on res.users with email_normalized in domain
                # must trigger the recompute and flush of res.partner.email_normalized
                if field.related_field:
                    model = self
                    # DLE P129: `test_transit_multi_companies`
                    # `self.env['stock.picking'].search([('product_id', '=', product.id)])`
                    # Should flush `stock.move.picking_ids` as `product_id` on `stock.picking` is defined as:
                    # `product_id = fields.Many2one('product.product', 'Product', related='move_lines.product_id', readonly=False)`
                    for f in field.related:
                        rfield = model._fields.get(f)
                        if rfield:
                            to_flush[model._name].add(f)
                            if rfield.type in ('many2one', 'one2many', 'many2many'):
                                model = self.env[rfield.comodel_name]
                                if rfield.type == 'one2many':
                                    to_flush[rfield.comodel_name].add(rfield.inverse_name)
                if field.comodel_name:
                    model_name = field.comodel_name
            # hierarchy operators need the parent field
            if arg[1] in ('child_of', 'parent_of'):
                model = self.env[model_name]
                if model._parent_store:
                    to_flush[model_name].add(model._parent_name)

        # flush the order fields
        order_spec = order or self._order
        for order_part in order_spec.split(','):
            order_field = order_part.split()[0]
            if order_field in self._fields:
                to_flush[self._name].add(order_field)

        if 'active' in self:
            to_flush[self._name].add('active')

        for model_name, field_names in to_flush.items():
            self.env[model_name].flush(field_names)

    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """
        Private implementation of the search() method, which allows specifying the uid to use for the access-right check.
        This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
        by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
        This is ok at the security level because this method is private and not callable through XML-RPC.

        :param access_rights_uid: optional user ID to use when checking access rights
                                  (not for ir.rules, this is only for ir.model.access)
        :return: a list of record ids or an integer (if count is True)
        """
        model = self.with_user(access_rights_uid) if access_rights_uid else self
        model.check_access_rights('read')

        if expression.is_false(self, args):
            # optimization: no need to query, as no record satisfies the domain
            return 0 if count else []

        # the flush must be done before the _where_calc(), as the latter can do some selects
        self._flush_search(args, order=order)

        query = self._where_calc(args)
        self._apply_ir_rules(query, 'read')
        order_by = self._generate_order_by(order, query)
        from_clause, where_clause, where_clause_params = query.get_sql()

        where_str = where_clause and (" WHERE %s" % where_clause) or ''

        if count:
            # Ignore order, limit and offset when just counting, they don't make sense and could
            # hurt performance
            query_str = 'SELECT count(1) FROM ' + from_clause + where_str
            self._cr.execute(query_str, where_clause_params)
            res = self._cr.fetchone()
            return res[0]

        limit_str = limit and ' limit %d' % limit or ''
        offset_str = offset and ' offset %d' % offset or ''
        query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
        self._cr.execute(query_str, where_clause_params)
        res = self._cr.fetchall()

        # TDE note: with auto_join, we could have several lines about the same result
        # i.e. a lead with several unread messages; we uniquify the result using
        # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
        def _uniquify_list(seq):
            seen = set()
            return [x for x in seq if x not in seen and not seen.add(x)]

        return _uniquify_list([x[0] for x in res])
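
The order-preserving dedup in _uniquify_list is a classic idiom: set.add() returns None, so `not seen.add(x)` is always true and only the membership test filters. In isolation:

    seq = [3, 1, 3, 2, 1]
    seen = set()
    print([x for x in seq if x not in seen and not seen.add(x)])   # [3, 1, 2]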

    @api.returns(None, lambda value: value[0])
    def copy_data(self, default=None):
        """
        Copy the given record's data with all its field values

        :param default: field values to override in the original values of the copied record
        :return: list with a dictionary containing all the field values
        """
        # In the old API, this method took a single id and returned a dict. When
        # invoked with the new API, it returns a list of dicts.
        self.ensure_one()

        # avoid recursion through already copied records in case of circular relationship
        if '__copy_data_seen' not in self._context:
            self = self.with_context(__copy_data_seen=defaultdict(set))
        seen_map = self._context['__copy_data_seen']
        if self.id in seen_map[self._name]:
            return
        seen_map[self._name].add(self.id)

        default = dict(default or [])
        if 'state' not in default and 'state' in self._fields:
            field = self._fields['state']
            if field.default:
                value = field.default(self)
                value = field.convert_to_write(value, self)
                default['state'] = value

        # build a black list of fields that should not be copied
        blacklist = set(MAGIC_COLUMNS + ['parent_path'])
        whitelist = set(name for name, field in self._fields.items() if not field.inherited)

        def blacklist_given_fields(model):
            # blacklist the fields that are given by inheritance
            for parent_model, parent_field in model._inherits.items():
                blacklist.add(parent_field)
                if parent_field in default:
                    # all the fields of 'parent_model' are given by the record:
                    # default[parent_field], except the ones redefined in self
                    blacklist.update(set(self.env[parent_model]._fields) - whitelist)
                else:
                    blacklist_given_fields(self.env[parent_model])
            # blacklist deprecated fields
            for name, field in model._fields.items():
                if field.deprecated:
                    blacklist.add(name)

        blacklist_given_fields(self)

        fields_to_copy = {name: field
                          for name, field in self._fields.items()
                          if field.copy and name not in default and name not in blacklist}

        for name, field in fields_to_copy.items():
            if field.type == 'one2many':
                # duplicate following the order of the ids because we'll rely on
                # it later for copying translations in copy_translation()!
                lines = [rec.copy_data()[0] for rec in self[name].sorted(key='id')]
                # the lines are duplicated using the wrong (old) parent, but then
                # are reassigned to the correct one thanks to the (0, 0, ...)
                default[name] = [(0, 0, line) for line in lines if line]
            elif field.type == 'many2many':
                default[name] = [(6, 0, self[name].ids)]
            else:
                default[name] = field.convert_to_write(self[name], self)

        return [default]
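
Per the loop above, one2many values come back as (0, 0, vals) create commands, many2many values as a single (6, 0, ids) replace command, and scalars via convert_to_write. A hedged sketch of a possible result (field names assumed):

    vals = record.copy_data()[0]
    # vals == {'name': 'Acme',
    #          'child_ids': [(0, 0, {'name': 'Contact A'})],   # o2m as creates
    #          'category_id': [(6, 0, [5, 8])]}                # m2m re-linked by ids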

    def copy_translations(old, new, excluded=()):
        """ Recursively copy the translations from original to new record

        :param old: the original record
        :param new: the new record (copy of the original one)
        :param excluded: a container of user-provided field names
        """
        # avoid recursion through already copied records in case of circular relationship
        if '__copy_translations_seen' not in old._context:
            old = old.with_context(__copy_translations_seen=defaultdict(set))
        seen_map = old._context['__copy_translations_seen']
        if old.id in seen_map[old._name]:
            return
        seen_map[old._name].add(old.id)

        def get_trans(field, old, new):
            """ Return the 'name' of the translations to search for, together
                with the record ids corresponding to ``old`` and ``new``.
            """
            if field.inherited:
                pname = field.related[0]
                return get_trans(field.related_field, old[pname], new[pname])
            return "%s,%s" % (field.model_name, field.name), old.id, new.id

        # removing the lang to compare untranslated values
        old_wo_lang, new_wo_lang = (old + new).with_context(lang=None)
        Translation = old.env['ir.translation']

        for name, field in old._fields.items():
            if not field.copy:
                continue

            if field.inherited and field.related[0] in excluded:
                # inherited fields that come from a user-provided parent record
                # must not copy translations, as the parent record is not a copy
                # of the old parent record
                continue

            if field.type == 'one2many' and field.name not in excluded:
                # we must recursively copy the translations for o2m; here we
                # rely on the order of the ids to match the translations as
                # foreseen in copy_data()
                old_lines = old[name].sorted(key='id')
                new_lines = new[name].sorted(key='id')
                for (old_line, new_line) in zip(old_lines, new_lines):
                    # don't pass excluded as it is not about those lines
                    old_line.copy_translations(new_line)

            elif field.translate:
                # for translatable fields we copy their translations
                trans_name, source_id, target_id = get_trans(field, old, new)
                domain = [('name', '=', trans_name), ('res_id', '=', source_id)]
                new_val = new_wo_lang[name]
                if old.env.lang and callable(field.translate):
                    # the new value *without lang* must be the old value without lang
                    new_wo_lang[name] = old_wo_lang[name]
                vals_list = []
                for vals in Translation.search_read(domain):
                    del vals['id']
                    del vals['module']      # duplicated vals is not linked to any module
                    vals['res_id'] = target_id
                    if not callable(field.translate):
                        vals['src'] = new_wo_lang[name]
                    if vals['lang'] == old.env.lang and field.translate is True:
                        # update master record if the new_val was not changed by copy override
                        if new_val == old[name]:
                            new_wo_lang[name] = old_wo_lang[name]
                            vals['src'] = old_wo_lang[name]
                        # the value should be the new value (given by copy())
                        vals['value'] = new_val
                    vals_list.append(vals)
                Translation._upsert_translations(vals_list)

    @api.returns('self', lambda value: value.id)
    def copy(self, default=None):
        """ copy(default=None)

        Duplicate record ``self`` updating it with default values

        :param dict default: dictionary of field values to override in the
               original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
        :returns: new record

        """
        self.ensure_one()
        vals = self.with_context(active_test=False).copy_data(default)[0]
        # To avoid creating a translation in the lang of the user, create with lang=None; copy_translations() will do it
        new = self.with_context(lang=None).create(vals)
        self.with_context(from_copy_translation=True).copy_translations(new, excluded=default or ())
        return new
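
copy() is the public entry point tying copy_data(), create() and copy_translations() together. A hedged usage sketch, overriding one field on the duplicate:

    clone = record.copy({'name': 'Copy of %s' % record.name})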

    @api.returns('self')
    def exists(self):
        """  exists() -> records

        Returns the subset of records in ``self`` that exist, and marks deleted
        records as such in cache. It can be used as a test on records::

            if record.exists():
                ...

        By convention, new records are returned as existing.
        """
        ids, new_ids = [], []
        for i in self._ids:
            (ids if isinstance(i, int) else new_ids).append(i)
        if not ids:
            return self
        query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
        self._cr.execute(query, [tuple(ids)])
        ids = [r[0] for r in self._cr.fetchall()]
        return self.browse(ids + new_ids)
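
Beyond the `if record.exists():` test shown in the docstring, the method is commonly used to shrink a recordset before acting on it. A hedged sketch:

    # drop ids deleted in the meantime, keep the rest
    live = records.exists()
    for rec in live:
        ...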

    def _check_recursion(self, parent=None):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a
        loop is detected or until a top-level record is found.

        :param parent: optional parent field name (default: ``self._parent_name``)
        :return: **True** if no loop was found, **False** otherwise.
        """
        if not parent:
            parent = self._parent_name

        # must ignore 'active' flag, ir.rules, etc. => direct SQL query
        cr = self._cr
        self.flush([parent])
        query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
        for id in self.ids:
            current_id = id
            while current_id:
                cr.execute(query, (current_id,))
                result = cr.fetchone()
                current_id = result[0] if result else None
                if current_id == id:
                    return False
        return True
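
    # The usual way models consume _check_recursion() is a constraint on the
    # parent field; a sketch (assumes ValidationError is imported from
    # odoo.exceptions):
    #
    #   @api.constrains('parent_id')
    #   def _check_parent_id(self):
    #       if not self._check_recursion():
    #           raise ValidationError("You cannot create recursive records.")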

    def _check_m2m_recursion(self, field_name):
        """
        Verifies that there is no loop in a directed graph of records, by
        following a many2many relationship with the given field name.

        :param field_name: field to check
        :return: **True** if no loop was found, **False** otherwise.
        """
        field = self._fields.get(field_name)
        if not (field and field.type == 'many2many' and
                field.comodel_name == self._name and field.store):
            # field must be a many2many on itself
            raise ValueError('invalid field_name: %r' % (field_name,))

        self.flush([field_name])

        cr = self._cr
        query = 'SELECT "%s", "%s" FROM "%s" WHERE "%s" IN %%s AND "%s" IS NOT NULL' % \
                    (field.column1, field.column2, field.relation, field.column1, field.column2)

        succs = defaultdict(set)        # transitive closure of successors
        preds = defaultdict(set)        # transitive closure of predecessors
        todo, done = set(self.ids), set()
        while todo:
            # retrieve the respective successors of the nodes in 'todo'
            cr.execute(query, [tuple(todo)])
            done.update(todo)
            todo.clear()
            for id1, id2 in cr.fetchall():
                # connect id1 and its predecessors to id2 and its successors
                for x, y in itertools.product([id1] + list(preds[id1]),
                                              [id2] + list(succs[id2])):
                    if x == y:
                        return False    # we found a cycle here!
                    succs[x].add(y)
                    preds[y].add(x)
                if id2 not in done:
                    todo.add(id2)
        return True
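
    # Same pattern for a self-referential many2many; `related_ids` below is a
    # hypothetical many2many pointing back to the same model:
    #
    #   @api.constrains('related_ids')
    #   def _check_related_ids(self):
    #       if not self._check_m2m_recursion('related_ids'):
    #           raise ValidationError("Recursion detected in related records.")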

    def _get_external_ids(self):
        """Retrieve the External ID(s) of any database record.

        **Synopsis**: ``_get_external_ids() -> { 'id': ['module.external_id'] }``

        :return: map of ids to the list of their fully qualified External IDs
                 in the form ``module.key``, or an empty list when there's no External
                 ID for a record, e.g.::

                     { 'id': ['module.ext_id', 'module.ext_id_bis'],
                       'id2': [] }
        """
        result = {record.id: [] for record in self}
        domain = [('model', '=', self._name), ('res_id', 'in', self.ids)]
        for data in self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name', 'res_id'], order='id'):
            result[data['res_id']].append('%(module)s.%(name)s' % data)
        return result

    def get_external_id(self):
        """Retrieve the External ID of any database record, if there
        is one. This method works as a possible implementation
        for a function field, to be able to add it to any
        model object easily, referencing it as ``Model.get_external_id``.

        When multiple External IDs exist for a record, only one
        of them is returned (the first one, in ``ir.model.data`` id order).

        :return: map of ids to their fully qualified XML ID,
                 defaulting to an empty string when there's none
                 (to be usable as a function field),
                 e.g.::

                     { 'id': 'module.ext_id',
                       'id2': '' }
        """
        results = self._get_external_ids()
        return {key: val[0] if val else ''
                for key, val in results.items()}
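
    # Usage sketch (assumes an `env`; the XML id and database id shown are
    # illustrative):
    #
    #   company = env.ref('base.main_company')     # xml id -> record
    #   company.get_external_id()
    #   # -> {1: 'base.main_company'}, or {1: ''} when no External ID exists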

    # backwards compatibility
    get_xml_id = get_external_id
    _get_xml_ids = _get_external_ids

    # Transience
    @classmethod
    def is_transient(cls):
        """ Return whether the model is transient.

        See :class:`TransientModel`.

        """
        return cls._transient

    def _transient_clean_rows_older_than(self, seconds):
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        # Never delete rows used in last 5 minutes
        seconds = max(seconds, 300)
        query = ("SELECT id FROM " + self._table + " WHERE"
            " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
            " < ((now() at time zone 'UTC') - interval %s)")
        self._cr.execute(query, ("%s seconds" % seconds,))
        ids = [x[0] for x in self._cr.fetchall()]
        self.sudo().browse(ids).unlink()

    def _transient_clean_old_rows(self, max_count):
        # Check how many rows we have in the table
        self._cr.execute("SELECT count(*) AS row_count FROM " + self._table)
        res = self._cr.fetchall()
        if res[0][0] <= max_count:
            return  # max not reached, nothing to do
        self._transient_clean_rows_older_than(300)

    @api.model
    def _transient_vacuum(self, force=False):
        """Clean the transient records.

        This unlinks old records from the transient model tables whenever the
        "_transient_max_count" or "_max_age" conditions (if any) are reached.
        Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently called (e.g. whenever
        a new record is created).
        Example with both max_hours and max_count active:
        Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
        table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
        - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
        - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
          would immediately cause the maximum to be reached again.
        - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        _transient_check_time = 20          # arbitrary limit on vacuum executions
        cls = type(self)
        cls._transient_check_count += 1
        if not force and (cls._transient_check_count < _transient_check_time):
            return True  # no vacuum cleaning this time
        cls._transient_check_count = 0

        # Age-based expiration
        if self._transient_max_hours:
            self._transient_clean_rows_older_than(self._transient_max_hours * 60 * 60)

        # Count-based expiration
        if self._transient_max_count:
            self._transient_clean_old_rows(self._transient_max_count)

        return True
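
    # The thresholds are plain class attributes; a sketch of a wizard tuning
    # them (hypothetical model name):
    #
    #   class MyWizard(models.TransientModel):
    #       _name = 'my.wizard'
    #       _transient_max_hours = 1.0   # age-based vacuum after one hour
    #       _transient_max_count = 100   # count-based vacuum above 100 rows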

    @api.model
    def resolve_2many_commands(self, field_name, commands, fields=None):
        """ Serializes one2many and many2many commands into record dictionaries
            (as if all the records came from the database via a read()).  This
            method is aimed at onchange methods on one2many and many2many fields.

            Because commands might be creation commands, not all record dicts
            will contain an ``id`` field.  Commands matching an existing record
            will have an ``id``.

            :param field_name: name of the one2many or many2many field matching the commands
            :type field_name: str
            :param commands: one2many or many2many commands to execute on ``field_name``
            :type commands: list((int|False, int|False, dict|False))
            :param fields: list of fields to read from the database, when applicable
            :type fields: list(str)
            :returns: records in a shape similar to that returned by ``read()``
                (except records may be missing the ``id`` field if they don't exist in db)
            :rtype: list(dict)
        """
        result = []                     # result (list of dict)
        record_ids = []                 # ids of records to read
        updates = defaultdict(dict)     # {id: vals} of updates on records

        for command in commands or []:
            if not isinstance(command, (list, tuple)):
                record_ids.append(command)
            elif command[0] == 0:
                result.append(command[2])
            elif command[0] == 1:
                record_ids.append(command[1])
                updates[command[1]].update(command[2])
            elif command[0] in (2, 3):
                record_ids = [id for id in record_ids if id != command[1]]
            elif command[0] == 4:
                record_ids.append(command[1])
            elif command[0] == 5:
                result, record_ids = [], []
            elif command[0] == 6:
                result, record_ids = [], list(command[2])

        # read the records and apply the updates
        field = self._fields[field_name]
        records = self.env[field.comodel_name].browse(record_ids)
        for data in records.read(fields):
            data.update(updates.get(data['id'], {}))
            result.append(data)

        return result
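
    # Usage sketch: resolving x2many commands into read()-like dicts;
    # `order` and `line_ids` are hypothetical, and ids 42/43 are assumed
    # to exist in the database:
    #
    #   commands = [(0, 0, {'name': 'new'}), (1, 42, {'qty': 2}), (4, 43, 0)]
    #   order.resolve_2many_commands('line_ids', commands, fields=['name', 'qty'])
    #   # -> [{'name': 'new'},                       # created, no 'id'
    #   #     {'id': 42, 'name': ..., 'qty': 2},     # read, then updated
    #   #     {'id': 43, 'name': ..., 'qty': ...}]   # read as-is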

    # for backward compatibility
    resolve_o2m_commands_to_record_dicts = resolve_2many_commands

    @api.model
    def search_read(self, domain=None, fields=None, offset=0, limit=None, order=None):
        """Perform a :meth:`search` followed by a :meth:`read`.

        :param domain: Search domain, see ``args`` parameter in :meth:`search`.
            Defaults to an empty domain that will match all records.
        :param fields: List of fields to read, see ``fields`` parameter in :meth:`read`.
            Defaults to all fields.
        :param int offset: Number of records to skip, see ``offset`` parameter in :meth:`search`.
            Defaults to 0.
        :param int limit: Maximum number of records to return, see ``limit`` parameter in :meth:`search`.
            Defaults to no limit.
        :param order: Columns to sort result, see ``order`` parameter in :meth:`search`.
            Defaults to no sort.
        :return: List of dictionaries containing the asked fields.
        :rtype: list(dict).
        """
        records = self.search(domain or [], offset=offset, limit=limit, order=order)
        if not records:
            return []

        if fields and fields == ['id']:
            # shortcut read if we only want the ids
            return [{'id': record.id} for record in records]

        # read() ignores active_test, but it would forward it to any downstream search call
        # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
        # was presumably only meant for the main search().
        # TODO: Move this to read() directly?
        if 'active_test' in self._context:
            context = dict(self._context)
            del context['active_test']
            records = records.with_context(context)

        result = records.read(fields)
        if len(result) <= 1:
            return result

        # reorder read
        index = {vals['id']: vals for vals in result}
        return [index[record.id] for record in records if record.id in index]
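
    # Usage sketch (assumes an `env`; many2one values are read back as
    # (id, display_name) pairs):
    #
    #   env['res.partner'].search_read(
    #       [('is_company', '=', True)], ['name', 'country_id'], limit=5)
    #   # -> [{'id': ..., 'name': ..., 'country_id': (id, display_name)}, ...]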

    def toggle_active(self):
        """ Inverse the value of the field ``active`` on the records in ``self``. """
        for record in self:
            record.active = not record.active

    def action_archive(self):
        """
            Set active=False on a recordset, by calling toggle_active to take the
            corresponding actions according to the model
        """
        return self.filtered(lambda record: record.active).toggle_active()

    def action_unarchive(self):
        """
            Set active=True on a recordset, by calling toggle_active to take the
            corresponding actions according to the model
        """
        return self.filtered(lambda record: not record.active).toggle_active()
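
    # These helpers only make sense on models defining an `active` boolean
    # field; a usage sketch (hypothetical domain):
    #
    #   stale = env['res.partner'].search([('create_date', '<', '2019-01-01')])
    #   stale.action_archive()      # sets active=False through toggle_active()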

    def _register_hook(self):
        """ stuff to do right after the registry is built """
        pass

    @classmethod
    def _patch_method(cls, name, method):
        """ Monkey-patch a method for all instances of this model. This replaces
            the method called ``name`` by ``method`` in the given class.
            The original method is then accessible via ``method.origin``, and it
            can be restored with :meth:`~._revert_method`.

            Example::

                def do_write(self, values):
                    # do stuff, and call the original method
                    return do_write.origin(self, values)

                # patch method write of model
                model._patch_method('write', do_write)

                # this will call do_write
                records = model.search([...])
                records.write(...)

                # restore the original method
                model._revert_method('write')
        """
        origin = getattr(cls, name)
        method.origin = origin
        # propagate decorators from origin to method, and apply api decorator
        wrapped = api.propagate(origin, method)
        wrapped.origin = origin
        setattr(cls, name, wrapped)

    @classmethod
    def _revert_method(cls, name):
        """ Revert the original method called ``name`` in the given class.
            See :meth:`~._patch_method`.
        """
        method = getattr(cls, name)
        setattr(cls, name, method.origin)

    #
    # Instance creation
    #
    # An instance represents an ordered collection of records in a given
    # execution environment. The instance object refers to the environment, and
    # the records themselves are represented by their cache dictionary. The 'id'
    # of each record is found in its corresponding cache dictionary.
    #
    # This design has the following advantages:
    #  - cache access is direct and thus fast;
    #  - one can consider records without an 'id' (see new records);
    #  - the global cache is only an index to "resolve" a record 'id'.
    #

    @classmethod
    def _browse(cls, env, ids, prefetch_ids):
        """ Create a recordset instance.

        :param env: an environment
        :param ids: a tuple of record ids
        :param prefetch_ids: a collection of record ids (for prefetching)
        """
        records = object.__new__(cls)
        records.env = env
        records._ids = ids
        records._prefetch_ids = prefetch_ids
        return records

    def browse(self, ids=None):
        """ browse([ids]) -> records

        Returns a recordset for the ids provided as parameter in the current
        environment.

        .. code-block:: python

            self.browse([7, 18, 12])
            res.partner(7, 18, 12)

        :param ids: id(s)
        :type ids: int or list(int) or None
        :return: recordset
        """
        if not ids:
            ids = ()
        elif ids.__class__ in IdType:
            ids = (ids,)
        else:
            ids = tuple(ids)
        return self._browse(self.env, ids, ids)

    #
    # Internal properties, for manipulating the instance's implementation
    #

    @property
    def ids(self):
        """ Return the list of actual record ids corresponding to ``self``. """
        return list(origin_ids(self._ids))

    # backward-compatibility with former browse records
    _cr = property(lambda self: self.env.cr)
    _uid = property(lambda self: self.env.uid)
    _context = property(lambda self: self.env.context)

    #
    # Conversion methods
    #

    def ensure_one(self):
        """Verify that the current recorset holds a single record.

        :raise ValueError: if ``len(self) != 1``
        """
        try:
            # unpacking to ensure there is exactly one value is faster than
            # calling len() when it holds, and this has a significant impact
            # since the check is called very often
            _id, = self._ids
            return self
        except ValueError:
            raise ValueError("Expected singleton: %s" % self)

    def with_env(self, env):
        """Return a new version of this recordset attached to the provided environment.

        :param env:
        :type env: :class:`~odoo.api.Environment`

        .. warning::
            The new environment will not benefit from the current
            environment's data cache, so later data access may incur extra
            delays while re-fetching from the database.
            The returned recordset has the same prefetch object as ``self``.
        """
        return self._browse(env, self._ids, self._prefetch_ids)

    def sudo(self, flag=True):
        """ sudo([flag=True])

        Returns a new version of this recordset with superuser mode enabled or
        disabled, depending on ``flag``. The superuser mode does not change the
        current user, and simply bypasses access rights checks.

        .. warning::

            Using ``sudo`` could cause data access to cross the
            boundaries of record rules, possibly mixing records that
            are meant to be isolated (e.g. records from different
            companies in multi-company environments).

            It may lead to unintuitive results in methods which select one
            record among many - for example getting the default company, or
            selecting a Bill of Materials.

        .. note::

            Because the record rules and access control will have to be
            re-evaluated, the new recordset will not benefit from the current
            environment's data cache, so later data access may incur extra
            delays while re-fetching from the database.
            The returned recordset has the same prefetch object as ``self``.

        """
        if not isinstance(flag, bool):
            _logger.warning("deprecated use of sudo(user), use with_user(user) instead", stack_info=True)
            return self.with_user(flag)
        return self.with_env(self.env(su=flag))

    def with_user(self, user):
        """ with_user(user)

        Return a new version of this recordset attached to the given user, in
        non-superuser mode, unless ``user`` is the superuser (by convention,
        the superuser is always in superuser mode).
        """
        return self.with_env(self.env(user=user, su=False))
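
    # Sketch of the difference between the two (assumes an `env` and a
    # `user` record):
    #
    #   records.sudo()              # same user, access checks bypassed
    #   records.with_user(user)     # run as `user`, access checks enforced
    #   records.sudo(False)         # keep current user, drop superuser mode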

    def with_context(self, *args, **kwargs):
        """ with_context([context][, **overrides]) -> records

        Returns a new version of this recordset attached to an extended
        context.

        The extended context is either the provided ``context`` in which
        ``overrides`` are merged or the *current* context in which
        ``overrides`` are merged e.g.::

            # current context is {'key1': True}
            r2 = records.with_context({}, key2=True)
            # -> r2._context is {'key2': True}
            r2 = records.with_context(key2=True)
            # -> r2._context is {'key1': True, 'key2': True}

        .. note::

            The returned recordset has the same prefetch object as ``self``.
        """
        if args and 'allowed_company_ids' not in args[0] and 'allowed_company_ids' in self._context:
            args[0]['allowed_company_ids'] = self._context.get('allowed_company_ids') 
        context = dict(args[0] if args else self._context, **kwargs)
        return self.with_env(self.env(context=context))

    def with_prefetch(self, prefetch_ids=None):
        """ with_prefetch([prefetch_ids]) -> records

        Return a new version of this recordset that uses the given prefetch ids,
        or ``self``'s ids if not given.
        """
        if prefetch_ids is None:
            prefetch_ids = self._ids
        return self._browse(self.env, self._ids, prefetch_ids)

    def _update_cache(self, values, validate=True):
        """ Update the cache of ``self`` with ``values``.

            :param values: dict of field values, in any format.
            :param validate: whether values must be checked
        """
        def is_monetary(pair):
            return pair[0].type == 'monetary'

        self.ensure_one()
        cache = self.env.cache
        fields = self._fields
        try:
            field_values = [(fields[name], value) for name, value in values.items()]
        except KeyError as e:
            raise ValueError("Invalid field %r on model %r" % (e.args[0], self._name))

        # convert monetary fields last in order to ensure proper rounding
        for field, value in sorted(field_values, key=is_monetary):
            cache.set(self, field, field.convert_to_cache(value, self, validate))

            # set inverse fields on new records in the comodel
            if field.relational:
                inv_recs = self[field.name].filtered(lambda r: not r.id)
                if not inv_recs:
                    continue
                for invf in self._field_inverses[field]:
                    # DLE P98: `test_40_new_fields`
                    # /home/dle/src/odoo/master-nochange-fp/odoo/addons/test_new_api/tests/test_new_fields.py
                    # Be careful to not break `test_onchange_taxes_1`, `test_onchange_taxes_2`, `test_onchange_taxes_3`
                    # If you attempt to find a better solution
                    for inv_rec in inv_recs:
                        if not cache.contains(inv_rec, invf):
                            val = invf.convert_to_cache(self, inv_rec, validate=False)
                            cache.set(inv_rec, invf, val)
                        else:
                            invf._update(inv_rec, self)

    def _convert_to_record(self, values):
        """ Convert the ``values`` dictionary from the cache format to the
        record format.
        """
        return {
            name: self._fields[name].convert_to_record(value, self)
            for name, value in values.items()
        }

    def _convert_to_write(self, values):
        """ Convert the ``values`` dictionary into the format of :meth:`write`. """
        fields = self._fields
        result = {}
        for name, value in values.items():
            if name in fields:
                field = fields[name]
                value = field.convert_to_write(value, self)
                if not isinstance(value, NewId):
                    result[name] = value
        return result

    #
    # Record traversal and update
    #

    def _mapped_func(self, func):
        """ Apply function ``func`` on all records in ``self``, and return the
            result as a list or a recordset (if ``func`` returns recordsets).
        """
        if self:
            vals = [func(rec) for rec in self]
            if isinstance(vals[0], BaseModel):
                return vals[0].union(*vals)         # union of all recordsets
            return vals
        else:
            vals = func(self)
            return vals if isinstance(vals, BaseModel) else []

    def mapped(self, func):
        """Apply ``func`` on all records in ``self``, and return the result as a
        list or a recordset (if ``func`` returns recordsets). In the latter
        case, the order of the returned recordset is arbitrary.

        :param func: a function or a dot-separated sequence of field names
        :type func: callable or str
        :return: self if func is falsy, result of func applied to all ``self`` records.
        :rtype: list or recordset

        .. code-block:: python3

            # returns a list of summing two fields for each record in the set
            records.mapped(lambda r: r.field1 + r.field2)

        The provided function can be a string to get field values:

        .. code-block:: python3

            # returns a list of names
            records.mapped('name')

            # returns a recordset of partners
            record.mapped('partner_id')

            # returns the union of all partner banks, with duplicates removed
            record.mapped('partner_id.bank_ids')
        """
        if not func:
            return self                 # support for an empty path of fields
        if isinstance(func, str):
            recs = self
            for name in func.split('.'):
                recs = recs._mapped_func(operator.itemgetter(name))
            return recs
        else:
            return self._mapped_func(func)

    def _mapped_cache(self, name_seq):
        """ Same as `~.mapped`, but ``name_seq`` is a dot-separated sequence of
            field names, and only cached values are used.
        """
        recs = self
        for name in name_seq.split('.'):
            field = recs._fields[name]
            null = field.convert_to_cache(False, self, validate=False)
            if recs:
                recs = recs.mapped(lambda rec: field.convert_to_record(rec._cache.get(name, null), rec))
            else:
                recs = field.convert_to_record(null, recs)
        return recs

    def filtered(self, func):
        """Return the records in ``self`` satisfying ``func``.

        :param func: a function or a dot-separated sequence of field names
        :type func: callable or str
        :return: recordset of records satisfying func, may be empty.

        .. code-block:: python3

            # only keep records whose company is the current user's
            records.filtered(lambda r: r.company_id == user.company_id)

            # only keep records whose partner is a company
            records.filtered("partner_id.is_company")
        """
        if isinstance(func, str):
            name = func
            func = lambda rec: any(rec.mapped(name))
        return self.browse([rec.id for rec in self if func(rec)])

    def filtered_domain(self, domain):
        if not domain:
            return self
        result = []
        for d in reversed(domain):
            if d == '|':
                result.append(result.pop() | result.pop())
            elif d == '!':
                result.append(self - result.pop())
            elif d == '&':
                result.append(result.pop() & result.pop())
            elif d == expression.TRUE_LEAF:
                result.append(self)
            elif d == expression.FALSE_LEAF:
                result.append(self.browse())
            else:
                (key, comparator, value) = d
                if key.endswith('.id'):
                    key = key[:-3]
                if key == 'id':
                    key = ''
                # determine the field with the final type for values
                field = None
                if key:
                    model = self.browse()
                    for fname in key.split('.'):
                        field = model._fields[fname]
                        model = model[fname]
                if comparator in ('like', 'ilike', '=like', '=ilike', 'not ilike', 'not like'):
                    value_esc = value.replace('_', '?').replace('%', '*').replace('[', '?')
                records = self.browse()
                for rec in self:
                    data = rec.mapped(key)
                    if comparator in ('child_of', 'parent_of'):
                        value = data.search([(data._parent_name, comparator, value)]).ids
                        comparator = 'in'
                    if isinstance(data, BaseModel):
                        v = value
                        if isinstance(value, (list, tuple)) and len(value):
                            v = value[0]
                        if isinstance(v, str):
                            data = data.mapped('display_name')
                        else:
                            data = data and data.ids or [False]
                    elif field and field.type in ('date', 'datetime'):
                        # convert all date and datetime values to datetime
                        normalize = Datetime.to_datetime
                        if isinstance(value, (list, tuple)):
                            value = [normalize(v) for v in value]
                        else:
                            value = normalize(value)
                        data = [normalize(d) for d in data]
                    if comparator in ('in', 'not in'):
                        if not isinstance(value, (list, tuple)):
                            value = [value]

                    if comparator == '=':
                        ok = value in data
                    elif comparator == 'in':
                        ok = any(map(lambda x: x in data, value))
                    elif comparator == '<':
                        ok = any(map(lambda x: x is not None and x < value, data))
                    elif comparator == '>':
                        ok = any(map(lambda x: x is not None and x > value, data))
                    elif comparator == '<=':
                        ok = any(map(lambda x: x is not None and x <= value, data))
                    elif comparator == '>=':
                        ok = any(map(lambda x: x is not None and x >= value, data))
                    elif comparator in ('!=', '<>'):
                        ok = value not in data
                    elif comparator == 'not in':
                        ok = all(map(lambda x: x not in data, value))
                    elif comparator == 'not ilike':
                        ok = all(map(lambda x: value.lower() not in x.lower(), data))
                    elif comparator == 'ilike':
                        data = [x.lower() for x in data]
                        ok = bool(fnmatch.filter(data, '*'+(value_esc or '').lower()+'*'))
                    elif comparator == 'not like':
                        ok = all(map(lambda x: value not in x, data))
                    elif comparator == 'like':
                        ok = bool(fnmatch.filter(data, value and '*'+value_esc+'*'))
                    elif comparator == '=?':
                        ok = (value in data) or not value
                    elif comparator == '=like':
                        ok = bool(fnmatch.filter(data, value_esc))
                    elif comparator == '=ilike':
                        data = [x.lower() for x in data]
                        ok = bool(fnmatch.filter(data, value and value_esc.lower()))
                    else:
                        raise ValueError
                    if ok:
                        records |= rec
                result.append(records)
        while len(result) > 1:
            result.append(result.pop() & result.pop())
        return result[0]
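
    # filtered_domain() evaluates a search domain in memory, against the
    # records already in `self`; a sketch with hypothetical field names:
    #
    #   lines.filtered_domain([('state', '=', 'draft'), ('qty', '>', 0)])
    #   # same records as:
    #   #   lines.filtered(lambda l: l.state == 'draft' and l.qty > 0)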


    def sorted(self, key=None, reverse=False):
        """Return the recordset ``self`` ordered by ``key``.

        :param key: either a function of one argument that returns a
            comparison key for each record, or a field name, or ``None``, in
            which case records are ordered according to the model's default order
        :type key: callable or str or None
        :param bool reverse: if ``True``, return the result in reverse order

        .. code-block:: python3

            # sort records by name
            records.sorted(key=lambda r: r.name)
        """
        if key is None:
            recs = self.search([('id', 'in', self.ids)])
            return self.browse(reversed(recs._ids)) if reverse else recs
        if isinstance(key, str):
            key = itemgetter(key)
        return self.browse(item.id for item in sorted(self, key=key, reverse=reverse))

    def update(self, values):
        """ Update the records in ``self`` with ``values``. """
        for record in self:
            for name, value in values.items():
                record[name] = value

    @api.model
    def flush(self, fnames=None, records=None):
        """ Process all the pending recomputations (or at least the given field
            names `fnames` if present) and flush the pending updates to the
            database.
        """
        def process(model, id_vals):
            # group record ids by vals, to update in batch when possible
            updates = defaultdict(list)
            for rid, vals in id_vals.items():
                updates[frozendict(vals)].append(rid)

            for vals, ids in updates.items():
                recs = model.browse(ids)
                try:
                    recs._write(vals)
                except MissingError:
                    recs.exists()._write(vals)

        if fnames is None:
            # flush everything
            self.recompute()
            while self.env.all.towrite:
                model_name, id_vals = self.env.all.towrite.popitem()
                process(self.env[model_name], id_vals)
        else:
            # flush self's model if any of the fields must be flushed
            self.recompute(fnames, records=records)

            # check whether any of 'records' must be flushed
            if records is not None:
                fnames = set(fnames)
                towrite = self.env.all.towrite.get(self._name)
                if not towrite or all(
                    fnames.isdisjoint(towrite.get(record.id, ()))
                    for record in records
                ):
                    return

            # DLE P76: test_onchange_one2many_with_domain_on_related_field
            # ```
            # email.important = True
            # self.assertIn(email, discussion.important_emails)
            # ```
            # When a search on a field coming from a related occurs (the domain
            # on discussion.important_emails field), make sure the related field
            # is flushed
            model_fields = {}
            for fname in fnames:
                field = self._fields[fname]
                model_fields.setdefault(field.model_name, []).append(field)
                if field.related_field:
                    model_fields.setdefault(field.related_field.model_name, []).append(field.related_field)
            for model_name, fields in model_fields.items():
                if any(
                    field.name in vals
                    for vals in self.env.all.towrite.get(model_name, {}).values()
                    for field in fields
                ):
                    id_vals = self.env.all.towrite.pop(model_name)
                    process(self.env[model_name], id_vals)
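
    # The typical reason to call flush(): pending ORM writes must reach the
    # database before raw SQL reads it. A sketch (table/field names are
    # illustrative):
    #
    #   self.flush(['state'])       # push pending 'state' updates to the db
    #   self.env.cr.execute("SELECT id FROM sale_order WHERE state = 'done'")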

    #
    # New records - represent records that do not exist in the database yet;
    # they are used to perform onchanges.
    #

    @api.model
    def new(self, values={}, origin=None, ref=None):
        """ new([values], [origin], [ref]) -> record

        Return a new record instance attached to the current environment and
        initialized with the provided ``values``. The record is *not* created
        in database, it only exists in memory.

        One can pass an ``origin`` record, which is the actual record behind the
        result. It is retrieved as ``record._origin``. Two new records with the
        same origin record are considered equal.

        One can also pass a ``ref`` value to identify the record among other new
        records. The reference is encapsulated in the ``id`` of the record.
        """
        if origin is not None:
            origin = origin.id
        record = self.browse([NewId(origin, ref)])
        record._update_cache(values, validate=False)

        return record
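
    # Usage sketch: new() records back onchange logic and live only in the
    # cache (hypothetical values):
    #
    #   virtual = env['res.partner'].new({'name': 'Draft partner'})
    #   bool(virtual.id)    # False: the id is a NewId
    #   virtual._origin     # empty recordset, unless `origin` was passed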

    @property
    def _origin(self):
        """ Return the actual records corresponding to ``self``. """
        ids = tuple(origin_ids(self._ids))
        prefetch_ids = IterableGenerator(origin_ids, self._prefetch_ids)
        return self._browse(self.env, ids, prefetch_ids)

    #
    # "Dunder" methods
    #

    def __bool__(self):
        """ Test whether ``self`` is nonempty. """
        return bool(getattr(self, '_ids', True))
    __nonzero__ = __bool__

    def __len__(self):
        """ Return the size of ``self``. """
        return len(self._ids)

    def __iter__(self):
        """ Return an iterator over ``self``. """
        for id in self._ids:
            yield self._browse(self.env, (id,), self._prefetch_ids)

    def __contains__(self, item):
        """ Test whether ``item`` (record or field name) is an element of ``self``.
            In the first case, the test is fully equivalent to::

                any(item == record for record in self)
        """
        if isinstance(item, BaseModel) and self._name == item._name:
            return len(item) == 1 and item.id in self._ids
        elif isinstance(item, str):
            return item in self._fields
        else:
            raise TypeError("Mixing apples and oranges: %s in %s" % (item, self))

    def __add__(self, other):
        """ Return the concatenation of two recordsets. """
        return self.concat(other)

    def concat(self, *args):
        """ Return the concatenation of ``self`` with all the arguments (in
            linear time complexity).
        """
        ids = list(self._ids)
        for arg in args:
            if not (isinstance(arg, BaseModel) and arg._name == self._name):
                raise TypeError("Mixing apples and oranges: %s.concat(%s)" % (self, arg))
            ids.extend(arg._ids)
        return self.browse(ids)

    def __sub__(self, other):
        """ Return the recordset of all the records in ``self`` that are not in
            ``other``. Note that recordset order is preserved.
        """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise TypeError("Mixing apples and oranges: %s - %s" % (self, other))
        other_ids = set(other._ids)
        return self.browse([id for id in self._ids if id not in other_ids])

    def __and__(self, other):
        """ Return the intersection of two recordsets.
            Note that first occurrence order is preserved.
        """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise TypeError("Mixing apples and oranges: %s & %s" % (self, other))
        other_ids = set(other._ids)
        return self.browse(OrderedSet(id for id in self._ids if id in other_ids))

    def __or__(self, other):
        """ Return the union of two recordsets.
            Note that first occurrence order is preserved.
        """
        return self.union(other)

    def union(self, *args):
        """ Return the union of ``self`` with all the arguments (in linear time
            complexity, with first occurrence order preserved).
        """
        ids = list(self._ids)
        for arg in args:
            if not (isinstance(arg, BaseModel) and arg._name == self._name):
                raise TypeError("Mixing apples and oranges: %s.union(%s)" % (self, arg))
            ids.extend(arg._ids)
        return self.browse(OrderedSet(ids))
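
    # Summary of the recordset operators defined above (`a` and `b` are
    # recordsets of the same model):
    #
    #   a + b    # concatenation, duplicates kept     (concat)
    #   a | b    # union, first-occurrence order      (union)
    #   a & b    # intersection, order of `a` kept    (__and__)
    #   a - b    # difference, order of `a` kept      (__sub__)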

    def __eq__(self, other):
        """ Test whether two recordsets are equivalent (up to reordering). """
        if not isinstance(other, BaseModel):
            if other:
                filename, lineno = frame_codeinfo(currentframe(), 1)
                _logger.warning("Comparing apples and oranges: %r == %r (%s:%s)",
                                self, other, filename, lineno)
            return NotImplemented
        return self._name == other._name and set(self._ids) == set(other._ids)

    def __lt__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            return NotImplemented
        return set(self._ids) < set(other._ids)

    def __le__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            return NotImplemented
        # these are much cheaper checks than a proper subset check, so
        # optimise for checking if a null or singleton are subsets of a
        # recordset
        if not self or self in other:
            return True
        return set(self._ids) <= set(other._ids)

    def __gt__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            return NotImplemented
        return set(self._ids) > set(other._ids)

    def __ge__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            return NotImplemented
        if not other or other in self:
            return True
        return set(self._ids) >= set(other._ids)

    def __int__(self):
        return self.id

    def __str__(self):
        return "%s%s" % (self._name, getattr(self, '_ids', ""))

    def __repr__(self):
        return str(self)

    def __hash__(self):
        if hasattr(self, '_ids'):
            return hash((self._name, frozenset(self._ids)))
        else:
            return hash(self._name)

    def __getitem__(self, key):
        """ If ``key`` is an integer or a slice, return the corresponding record
            selection as an instance (attached to ``self.env``).
            Otherwise read the field ``key`` of the first record in ``self``.

            Examples::

                inst = model.search(dom)    # inst is a recordset
                r4 = inst[3]                # fourth record in inst
                rs = inst[10:20]            # subset of inst
                nm = rs['name']             # name of first record in inst
        """
        if isinstance(key, str):
            # important: one must call the field's getter
            return self._fields[key].__get__(self, type(self))
        elif isinstance(key, slice):
            return self.browse(self._ids[key])
        else:
            return self.browse((self._ids[key],))

    def __setitem__(self, key, value):
        """ Assign the field ``key`` to ``value`` in record ``self``. """
        # important: one must call the field's setter
        return self._fields[key].__set__(self, value)

    #
    # Cache and recomputation management
    #

    @lazy_property
    def _cache(self):
        """ Return the cache of ``self``, mapping field names to values. """
        return RecordCache(self)

    @api.model
    def _in_cache_without(self, field, limit=PREFETCH_MAX):
        """ Return records to prefetch that have no value in cache for ``field``
            (:class:`Field` instance), including ``self``.
            Return at most ``limit`` records.
        """
        recs = self.browse(self._prefetch_ids)
        ids = [self.id]
        for record_id in self.env.cache.get_missing_ids(recs - self, field):
            if not record_id:
                # Do not prefetch `NewId`
                continue
            ids.append(record_id)
            if limit and limit <= len(ids):
                break
        return self.browse(ids)

    @api.model
    def refresh(self):
        """ Clear the records cache.

            .. deprecated:: 8.0
                The record cache is automatically invalidated.
        """
        self.invalidate_cache()

    @api.model
    def invalidate_cache(self, fnames=None, ids=None):
        """ Invalidate the record caches after some records have been modified.
            If both ``fnames`` and ``ids`` are ``None``, the whole cache is cleared.

            :param fnames: the list of modified fields, or ``None`` for all fields
            :param ids: the list of modified record ids, or ``None`` for all
        """
        if fnames is None:
            if ids is None:
                return self.env.cache.invalidate()
            fields = list(self._fields.values())
        else:
            fields = [self._fields[n] for n in fnames]

        # invalidate fields and inverse fields, too
        spec = [(f, ids) for f in fields] + \
               [(invf, None) for f in fields for invf in self._field_inverses[f]]
        self.env.cache.invalidate(spec)

    def modified(self, fnames, create=False):
        """ Notify that fields have been modified on ``self``. This invalidates
            the cache, and prepares the recomputation of stored function fields
            (new-style fields only).

            :param fnames: iterable of field names that have been modified on
                records ``self``
            :param create: whether modified is called in the context of record creation
        """
        if not self or not fnames:
            return
        if len(fnames) == 1:
            tree = self._field_triggers.get(self._fields[next(iter(fnames))])
        else:
            # merge dependency trees to evaluate all triggers at once
            tree = {}
            for fname in fnames:
                node = self._field_triggers.get(self._fields[fname])
                if node:
                    trigger_tree_merge(tree, node)
        if tree:
            self.sudo().with_context(active_test=False)._modified_triggers(tree, create)

    def _modified_triggers(self, tree, create=False):
        """ Process a tree of field triggers on ``self``. """
        if not self:
            return
        for key, val in tree.items():
            if key is None:
                # val is a list of fields to mark as todo
                for field in val:
                    records = self - self.env.protected(field)
                    if not records:
                        continue
                    # Don't force the recomputation of compute fields which are
                    # not stored as this is not really necessary.
                    recursive = not create and field.recursive
                    if field.compute and field.store:
                        if recursive:
                            added = self.env.not_to_compute(field, records)
                        self.env.add_to_compute(field, records)
                    else:
                        if recursive:
                            added = self & self.env.cache.get_records(self, field)
                        self.env.cache.invalidate([(field, records._ids)])
                    # recursively trigger recomputation of field's dependents
                    if recursive:
                        added.modified([field.name])
            elif create and key.type in ('many2one', 'many2one_reference'):
                # upon creation, no other record has a reference to self
                continue
            else:
                # val is another tree of dependencies
                model = self.env[key.model_name]
                for invf in model._field_inverses[key]:
                    # use an inverse of field without domain
                    if not (invf.type in ('one2many', 'many2many') and invf.domain):
                        if invf.type == 'many2one_reference':
                            rec_ids = set()
                            for rec in self:
                                try:
                                    if rec[invf.model_field] == key.model_name:
                                        rec_ids.add(rec[invf.name])
                                except MissingError:
                                    continue
                            records = model.browse(rec_ids)
                        else:
                            try:
                                records = self[invf.name]
                            except MissingError:
                                records = self.exists()[invf.name]

                        # TODO: find a better fix
                        if key.model_name == records._name:
                            break
                else:
                    new_records = self.filtered(lambda r: not r.id)
                    real_records = self - new_records
                    records = model.browse()
                    if real_records:
                        records |= model.search([(key.name, 'in', real_records.ids)], order='id')
                    if new_records:
                        cache_records = self.env.cache.get_records(model, key)
                        records |= cache_records.filtered(lambda r: set(r[key.name]._ids) & set(self._ids))
                records._modified_triggers(val)

    @api.model
    def recompute(self, fnames=None, records=None):
        """ Recompute all function fields (or the given ``fnames`` if present).
            The fields and records to recompute have been determined by method
            :meth:`modified`.
        """
        def process(field):
            recs = self.env.records_to_compute(field)
            if not recs:
                return
            if field.compute and field.store:
                try:
                    recs.mapped(field.name)
                except MissingError:
                    existing = recs.exists()
                    existing.mapped(field.name)
                    # mark the field as computed on missing records, otherwise
                    # they remain forever in the todo list, and lead to an
                    # infinite loop...
                    self.env.remove_to_compute(field, recs - existing)
            else:
                self.env.cache.invalidate([(field, recs._ids)])
                self.env.remove_to_compute(field, recs)

        if fnames is None:
            # recompute everything
            fields_to_compute = self.env.fields_to_compute()
            while fields_to_compute:
                process(next(iter(fields_to_compute)))
        else:
            fields = [self._fields[fname] for fname in fnames]

            # check whether any 'records' must be computed
            if records is not None and not any(
                records & self.env.records_to_compute(field)
                for field in fields
            ):
                return

            # recompute the given fields on self's model
            for field in fields:
                process(field)

    #
    # Generic onchange method
    #

    def _dependent_fields(self, field):
        """ Return an iterator on the fields that depend on ``field``. """
        def traverse(node):
            for key, val in node.items():
                if key is None:
                    yield from val
                else:
                    yield from traverse(val)
        return traverse(self._field_triggers.get(field, {}))

    def _has_onchange(self, field, other_fields):
        """ Return whether ``field`` should trigger an onchange event in the
            presence of ``other_fields``.
        """
        return (field.name in self._onchange_methods) or any(
            dep in other_fields for dep in self._dependent_fields(field)
        )

    @api.model
    def _onchange_spec(self, view_info=None):
        """ Return the onchange spec from a view description; if not given, the
            result of ``self.fields_view_get()`` is used.
        """
        result = {}

        # for traversing the XML arch and populating result
        def process(node, info, prefix):
            if node.tag == 'field':
                name = node.attrib['name']
                names = "%s.%s" % (prefix, name) if prefix else name
                if not result.get(names):
                    result[names] = node.attrib.get('on_change')
                # traverse the subviews included in relational fields
                for subinfo in info['fields'][name].get('views', {}).values():
                    process(etree.fromstring(subinfo['arch']), subinfo, names)
            else:
                for child in node:
                    process(child, info, prefix)

        if view_info is None:
            view_info = self.fields_view_get()
        process(etree.fromstring(view_info['arch']), view_info, '')
        return result

    def _onchange_eval(self, field_name, onchange, result):
        """ Apply onchange method(s) for field ``field_name`` with spec ``onchange``
            on record ``self``. Value assignments are applied on ``self``, while
            domain and warning messages are put in dictionary ``result``.
        """
        onchange = onchange.strip()

        def process(res):
            if not res:
                return
            if res.get('value'):
                res['value'].pop('id', None)
                self.update({key: val for key, val in res['value'].items() if key in self._fields})
            if res.get('domain'):
                result.setdefault('domain', {}).update(res['domain'])
            if res.get('warning'):
                result['warnings'].add((
                    res['warning'].get('title') or _("Warning"),
                    res['warning'].get('message') or "",
                    res['warning'].get('type') or "",
                ))

        if onchange in ("1", "true"):
            for method in self._onchange_methods.get(field_name, ()):
                method_res = method(self)
                process(method_res)
            return

    def onchange(self, values, field_name, field_onchange):
        """ Perform an onchange on the given field.

            :param values: dictionary mapping field names to values, giving the
                current state of modification
            :param field_name: name of the modified field, or list of field
                names (in view order), or False
            :param field_onchange: dictionary mapping field names to their
                on_change attribute
        """
        # this is for tests using `Form`
        self.flush()

        env = self.env
        if isinstance(field_name, list):
            names = field_name
        elif field_name:
            names = [field_name]
        else:
            names = []

        if not all(name in self._fields for name in names):
            return {}

        def PrefixTree(model, dotnames):
            """ Return a prefix tree for sequences of field names. """
            if not dotnames:
                return {}
            # group dotnames by prefix
            suffixes = defaultdict(list)
            for dotname in dotnames:
                # name, *names = dotname.split('.', 1)
                names = dotname.split('.', 1)
                name = names.pop(0)
                suffixes[name].extend(names)
            # fill in prefix tree in fields order
            tree = OrderedDict()
            for name, field in model._fields.items():
                if name in suffixes:
                    tree[name] = subtree = PrefixTree(model[name], suffixes[name])
                    if subtree and field.type == 'one2many':
                        subtree.pop(field.inverse_name, None)
            return tree

        class Snapshot(dict):
            """ A dict with the values of a record, following a prefix tree. """
            __slots__ = ()

            def __init__(self, record, tree):
                # put record in dict to include it when comparing snapshots
                super(Snapshot, self).__init__({'<record>': record, '<tree>': tree})
                for name in tree:
                    self.fetch(name)

            def fetch(self, name):
                """ Set the value of field ``name`` from the record's value. """
                record = self['<record>']
                tree = self['<tree>']
                if record._fields[name].type in ('one2many', 'many2many'):
                    # x2many fields are serialized as a list of line snapshots
                    self[name] = [Snapshot(line, tree[name]) for line in record[name]]
                else:
                    self[name] = record[name]

            def has_changed(self, name):
                """ Return whether a field on record has changed. """
                record = self['<record>']
                subnames = self['<tree>'][name]
                if record._fields[name].type not in ('one2many', 'many2many'):
                    return self[name] != record[name]
                return (
                    len(self[name]) != len(record[name])
                    or (
                        set(line_snapshot["<record>"].id for line_snapshot in self[name])
                        != set(record[name]._ids)
                    )
                    or any(
                        line_snapshot.has_changed(subname)
                        for line_snapshot in self[name]
                        for subname in subnames
                    )
                )

            def diff(self, other):
                """ Return the values in ``self`` that differ from ``other``.
                    Requires record cache invalidation for correct output!
                """
                record = self['<record>']
                result = {}
                for name, subnames in self['<tree>'].items():
                    if (name == 'id') or (other.get(name) == self[name]):
                        continue
                    field = record._fields[name]
                    if field.type not in ('one2many', 'many2many'):
                        result[name] = field.convert_to_onchange(self[name], record, {})
                    else:
                        # x2many fields: serialize value as commands
                        result[name] = commands = [(5,)]
                        for line_snapshot in self[name]:
                            line = line_snapshot['<record>']
                            line = line._origin or line
                            if not line.id:
                                # new line: send diff from scratch
                                line_diff = line_snapshot.diff({})
                                commands.append((0, line.id.ref or 0, line_diff))
                            else:
                                # existing line: check diff from database
                                # (requires a clean record cache!)
                                line_diff = line_snapshot.diff(Snapshot(line, subnames))
                                if line_diff:
                                    # send all fields because the web client
                                    # might need them to evaluate modifiers
                                    line_diff = line_snapshot.diff({})
                                    commands.append((1, line.id, line_diff))
                                else:
                                    commands.append((4, line.id))
                return result
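
        # For illustration: for an x2many field, diff() emits a command list
        # like [(5,), (4, 7), (1, 8, {...}), (0, 0, {...})]: clear the set,
        # relink unchanged line 7, resend all fields of modified line 8, and
        # create one new line from scratch.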

        nametree = PrefixTree(self.browse(), field_onchange)

        # prefetch x2many lines without data (for the initial snapshot)
        for name, subnames in nametree.items():
            if subnames and values.get(name):
                # retrieve all ids in commands
                line_ids = set()
                for cmd in values[name]:
                    if cmd[0] in (1, 4):
                        line_ids.add(cmd[1])
                    elif cmd[0] == 6:
                        line_ids.update(cmd[2])
                # build corresponding new lines, and prefetch fields
                new_lines = self[name].browse(NewId(id_) for id_ in line_ids)
                for subname in subnames:
                    new_lines.mapped(subname)

        # Isolate changed values, to handle inconsistent data sent from the
        # client side: when a form view contains two one2many fields that
        # overlap, the lines that appear in both fields may be sent with
        # different data. Consider, for instance:
        #
        #   foo_ids: [line with value=1, ...]
        #   bar_ids: [line with value=1, ...]
        #
        # If value=2 is set on 'line' in 'bar_ids', the client sends
        #
        #   foo_ids: [line with value=1, ...]
        #   bar_ids: [line with value=2, ...]
        #
        # The idea is to put 'foo_ids' in cache first, so that the snapshot
        # contains value=1 for line in 'foo_ids'. The snapshot is then updated
        # with the value of `bar_ids`, which will contain value=2 on line.
        #
        # The issue also occurs with other fields. For instance, an onchange on
        # a move line has a value for the field 'move_id' that contains the
        # values of the move, among which the one2many that contains the line
        # itself, with old values!
        #
        changed_values = {name: values[name] for name in names}
        # set changed values to null in initial_values; not setting them
        # triggers default_get() on the new record when creating snapshot0
        initial_values = dict(values, **dict.fromkeys(names, False))

        # create a new record with values
        record = self.new(initial_values, origin=self)

        # make a snapshot based on the initial values of record
        snapshot0 = Snapshot(record, nametree)

        # store changed values in cache, and update snapshot0
        record._update_cache(changed_values, validate=False)
        for name in names:
            snapshot0.fetch(name)

        # determine which field(s) should have their onchange methods triggered
        todo = list(names or nametree)
        done = set()

        # dummy assignment: trigger invalidations on the record
        for name in todo:
            if name == 'id':
                continue
            value = record[name]
            field = self._fields[name]
            if field.type == 'many2one' and field.delegate and not value:
                # do not nullify all fields of parent record for new records
                continue
            record[name] = value

        result = {'warnings': OrderedSet()}

        # process names in order
        while todo:
            # apply field-specific onchange methods
            for name in todo:
                if field_onchange.get(name):
                    record._onchange_eval(name, field_onchange[name], result)
                done.add(name)

            # determine which fields to process for the next pass
            todo = [
                name
                for name in nametree
                if name not in done and snapshot0.has_changed(name)
            ]

            if not env.context.get('recursive_onchanges', True):
                todo = []

        # make the snapshot with the final values of record
        snapshot1 = Snapshot(record, nametree)

        # determine values that have changed by comparing snapshots
        self.invalidate_cache()
        result['value'] = snapshot1.diff(snapshot0)

        # format warnings
        warnings = result.pop('warnings')
        if len(warnings) == 1:
            title, message, type = warnings.pop()
            if not type:
                type = 'dialog'
            result['warning'] = dict(title=title, message=message, type=type)
        elif len(warnings) > 1:
            # concatenate warning titles and messages
            title = _("Warnings")
            message = '\n\n'.join([warn_title + '\n\n' + warn_message for warn_title, warn_message, warn_type in warnings])
            result['warning'] = dict(title=title, message=message, type='dialog')

        return result
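
To make the control flow above concrete, here is a hedged sketch of an onchange call as the web client (or a Form test) would issue it; the model and field names are hypothetical, and field_onchange maps each dotname to the value of its on_change attribute:

# illustrative sketch, not part of models.py; 'my.order' and its fields
# are made-up names
result = env['my.order'].onchange(
    values={'partner_id': 7, 'line_ids': []},
    field_name='partner_id',
    field_onchange={'partner_id': '1', 'line_ids': '',
                    'line_ids.product_id': ''},
)
# result contains 'value' (the diff between the two snapshots) and,
# if any onchange method pushed one, a 'warning' dict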

collections.abc.Set.register(BaseModel)
# not exactly true as BaseModel doesn't have __reversed__, index or count
collections.abc.Sequence.register(BaseModel)
# (the bare collections.Set / collections.Sequence aliases are deprecated
# and were removed in Python 3.10; the ABCs live in collections.abc)

class RecordCache(MutableMapping):
    """ A mapping from field names to values, to read and update the cache of a record. """
    def __init__(self, record):
        assert len(record) == 1, "Unexpected RecordCache(%s)" % record
        self._record = record

    def __contains__(self, name):
        """ Return whether `record` has a cached value for field ``name``. """
        field = self._record._fields[name]
        return self._record.env.cache.contains(self._record, field)

    def __getitem__(self, name):
        """ Return the cached value of field ``name`` for `record`. """
        field = self._record._fields[name]
        return self._record.env.cache.get(self._record, field)

    def __setitem__(self, name, value):
        """ Assign the cached value of field ``name`` for ``record``. """
        field = self._record._fields[name]
        self._record.env.cache.set(self._record, field, value)

    def __delitem__(self, name):
        """ Remove the cached value of field ``name`` for ``record``. """
        field = self._record._fields[name]
        self._record.env.cache.remove(self._record, field)

    def __iter__(self):
        """ Iterate over the field names with a cached value. """
        for field in self._record.env.cache.get_fields(self._record):
            yield field.name

    def __len__(self):
        """ Return the number of fields with a cached value. """
        return sum(1 for name in self)
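
RecordCache is what backs the record-level `_cache` property on BaseModel; a hedged usage sketch (the record and field names are illustrative):

# illustrative sketch, not part of models.py
partner = env['res.partner'].browse(1)
partner.name                     # reading a field loads it into the cache
'name' in partner._cache         # True: a cached value now exists
value = partner._cache['name']   # read the value straight from the cache
del partner._cache['name']       # evict it; the next read fetches again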


AbstractModel = BaseModel

class Model(AbstractModel):
    """ Main super-class for regular database-persisted Odoo models.

    Note on the inheritance chain: Model inherits from AbstractModel, which
    is simply an alias of BaseModel (AbstractModel = BaseModel). BaseModel
    itself is built through the metaclass MetaModel::

        class BaseModel(MetaModel('DummyModel', (object,), {'_register': False})):

    MetaModel extends api.Meta (class MetaModel(api.Meta)), and Meta is the
    base metaclass: class Meta(type).

    Odoo models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _auto = True                # automatically create database backend
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _abstract = False           # not abstract
    _transient = False          # not transient

class TransientModel(Model):
    """ Model super-class for transient records, meant to be temporarily
    persistent, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management, all users can
    create new records, and may only access the records they created. The
    superuser has unrestricted access to all TransientModel records.
    """
    _auto = True                # automatically create database backend
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _abstract = False           # not abstract
    _transient = True           # transient
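
The typical use is a wizard; a hedged sketch (the wizard model is made up):

# illustrative sketch, not part of models.py
from odoo import fields, models

class BookExportWizard(models.TransientModel):
    _name = 'library.book.export.wizard'   # records get vacuum-cleaned later
    _description = 'Book Export Wizard'

    date_from = fields.Date()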

def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
    """
    if len(items) == 0:
        return lambda a: ()
    if len(items) == 1:
        return lambda gettable: (gettable[items[0]],)
    return operator.itemgetter(*items)
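
The inconsistency being fixed is easy to see next to plain operator.itemgetter:

# illustrative sketch
import operator

operator.itemgetter('a')({'a': 1, 'b': 2})        # 1 -- a bare value
operator.itemgetter('a', 'b')({'a': 1, 'b': 2})   # (1, 2) -- a tuple
itemgetter_tuple(['a'])({'a': 1, 'b': 2})         # (1,) -- always a tuple
itemgetter_tuple([])({'a': 1})                    # ()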

def convert_pgerror_not_null(model, fields, info, e):
    if e.diag.table_name != model._table:
        return {'message': _(u"Missing required value for the field '%s'") % (e.diag.column_name)}

    field_name = e.diag.column_name
    field = fields[field_name]
    message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }

def convert_pgerror_unique(model, fields, info, e):
    # new cursor since we're probably in an error handler in a blown
    # transaction which may not have been rolled back/cleaned up yet
    with closing(model.env.registry.cursor()) as cr:
        cr.execute("""
            SELECT
                conname AS "constraint name",
                t.relname AS "table name",
                ARRAY(
                    SELECT attname FROM pg_attribute
                    WHERE attrelid = conrelid
                      AND attnum = ANY(conkey)
                ) as "columns"
            FROM pg_constraint
            JOIN pg_class t ON t.oid = conrelid
            WHERE conname = %s
        """, [e.diag.constraint_name])
        constraint, table, ufields = cr.fetchone() or (None, None, None)
    # if the unique constraint is on an expression or on an other table
    if not ufields or model._table != table:
        return {'message': tools.ustr(e)}

    # TODO: add stuff from e.diag.message_hint? provides details about the constraint & duplication values but may be localized...
    if len(ufields) == 1:
        field_name = ufields[0]
        field = fields[field_name]
        message = _(u"The value for the field '%s' already exists (this is probably '%s' in the current model).") % (field_name, field['string'])
        return {
            'message': message,
            'field': field_name,
        }
    field_strings = [fields[fname]['string'] for fname in ufields]
    message = _(u"The values for the fields '%s' already exist (they are probably '%s' in the current model).") % (', '.join(ufields), ', '.join(field_strings))
    return {
        'message': message,
        # no field, unclear which one we should pick and they could be in any order
    }

def convert_pgerror_constraint(model, fields, info, e):
    sql_constraints = dict([(('%s_%s') % (e.diag.table_name, x[0]), x) for x in model._sql_constraints])
    if e.diag.constraint_name in sql_constraints.keys():
        return {'message': "'%s'" % sql_constraints[e.diag.constraint_name][2]}
    return {'message': tools.ustr(e)}

PGERROR_TO_OE = defaultdict(
    # shape of mapped converters
    lambda: (lambda model, fvg, info, pgerror: {'message': tools.ustr(pgerror)}), {
    '23502': convert_pgerror_not_null,
    '23505': convert_pgerror_unique,
    '23514': convert_pgerror_constraint,
})
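
The mapping is keyed by PostgreSQL SQLSTATE codes (23502 not-null violation, 23505 unique violation, 23514 check-constraint violation); any other code falls back to the defaultdict factory. A hedged dispatch sketch:

# illustrative sketch: e is a psycopg2 error caught while writing records;
# psycopg2 exposes the SQLSTATE as e.pgcode and details as e.diag
converter = PGERROR_TO_OE[e.pgcode]   # e.g. '23505' -> convert_pgerror_unique
data = converter(model, model.fields_get(), None, e)
# an unmapped pgcode yields the generic {'message': tools.ustr(e)}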


def lazy_name_get(self):
    """ Evaluate self.name_get() lazily. """
    names = tools.lazy(lambda: dict(self.name_get()))
    return [(rid, tools.lazy(operator.getitem, names, rid)) for rid in self.ids]
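
tools.lazy defers the actual name_get() call until one of the names is first used, so the batched call happens at most once; a hedged sketch:

# illustrative sketch
pairs = lazy_name_get(env['res.partner'].search([], limit=3))
# no name_get() has run yet
rid, name = pairs[0]
print(name)    # first access evaluates the single shared name_get() call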


# keep those imports here to avoid dependency cycle errors
from .osv import expression
from .fields import Field, Datetime

 
