def connect(self):
    """Open the connection pool for this database and return ``self``.

    Idempotent: when the pool already exists, nothing is done.
    """
    if self._connpool is None:
        logger.info('connect to "%s"', self.name)
        # Pool bounds come from configuration with conservative defaults.
        self._connpool = ThreadedConnectionPool(
            config.getint('database', 'minconn', default=1),
            config.getint('database', 'maxconn', default=64),
            self.dsn(self.name))
    return self
def check(dbname, user, session, context=None):
    """Validate *session* for *user* on database *dbname*.

    Returns the user id when the session is valid, ``None`` otherwise.
    Retries the check on transient database errors; the last attempt
    re-raises instead of retrying.
    """
    DatabaseOperationalError = backend.get('DatabaseOperationalError')
    for count in range(config.getint('database', 'retry'), -1, -1):
        with Transaction().start(
                dbname, user, context=context) as transaction:
            pool = _get_pool(dbname)
            Session = pool.get('ir.session')
            try:
                # find is None (unknown key), False (expired) or True.
                find = Session.check(user, session)
                break
            except DatabaseOperationalError:
                if count:
                    continue
                raise
            finally:
                # Commit so the session cleanup done by check() persists.
                transaction.commit()
    if find is None:
        logger.error("session failed for '%s' from '%s' on database '%s'",
            user, _get_remote_addr(context), dbname)
        return
    elif not find:
        logger.info("session expired for '%s' from '%s' on database '%s'",
            user, _get_remote_addr(context), dbname)
        return
    else:
        logger.debug("session valid for '%s' from '%s' on database '%s'",
            user, _get_remote_addr(context), dbname)
        return user
def get_sessions(users, name):
    """Return a mapping of user id to its number of active sessions."""
    # AKE: manage session on redis
    if security.config_session_redis():
        dbname = Pool().database_name
        return {
            u.id: security.redis.count_sessions(dbname, u.id)
            for u in users
            }
    Session = Pool().get('ir.session')
    now = datetime.datetime.now()
    timeout = datetime.timedelta(
        seconds=config.getint('session', 'max_age'))
    # Default every user to zero sessions.
    result = dict((u.id, 0) for u in users)
    with Transaction().set_user(0):
        for sub_ids in grouped_slice(users):
            # Ordered by create_uid so groupby sees contiguous groups.
            sessions = Session.search([
                    ('create_uid', 'in', sub_ids),
                    ], order=[('create_uid', 'ASC')])

            def filter_(session):
                # A session counts while its last activity is within max_age.
                timestamp = session.write_date or session.create_date
                return abs(timestamp - now) < timeout
            result.update(
                dict((i, len(list(g)))
                    for i, g in groupby(filter(filter_, sessions),
                        attrgetter('create_uid.id'))))
    return result
def wrapper(request, pool, *args, **kwargs):
    """Run the wrapped ``func`` inside a root transaction with retries.

    ``readonly`` and ``func`` are closure variables from the enclosing
    decorator.  Mutating HTTP methods force a read-write transaction;
    only read-write transactions are retried on operational errors.
    """
    DatabaseOperationalError = backend.get('DatabaseOperationalError')
    readonly_ = readonly  # can not modify non local
    if readonly_ is None:
        if request.method in {'POST', 'PUT', 'DELETE', 'PATCH'}:
            readonly_ = False
        else:
            readonly_ = True
    for count in range(config.getint('database', 'retry'), -1, -1):
        with Transaction().start(pool.database_name, 0,
                readonly=readonly_) as transaction:
            try:
                result = func(request, pool, *args, **kwargs)
            except DatabaseOperationalError:
                # Retry only writable transactions and only while
                # attempts remain.
                if count and not readonly_:
                    transaction.rollback()
                    continue
                logger.error('%s', request, exc_info=True)
                raise
            except Exception:
                logger.error('%s', request, exc_info=True)
                raise
            # Need to commit to unlock SQLite database
            transaction.commit()
        return result
def wsgi_app(self, environ, start_response):
    """WSGI entry point: build the request, enforce CORS, dispatch.

    Cross-origin requests are rejected unless the Origin is listed in
    the ``web.cors`` configuration; allowed origins get the CORS
    response headers echoed back.
    """
    # Pick the protocol whose content-type matches, else a plain Request.
    for cls in self.protocols:
        if cls.content_type in environ.get('CONTENT_TYPE', ''):
            request = cls.request(environ)
            break
    else:
        request = Request(environ)
    origin = request.headers.get('Origin')
    origin_host = urllib.parse.urlparse(origin).netloc if origin else ''
    host = request.headers.get('Host')
    if origin and origin_host != host:
        # Cross-origin: only configured origins are allowed.
        cors = filter(None,
            config.get('web', 'cors', default='').splitlines())
        if origin not in cors:
            abort(HTTPStatus.FORBIDDEN)
    data = self.dispatch_request(request)
    if not isinstance(data, (Response, HTTPException)):
        response = self.make_response(request, data)
    else:
        response = data
    if origin and isinstance(response, Response):
        # Echo CORS headers for the (validated or same-host) origin.
        response.headers['Access-Control-Allow-Origin'] = origin
        response.headers['Vary'] = 'Origin'
        method = request.headers.get('Access-Control-Request-Method')
        if method:
            response.headers['Access-Control-Allow-Methods'] = method
        headers = request.headers.get('Access-Control-Request-Headers')
        if headers:
            response.headers['Access-Control-Allow-Headers'] = headers
        response.headers['Access-Control-Max-Age'] = config.getint(
            'web', 'cache_timeout')
    return response(environ, start_response)
def _dispatch(request, pool, *args, **kwargs):
    """Dispatch an RPC *request* to the model method it names.

    The call runs in a transaction with retries on operational errors
    (read-only RPCs are not retried).  On success the session timestamp
    is refreshed best-effort.
    """
    DatabaseOperationalError = backend.get('DatabaseOperationalError')
    obj, method = get_object_method(request, pool)
    if method in obj.__rpc__:
        rpc = obj.__rpc__[method]
    else:
        # Only methods explicitly registered in __rpc__ are callable.
        raise UserError('Calling method %s on %s is not allowed'
            % (method, obj))
    log_message = '%s.%s(*%s, **%s) from %s@%s/%s'
    log_args = (obj, method, args, kwargs,
        request.authorization.username, request.remote_addr, request.path)
    logger.info(log_message, *log_args)
    user = request.user_id
    for count in range(config.getint('database', 'retry'), -1, -1):
        with Transaction().start(pool.database_name, user,
                readonly=rpc.readonly) as transaction:
            Cache.clean(pool.database_name)
            try:
                c_args, c_kwargs, transaction.context, transaction.timestamp \
                    = rpc.convert(obj, *args, **kwargs)
                meth = getattr(obj, method)
                if (rpc.instantiate is None
                        or not is_instance_method(obj, method)):
                    result = rpc.result(meth(*c_args, **c_kwargs))
                else:
                    # First argument carries the records to instantiate.
                    assert rpc.instantiate == 0
                    inst = c_args.pop(0)
                    if hasattr(inst, method):
                        result = rpc.result(meth(inst, *c_args, **c_kwargs))
                    else:
                        result = [rpc.result(meth(i, *c_args, **c_kwargs))
                            for i in inst]
            except DatabaseOperationalError:
                if count and not rpc.readonly:
                    transaction.rollback()
                    continue
                logger.error(log_message, *log_args, exc_info=True)
                raise
            except (ConcurrencyException, UserError, UserWarning):
                # Expected business errors: log quietly, propagate.
                logger.debug(log_message, *log_args, exc_info=True)
                raise
            except Exception:
                logger.error(log_message, *log_args, exc_info=True)
                raise
            # Need to commit to unlock SQLite database
            transaction.commit()
        Cache.resets(pool.database_name)
        if request.authorization.type == 'session':
            # Best-effort refresh of the session timestamp.
            try:
                with Transaction().start(
                        pool.database_name, 0) as transaction:
                    Session = pool.get('ir.session')
                    Session.reset(request.authorization.get('session'))
            except DatabaseOperationalError:
                logger.debug('Reset session failed', exc_info=True)
        logger.debug('Result: %s', result)
        return result
def login(dbname, loginname, parameters, cache=True, language=None):
    """Authenticate *loginname* on *dbname*.

    Returns ``(user_id, session_key)`` when *cache* is true, the bare
    user id when false, and ``None`` on failure.  Retries on transient
    database errors; a LoginException commits pending changes (e.g.
    attempt counters) before propagating.
    """
    DatabaseOperationalError = backend.get('DatabaseOperationalError')
    context = {'language': language}
    for count in range(config.getint('database', 'retry'), -1, -1):
        with Transaction().start(dbname, 0, context=context) as transaction:
            pool = _get_pool(dbname)
            User = pool.get('res.user')
            try:
                user_id = User.get_login(loginname, parameters)
            except DatabaseOperationalError:
                if count:
                    continue
                raise
            except LoginException:
                # Let's store any changes done
                transaction.commit()
                raise
        break
    if user_id:
        if not cache:
            return user_id
        # Create the session as the authenticated user.
        with Transaction().start(dbname, user_id):
            Session = pool.get('ir.session')
            session, = Session.create([{}])
            return user_id, session.key
    return
def get_connection(self, autocommit=False, readonly=False):
    """Fetch a psycopg2 connection from the pool.

    Waits (1s per attempt) when the pool is exhausted, up to the
    configured number of retries.  The isolation level and read-only
    mode are applied per call.
    """
    for count in range(config.getint('database', 'retry'), -1, -1):
        try:
            conn = self._connpool.getconn()
            break
        except PoolError:
            # Pool exhausted: wait and retry unless closed or out of tries.
            if count and not self._connpool.closed:
                logger.info('waiting a connection')
                time.sleep(1)
                continue
            raise
        except Exception:
            logger.error('connection to "%s" failed',
                self.name, exc_info=True)
            raise
    # We do not use set_session because psycopg2 < 2.7 and psycopg2cffi
    # change the default_transaction_* attributes which breaks external
    # pooling at the transaction level.
    if autocommit:
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    else:
        conn.set_isolation_level(ISOLATION_LEVEL_REPEATABLE_READ)
    # psycopg2cffi does not have the readonly property
    if hasattr(conn, 'readonly'):
        conn.readonly = readonly
    elif not autocommit and readonly:
        # Fall back to SQL for drivers without the readonly attribute.
        cursor = conn.cursor()
        cursor.execute('SET TRANSACTION READ ONLY')
    conn.cursor_factory = PerfCursor
    return conn
def run_task(pool, task_id):
    """Execute queue task *task_id* from *pool* (a Pool or database name).

    Retries on operational errors with a linear back-off; any other
    failure is logged as critical and swallowed.
    """
    if not isinstance(pool, Pool):
        pool = Pool(pool)
    Queue = pool.get('ir.queue')
    logger.info('task "%d" started', task_id)
    try:
        retry = config.getint('database', 'retry')
        for count in range(retry, -1, -1):
            if count != retry:
                # Linear back-off between attempts.
                time.sleep(0.02 * (retry - count))
            with Transaction().start(
                    pool.database_name, 0) as transaction:
                try:
                    try:
                        task, = Queue.search([('id', '=', task_id)])
                    except ValueError:
                        # the task was rollbacked, nothing to do
                        break
                    task.run()
                    break
                except DatabaseOperationalError:
                    if count:
                        transaction.rollback()
                        continue
                    raise
        logger.info('task "%d" done', task_id)
    except Exception:
        logger.critical('task "%d" failed', task_id, exc_info=True)
def logout(dbname, user, session, context=None):
    """Remove *session* of *user* on *dbname* and log the outcome."""
    # AKE: manage session on redis
    if config_session_redis():
        name = redis.get_session(dbname, user, session)
        if name:
            redis.del_session(dbname, user, session)
            return name
    DatabaseOperationalError = backend.get('DatabaseOperationalError')
    for count in range(config.getint('database', 'retry'), -1, -1):
        with Transaction().start(dbname, 0, context=context):
            pool = _get_pool(dbname)
            Session = pool.get('ir.session')
            try:
                # name is the login associated with the removed session.
                name = Session.remove(session)
                break
            except DatabaseOperationalError:
                if count:
                    continue
                raise
    if name:
        logger.info("logout for '%s' from '%s' on database '%s'",
            name, _get_remote_addr(context), dbname)
    else:
        logger.error("logout failed for '%s' from '%s' on database '%s'",
            user, _get_remote_addr(context), dbname)
def run(cls, db_name):
    """Run all due crons of *db_name*, then flush queued tasks.

    The cron table is locked for the whole run so only one scheduler
    processes a database at a time.  Each cron is retried on
    operational errors with a linear back-off.
    """
    transaction = Transaction()
    logger.info('cron started for "%s"', db_name)
    now = datetime.datetime.now()
    retry = config.getint('database', 'retry')
    with transaction.start(db_name, 0, context={'_skip_warnings': True}):
        # Serialize cron execution across processes.
        transaction.database.lock(transaction.connection, cls._table)
        crons = cls.search([
                'OR',
                ('next_call', '<=', now),
                ('next_call', '=', None),
                ])
        for cron in crons:
            name = '<Cron %s@%s %s>' % (cron.id, db_name, cron.method)
            logger.info("%s started", name)
            for count in range(retry, -1, -1):
                if count != retry:
                    time.sleep(0.02 * (retry - count))
                try:
                    with processing(name):
                        cron.run_once()
                    cron.next_call = cron.compute_next_call(now)
                    cron.save()
                    transaction.commit()
                except Exception as e:
                    transaction.rollback()
                    # Only operational errors are retried.
                    if (isinstance(e, backend.DatabaseOperationalError)
                            and count):
                        continue
                    logger.error('%s failed', name, exc_info=True)
                break
        # Run tasks queued by the crons themselves.
        while transaction.tasks:
            task_id = transaction.tasks.pop()
            run_task(db_name, task_id)
    logger.info('cron finished for "%s"', db_name)
def get_cache(self):
    """Return the LRU cache for (user, relevant context), creating it
    on first use."""
    from trytond.cache import LRUDict
    # Only context entries named in cache_keys take part in the key;
    # sorting keeps the tuple deterministic.
    relevant = []
    for name in sorted(self.cache_keys):
        if name in self.context:
            relevant.append((name, self.context[name]))
    cache_key = (self.user, tuple(relevant))
    return self.cache.setdefault(
        cache_key, LRUDict(config.getint('cache', 'model')))
def connect(self):
    """Create the psycopg2 connection pool for this database.

    Idempotent: returns immediately when the pool already exists.
    """
    if self._connpool is not None:
        return self
    logger.info('connect to "%s"', self.database_name)
    uri = parse_uri(config.get('database', 'uri'))
    assert uri.scheme == 'postgresql'
    # Assemble a libpq DSN; missing URI parts contribute empty fragments
    # (matching the historical '%s %s %s %s %s' spacing).
    fragments = [
        "host=%s" % uri.hostname if uri.hostname else '',
        "port=%s" % uri.port if uri.port else '',
        "dbname=%s" % self.database_name,
        "user=%s" % uri.username if uri.username else '',
        "password=%s" % uri.password if uri.password else '',
        ]
    dsn = ' '.join(fragments)
    minconn = config.getint('database', 'minconn', default=1)
    maxconn = config.getint('database', 'maxconn', default=64)
    self._connpool = ThreadedConnectionPool(minconn, maxconn, dsn)
    return self
def list(self, hostname=None):
    """Return database names visible to Tryton, cached per *hostname*.

    The cache is valid for ``session.timeout`` seconds.  Each candidate
    database is probed with a dedicated connection; failures are logged
    at debug level and the database skipped.
    """
    now = time.time()
    timeout = config.getint('session', 'timeout')
    res = self.__class__._list_cache.get(hostname)
    timestamp = self.__class__._list_cache_timestamp.get(hostname, now)
    if res and abs(timestamp - now) < timeout:
        return res
    connection = self.get_connection()
    try:
        cursor = connection.cursor()
        cursor.execute('SELECT datname FROM pg_database '
            'WHERE datistemplate = false ORDER BY datname')
        res = []
        for db_name, in cursor:
            try:
                conn = connect(**self._connection_params(db_name))
                try:
                    with conn:
                        if self._test(conn, hostname=hostname):
                            res.append(db_name)
                finally:
                    conn.close()
            except Exception:
                logger.debug(
                    'Test failed for "%s"', db_name, exc_info=True)
                continue
    finally:
        # Always return the listing connection to the pool.
        self.put_connection(connection)
    self.__class__._list_cache[hostname] = res
    self.__class__._list_cache_timestamp[hostname] = now
    return res
def __post_setup__(cls): super(ModelView, cls).__post_setup__() # Update __rpc__ for field_name, field in cls._fields.items(): field.set_rpc(cls) for button in cls._buttons: if not is_instance_method(cls, button): cls.__rpc__.setdefault(button, RPC(readonly=False, instantiate=0)) else: cls.__rpc__.setdefault(button, RPC(instantiate=0, result=on_change_result)) for parent_cls in cls.__mro__: parent_meth = getattr(parent_cls, button, None) if not parent_meth: continue cls.__change_buttons[button] |= getattr( parent_meth, 'change', set()) for method_name, rpc in cls.__rpc__.items(): if not rpc.cache: continue cache_name = 'rpc.%s.%s' % (cls.__name__, method_name) cache_duration = config.getint('cache', cache_name, default=None) if cache_duration is not None: rpc.cache.duration = datetime.timedelta(seconds=cache_duration)
def check(cls, user, key, domain=None):
    """
    Check user key against max_age and delete old one.
    Return True if key is still valid, False if the key is expired
    and None if the key does not exist.
    """
    now = datetime.datetime.now()
    timeout = datetime.timedelta(
        seconds=config.getint('session', 'max_age'))
    sessions = cls.search([
            ('create_uid', '=', user),
            domain or [],
            ])
    find, last_reset = None, None
    to_delete = []
    for session in sessions:
        if abs(session.create_date - now) < timeout:
            if session.key == key:
                find = True
                last_reset = session.write_date or session.create_date
        else:
            # Expired: remember False only if the key was not already
            # found valid, then schedule for deletion.
            if find is None and session.key == key:
                find = False
            to_delete.append(session)
    cls.delete(to_delete)
    if find:
        # Track last reset time to throttle future resets.
        cls._session_last_reset.set(key, last_reset)
    return find
def login(dbname, loginname, parameters, cache=True, context=None):
    """Authenticate *loginname* on *dbname* and log the outcome.

    Returns ``(user_id, session_key)`` when *cache* is true, the bare
    user id when false, ``None`` on failure.  Login/rate-limit
    exceptions commit pending changes before propagating.
    """
    DatabaseOperationalError = backend.get('DatabaseOperationalError')
    for count in range(config.getint('database', 'retry'), -1, -1):
        with Transaction().start(dbname, 0, context=context) as transaction:
            pool = _get_pool(dbname)
            User = pool.get('res.user')
            try:
                user_id = User.get_login(loginname, parameters)
            except DatabaseOperationalError:
                if count:
                    continue
                raise
            except (LoginException, RateLimitException):
                # Let's store any changes done
                transaction.commit()
                raise
        break
    session = None
    if user_id:
        if not cache:
            session = user_id
        else:
            # Create the session as the authenticated user.
            with Transaction().start(dbname, user_id):
                Session = pool.get('ir.session')
                session = user_id, Session.new()
        logger.info("login succeeded for '%s' from '%s' on database '%s'",
            loginname, _get_remote_addr(context), dbname)
    else:
        logger.error("login failed for '%s' from '%s' on database '%s'",
            loginname, _get_remote_addr(context), dbname)
    return session
def list(self):
    """Return Tryton database names on the server, with a timed cache.

    The listing connection is always returned to the pool; databases
    failing the probe are silently skipped.
    """
    now = time.time()
    timeout = config.getint('session', 'timeout')
    res = self.__class__._list_cache
    if res and abs(self.__class__._list_cache_timestamp - now) < timeout:
        return res
    connection = self.get_connection()
    try:
        cursor = connection.cursor()
        cursor.execute('SELECT datname FROM pg_database '
            'WHERE datistemplate = false ORDER BY datname')
        res = []
        for db_name, in cursor:
            try:
                with connect(self.dsn(db_name)) as conn:
                    if self._test(conn):
                        res.append(db_name)
            except Exception:
                continue
    finally:
        self.put_connection(connection)
    self.__class__._list_cache = res
    self.__class__._list_cache_timestamp = now
    return res
def hit_session(dbname, user, session):
    """Refresh the redis TTL of the session key; return its previous TTL.

    A TTL of -2 is redis's marker for a missing key, in which case
    nothing is refreshed.
    """
    session_key = key(dbname, user, session)
    previous_ttl = get_client().ttl(session_key)
    if previous_ttl != -2:
        get_client().expire(
            session_key, config.getint('session', 'timeout'))
    return previous_ttl
def convert_api(cls, report, data, timeout):
    """Convert *data* to the report's output format via the external
    unoconv HTTP API.

    Returns ``(extension, content)``.  Formats already listed in
    MIMETYPES are passed through unchanged.  Conversion is retried on
    failure up to ``report.unoconv_retry`` times before raising
    UnoConversionError.
    """
    # AKE: support printing via external api
    User = Pool().get('res.user')
    input_format = report.template_extension
    output_format = report.extension or report.template_extension
    if output_format in MIMETYPES:
        # Nothing to convert.
        return output_format, data
    oext = FORMAT2EXT.get(output_format, output_format)
    url_tpl = config.get('report', 'api')
    url = url_tpl.format(oext=oext)
    files = {'file': ('doc.' + input_format, data)}
    for count in range(config.getint('report', 'unoconv_retry'), -1, -1):
        try:
            r = requests.post(url, files=files, timeout=timeout)
            if r.status_code < 300:
                return oext, r.content
            else:
                raise UnoConversionError('Conversion of "%s" failed. '
                    'Unoconv responsed with "%s".'
                    % (report.report_name, r.reason))
        except UnoConversionError as e:
            if count:
                time.sleep(0.1)
                continue
            user = User(Transaction().user)
            # Fix: Python 3 exceptions have no .message attribute, and the
            # original "x % y or ''" bound the fallback to the formatted
            # string instead of user.name.
            logger.error('%s User: %s', e, user.name or '')
            raise
def get_connection(self, autocommit=False, readonly=False):
    """Fetch a connection from the pool, waiting when it is exhausted.

    Applies the requested isolation level and, for read-only use,
    issues ``SET TRANSACTION READ ONLY``.
    """
    for count in range(config.getint('database', 'retry'), -1, -1):
        try:
            conn = self._connpool.getconn()
            break
        except PoolError:
            # Pool exhausted: wait 1s per attempt unless pool is closed.
            if count and not self._connpool.closed:
                logger.info('waiting a connection')
                time.sleep(1)
                continue
            raise
        except Exception:
            logger.error('connection to "%s" failed',
                self.name, exc_info=True)
            raise
    if autocommit:
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    else:
        conn.set_isolation_level(ISOLATION_LEVEL_REPEATABLE_READ)
    if readonly:
        cursor = conn.cursor()
        cursor.execute('SET TRANSACTION READ ONLY')
    conn.cursor_factory = PerfCursor
    return conn
def list(cursor):
    """Return Tryton database names using *cursor*, with a timed cache.

    NOTE(review): ``db_name.encode('utf-8')`` yields ``bytes`` on
    Python 3 — this code looks Python-2 era; confirm before reuse.
    """
    now = time.time()
    timeout = config.getint('session', 'timeout')
    res = Database._list_cache
    if res and abs(Database._list_cache_timestamp - now) < timeout:
        return res
    cursor.execute('SELECT datname FROM pg_database '
        'WHERE datistemplate = false ORDER BY datname')
    res = []
    for db_name, in cursor.fetchall():
        db_name = db_name.encode('utf-8')
        try:
            database = Database(db_name).connect()
        except Exception:
            # Not accessible: skip.
            continue
        cursor2 = database.cursor()
        if cursor2.test():
            res.append(db_name)
            cursor2.close(close=True)
        else:
            cursor2.close(close=True)
            database.close()
    Database._list_cache = res
    Database._list_cache_timestamp = now
    return res
def check_request_size(self, request, size=None):
    """Abort when a body-carrying request exceeds the size limit.

    Only POST/PUT/PATCH are checked.  Without an explicit *size* the
    limit comes from configuration (authenticated requests get their
    own limit).  A falsy limit disables the check; a missing
    Content-Length is rejected outright.
    """
    if request.method not in {'POST', 'PUT', 'PATCH'}:
        return
    if size is not None:
        max_size = size
    elif request.user_id:
        max_size = config.getint('request', 'max_size_authenticated')
    else:
        max_size = config.getint('request', 'max_size')
    if not max_size:
        return
    content_length = request.content_length
    if content_length is None:
        # Cannot enforce a limit without a declared length.
        abort(http.client.LENGTH_REQUIRED)
    elif content_length > max_size:
        abort(http.client.REQUEST_ENTITY_TOO_LARGE)
def run(cls, db_name):
    """Run due crons of *db_name* under a table lock, then flush tasks.

    Each cron is retried on operational errors with a linear back-off;
    other failures are logged and the cron skipped.
    """
    logger.info('cron started for "%s"', db_name)
    now = datetime.datetime.now()
    retry = config.getint('database', 'retry')
    with Transaction().start(db_name, 0) as transaction:
        # Serialize cron execution across processes.
        transaction.database.lock(transaction.connection, cls._table)
        crons = cls.search([
                'OR',
                ('next_call', '<=', now),
                ('next_call', '=', None),
                ])
        for cron in crons:
            logger.info("Run cron %s", cron.id)
            for count in range(retry, -1, -1):
                if count != retry:
                    time.sleep(0.02 * (retry - count))
                try:
                    cron.run_once()
                    cron.next_call = cron.compute_next_call(now)
                    cron.save()
                    transaction.commit()
                except Exception as e:
                    transaction.rollback()
                    # Only operational errors are retried.
                    if (isinstance(e, backend.DatabaseOperationalError)
                            and count):
                        continue
                    logger.error('Running cron %s', cron.id, exc_info=True)
                break
        # Run tasks queued by the crons themselves.
        while transaction.tasks:
            task_id = transaction.tasks.pop()
            run_task(db_name, task_id)
    logger.info('cron finished for "%s"', db_name)
def validate_password(cls, password, users):
    """Raise PasswordError when *password* violates the policy.

    Checks, in order: minimum byte length, membership in the configured
    forbidden-words file, and case-insensitive equality with each
    user's name, login or e-mail.
    """
    password_b = password
    if isinstance(password, str):
        # Length policy applies to the encoded byte length.
        password_b = password.encode('utf-8')
    length = config.getint('password', 'length', default=0)
    if length > 0:
        if len(password_b) < length:
            raise PasswordError(gettext(
                    'res.msg_password_length',
                    length=length,
                    ))
    path = config.get('password', 'forbidden', default=None)
    if path:
        # mmap keeps the (possibly large) word list out of the heap.
        with open(path, 'r') as f:
            forbidden = mmap.mmap(
                f.fileno(), 0, access=mmap.ACCESS_READ)
            if forbidden.find(password_b) >= 0:
                raise PasswordError(
                    gettext('res.msg_password_forbidden'))
    for user in users:
        # Use getattr to allow to use non User instances
        for test, message in [
                (getattr(user, 'name', ''), 'res.msg_password_name'),
                (getattr(user, 'login', ''), 'res.msg_password_login'),
                (getattr(user, 'email', ''), 'res.msg_password_email'),
                ]:
            if test and password.lower() == test.lower():
                raise PasswordError(gettext(message))
def run_task(pool, task_id):
    """Execute queue task *task_id*, retrying and rescheduling on failure.

    *pool* may be a Pool or a database name (the pool is initialized on
    demand).  Operational errors trigger a reschedule via the queue
    channel when available; business errors are recorded on ir.error.
    """
    if not isinstance(pool, Pool):
        database_list = Pool.database_list()
        pool = Pool(pool)
        if pool.database_name not in database_list:
            # First use of this database in the process: load the pool.
            with Transaction().start(
                    pool.database_name, 0, readonly=True):
                pool.init()
    Queue = pool.get('ir.queue')
    Error = pool.get('ir.error')
    name = '<Task %s@%s>' % (task_id, pool.database_name)
    logger.info('%s started', name)
    retry = config.getint('database', 'retry')
    try:
        for count in range(retry, -1, -1):
            if count != retry:
                # Linear back-off between attempts.
                time.sleep(0.02 * (retry - count))
            with Transaction().start(
                    pool.database_name, 0) as transaction:
                try:
                    try:
                        task, = Queue.search([('id', '=', task_id)])
                    except ValueError:
                        # the task was rollbacked, nothing to do
                        break
                    with processing(name):
                        task.run()
                    break
                except backend.DatabaseOperationalError:
                    if count:
                        transaction.rollback()
                        continue
                    raise
                except (UserError, UserWarning) as e:
                    # Record the business error before propagating.
                    Error.log(task, e)
                    raise
        logger.info('%s done', name)
    except backend.DatabaseOperationalError:
        logger.info('%s failed, retrying', name, exc_info=True)
        if not config.getboolean('queue', 'worker', default=False):
            time.sleep(0.02 * retry)
        try:
            with Transaction().start(
                    pool.database_name, 0) as transaction:
                if not transaction.database.has_channel():
                    # No notification channel: cannot reschedule.
                    logger.critical('%s failed', name, exc_info=True)
                    return
                task = Queue(task_id)
                # NOTE(review): the delay grows with the original wait and
                # is bounded below by 1 hour via max(); confirm intent.
                if (task.scheduled_at
                        and task.enqueued_at < task.scheduled_at):
                    duration = (task.scheduled_at - task.enqueued_at) * 2
                else:
                    duration = dt.timedelta(seconds=2 * retry)
                duration = max(duration, dt.timedelta(hours=1))
                scheduled_at = dt.datetime.now() + duration * random.random()
                Queue.push(task.name, task.data, scheduled_at=scheduled_at)
        except Exception:
            logger.critical(
                'rescheduling %s failed', name, exc_info=True)
    except (UserError, UserWarning):
        logger.info('%s failed', name)
    except Exception:
        logger.critical('%s failed', name, exc_info=True)
def connect(self):
    """Create the connection pool from the configured database URI.

    Idempotent.  NOTE(review): ``urllib.unquote_plus`` is the
    Python 2 location (Python 3 uses ``urllib.parse.unquote_plus``) —
    this block appears to target Python 2; confirm before reuse.
    """
    if self._connpool is not None:
        return self
    logger.info('connect to "%s"', self.database_name)
    uri = parse_uri(config.get('database', 'uri'))
    assert uri.scheme == 'postgresql'
    # Build libpq DSN fragments; absent parts become empty strings.
    host = uri.hostname and "host=%s" % uri.hostname or ''
    port = uri.port and "port=%s" % uri.port or ''
    name = "dbname=%s" % self.database_name
    user = uri.username and "user=%s" % uri.username or ''
    # The password is percent-decoded from the URI.
    password = ("password=%s" % urllib.unquote_plus(uri.password)
        if uri.password else '')
    minconn = config.getint('database', 'minconn', default=1)
    maxconn = config.getint('database', 'maxconn', default=64)
    dsn = '%s %s %s %s %s' % (host, port, name, user, password)
    self._connpool = ThreadedConnectionPool(minconn, maxconn, dsn)
    return self
def get_cache(self):
    """Return the LRU cache keyed by the current transaction's user and
    the cache-relevant slice of its context, creating it when absent."""
    from trytond.cache import LRUDict
    from trytond.transaction import Transaction
    transaction = Transaction()
    context = transaction.context
    # Deterministic key: sorted (name, value) pairs of the tracked keys
    # actually present in the context.
    key_pairs = tuple(
        (name, context[name])
        for name in sorted(self.cache_keys) if name in context)
    return self.cache.setdefault(
        (transaction.user, key_pairs),
        LRUDict(config.getint('cache', 'model')))
def reset_password(cls, users, length=8, from_=None):
    """Give each user a fresh temporary password and e-mail it.

    The current password is cleared; the temporary one expires after
    the configured ``password.reset_timeout`` seconds.
    """
    validity = datetime.timedelta(
        seconds=config.getint('password', 'reset_timeout'))
    for user in users:
        user.password = None
        user.password_reset = gen_password(length=length)
        user.password_reset_expire = datetime.datetime.now() + validity
    cls.save(users)
    _send_email(from_, users, cls.get_email_reset_password)
def unsubscribe(cls, tokens):
    """Deactivate subscriptions matching *tokens*; return True when any
    record matched.

    A short sleep is registered at transaction exit to slow down
    brute-force probing of tokens.
    """
    # Make it slow to prevent brute force attacks
    Transaction().atexit(
        time.sleep,
        config.getint('marketing', 'subscribe_delay', default=1))
    matched = cls.search([('email_token', 'in', tokens)])
    cls.write(matched, {'active': False})
    return bool(matched)
def check_token(dbname, token):
    """Validate *token* against *dbname*, retrying transient DB errors.

    Each attempt runs in its own read-only root transaction; the final
    attempt re-raises the operational error instead of retrying.
    """
    retries = config.getint('database', 'retry')
    for remaining in range(retries, -1, -1):
        with Transaction().start(dbname, 0, readonly=True):
            Token = _get_pool(dbname).get('api.token')
            try:
                return Token.check(token)
            except backend.DatabaseOperationalError:
                if not remaining:
                    raise
                continue
def reset(cls, key, domain=None):
    "Reset key session timestamp"
    now = datetime.datetime.now()
    timeout = datetime.timedelta(
        seconds=config.getint('session', 'timeout'))
    # reset_timeout must be way shorter than the session timeout in order
    # to not bother users with popup asking their password
    reset_timeout = datetime.timedelta(
        seconds=config.getint('session', 'timeout') // 10)
    last_reset = cls._session_last_reset.get(key)
    # Throttle: only touch the rows when the last reset is old enough.
    if last_reset is None or (now - reset_timeout) > last_reset:
        timestamp = now - timeout
        # Only sessions still within the timeout window are touched.
        sessions = cls.search([
                ('key', '=', key),
                ['OR',
                    ('create_date', '>=', timestamp),
                    ('write_date', '>=', timestamp),
                    ],
                domain or [],
                ])
        # Empty write still bumps write_date, refreshing the session.
        cls.write(sessions, {})
def check(cls, user, key):
    """Return True when *key* is a live session of *user*.

    Sessions idle for longer than ``session.timeout`` are deleted as a
    side effect, whatever their key.
    """
    now = datetime.datetime.now()
    max_age = datetime.timedelta(
        seconds=config.getint('session', 'timeout'))
    valid = False
    for session in cls.search([('create_uid', '=', user)]):
        last_used = session.write_date or session.create_date
        if abs(last_used - now) >= max_age:
            # Expired: drop it.
            cls.delete([session])
        elif session.key == key:
            valid = True
    return valid
def check(dbname, user, session):
    """Return *user* when *session* is valid on *dbname*, else None.

    Retries on transient database errors; every attempt commits (via
    finally) so session cleanup performed by the check persists.
    """
    DatabaseOperationalError = backend.get('DatabaseOperationalError')
    for count in range(config.getint('database', 'retry'), -1, -1):
        with Transaction().start(dbname, user) as transaction:
            pool = _get_pool(dbname)
            Session = pool.get('ir.session')
            try:
                if not Session.check(user, session):
                    return
                else:
                    return user
            except DatabaseOperationalError:
                if count:
                    continue
                raise
            finally:
                transaction.commit()
def list(self):
    """Return server database names that pass the Tryton probe.

    Results are cached for ``session.timeout`` seconds.  The listing
    connection is always returned to the pool, even when the query
    raises (previously it leaked on error).
    """
    now = time.time()
    timeout = config.getint('session', 'timeout')
    res = Database._list_cache
    if res and abs(Database._list_cache_timestamp - now) < timeout:
        return res
    conn = self.get_connection()
    try:
        cursor = conn.cursor()
        cursor.execute('SHOW DATABASES')
        res = []
        for db_name, in cursor.fetchall():
            try:
                database = Database(db_name).connect()
            except Exception:
                # Not accessible: skip it.
                continue
            if database.test():
                res.append(db_name)
            database.close()
    finally:
        # Fix: return the connection even when SHOW DATABASES or the
        # probes raise; matches the sibling implementations.
        self.put_connection(conn)
    Database._list_cache = res
    Database._list_cache_timestamp = now
    return res
def init(cls): r""" Initialize a Tryton database. Should be called only once. Updates the tryton config and writes the configured number of retries into the class attribute _retry. Configuration via class attributes, e.g.:: >>> Tdb._db = 'db' >>> Tdb._configfile = '/path/to/configfile' >>> Tdb._company = 1 >>> Tdb._user = '******' >>> Tdb.init() Expects class attributes _db, _configfile, _company and _user to be set. """ config.update_etc(str(cls._configfile)) cls._retry = config.getint('database', 'retry') with Transaction().start(str(cls._db), int(cls._user), readonly=True): Pool().init()
def list(cursor):
    """Return server database names that pass the Tryton probe (cached).

    NOTE(review): the success branch calls ``cursor2.close(close=True)``
    while the failure branch calls ``cursor2.close()`` and closes the
    database — asymmetric with the sibling implementations; confirm the
    close semantics before changing.
    """
    now = time.time()
    timeout = config.getint('session', 'timeout')
    res = Database._list_cache
    if res and abs(Database._list_cache_timestamp - now) < timeout:
        return res
    cursor.execute('SHOW DATABASES')
    res = []
    for db_name, in cursor.fetchall():
        try:
            database = Database(db_name).connect()
        except Exception:
            # Not accessible: skip it.
            continue
        cursor2 = database.cursor()
        if cursor2.test():
            res.append(db_name)
            cursor2.close(close=True)
        else:
            cursor2.close()
            database.close()
    Database._list_cache = res
    Database._list_cache_timestamp = now
    return res
def list(self):
    """Return Tryton database names on the server, with a timed cache.

    The listing connection is always returned to the pool, even when
    the catalog query raises (previously it leaked on error); this
    matches the sibling implementation that already used try/finally.
    """
    now = time.time()
    timeout = config.getint('session', 'timeout')
    res = Database._list_cache
    if res and abs(Database._list_cache_timestamp - now) < timeout:
        return res
    connection = self.get_connection()
    try:
        cursor = connection.cursor()
        cursor.execute('SELECT datname FROM pg_database '
            'WHERE datistemplate = false ORDER BY datname')
        res = []
        for db_name, in cursor:
            try:
                with connect(self.dsn(db_name)) as conn:
                    if self._test(conn):
                        res.append(db_name)
            except Exception:
                # Database not accessible or not Tryton: skip it.
                continue
    finally:
        # Fix: guarantee the connection goes back to the pool.
        self.put_connection(connection)
    Database._list_cache = res
    Database._list_cache_timestamp = now
    return res
def list(cursor):
    """Return Tryton database names via *cursor*, with a timed cache.

    Databases that cannot be connected to or fail the probe are
    skipped.
    """
    now = time.time()
    timeout = config.getint('session', 'timeout')
    res = Database._list_cache
    if res and abs(Database._list_cache_timestamp - now) < timeout:
        return res
    cursor.execute('SELECT datname FROM pg_database '
        'WHERE datistemplate = false ORDER BY datname')
    res = []
    for db_name, in cursor.fetchall():
        try:
            database = Database(db_name).connect()
        except Exception:
            # Not accessible: skip it.
            continue
        cursor2 = database.cursor()
        if cursor2.test():
            res.append(db_name)
            cursor2.close(close=True)
        else:
            cursor2.close(close=True)
            database.close()
    Database._list_cache = res
    Database._list_cache_timestamp = now
    return res
# This file is part of Tryton. The COPYRIGHT file at the top level of # this repository contains the full copyright notices and license terms. from decimal import Decimal from trytond.model import ModelView, ModelSQL, fields, Unique from trytond.cache import Cache from trytond.transaction import Transaction from trytond.pool import Pool, PoolMeta from trytond import backend from trytond.config import config __all__ = ['Employee', 'EmployeeCostPrice', 'price_digits'] __metaclass__ = PoolMeta price_digits = (16, config.getint( 'timesheet_cost', 'price_decimal', default=4)) class Employee: __name__ = 'company.employee' cost_price = fields.Function(fields.Numeric('Cost Price', digits=price_digits, help="Hourly cost price for this Employee"), 'get_cost_price') cost_prices = fields.One2Many('company.employee_cost_price', 'employee', 'Cost Prices', help="List of hourly cost price over time") _cost_prices_cache = Cache('company_employee.cost_prices') def get_cost_price(self, name): ''' Return the cost price at the date given in the context or the current date '''
#! -*- coding: utf8 -*- # The COPYRIGHT file at the top level of this repository contains # the full copyright notices and license terms. from trytond.model import fields from trytond.transaction import Transaction from trytond.pool import Pool, PoolMeta from trytond.pyson import Eval from trytond.modules.product import price_digits from trytond.config import config as config_ from decimal import Decimal __all__ = ['Contract', 'ContractConsumption', 'ContractLine'] __metaclass__ = PoolMeta DISCOUNT_DIGITS = config_.getint('product', 'discount_decimal', default=4) class Contract: 'Contract' __name__ = 'contract' contract_discount = fields.Numeric('Contract Discount', digits=(16, DISCOUNT_DIGITS), states={ 'readonly': Eval('state') != 'draft', }, depends=['state'], help='This discount will be applied in all lines after their own ' 'discount.') @classmethod def __setup__(cls):
''' import random import time from sql.aggregate import Count from trytond.pool import PoolMeta, Pool from trytond.model import ModelView, ModelSQL, fields from trytond.config import config from trytond.transaction import Transaction __all__ = ['Party', 'Badge'] __metaclass__ = PoolMeta code_size = config.getint('party-access-control', 'size', 5) timeout = config.getint('party-access-control', 'timeout', 120) class Party: __name__ = 'party.party' badges = fields.One2Many('access.control.badge', 'party', 'Badge') class Badge(ModelSQL, ModelView): "Badge" __name__ = 'access.control.badge' _rec_name = 'code' code = fields.Char('Code', select=True, required=True, readonly=True)
def _dispatch(request, pool, *args, **kwargs):
    """Execute an RPC request against `pool` and return its result.

    Resolves the target object/method from the request, checks that the
    method is exposed through ``__rpc__`` (raises UserError otherwise),
    then runs it inside a transaction.  Writable calls are retried on
    DatabaseOperationalError up to the configured [database] retry
    count.  On success the user's session timestamp is refreshed
    (best-effort) and the RPC result is returned.
    """
    DatabaseOperationalError = backend.get('DatabaseOperationalError')

    obj, method = get_object_method(request, pool)
    if method in obj.__rpc__:
        rpc = obj.__rpc__[method]
    else:
        raise UserError('Calling method %s on %s is not allowed'
            % (method, obj))

    log_message = '%s.%s(*%s, **%s) from %s@%s/%s'
    log_args = (obj, method, args, kwargs,
        request.authorization.username, request.remote_addr, request.path)
    logger.info(log_message, *log_args)

    user = request.user_id
    session = None
    if request.authorization.type == 'session':
        session = request.authorization.get('session')
    for count in range(config.getint('database', 'retry'), -1, -1):
        with Transaction().start(pool.database_name, user,
                readonly=rpc.readonly,
                context={'session': session}) as transaction:
            Cache.clean(pool.database_name)
            # Performance logging is best-effort: it must never break
            # the actual call.  Catch Exception (not a bare except) so
            # SystemExit/KeyboardInterrupt still propagate.
            try:
                PerfLog().on_enter(user, session,
                    request.method, args, kwargs)
            except Exception:
                perf_logger.exception('on_enter failed')
            try:
                c_args, c_kwargs, transaction.context, transaction.timestamp \
                    = rpc.convert(obj, *args, **kwargs)
                meth = getattr(obj, method)
                # Wrap the method for profiling when possible; fall back
                # to the unwrapped method if profiling setup fails.
                try:
                    wrapped_meth = profile(meth)
                except Exception:
                    perf_logger.exception('profile failed')
                else:
                    meth = wrapped_meth
                if (rpc.instantiate is None
                        or not is_instance_method(obj, method)):
                    result = rpc.result(meth(*c_args, **c_kwargs))
                else:
                    # Instantiated call: first converted argument is the
                    # record (or iterable of records) to dispatch on.
                    assert rpc.instantiate == 0
                    inst = c_args.pop(0)
                    if hasattr(inst, method):
                        result = rpc.result(meth(inst, *c_args, **c_kwargs))
                    else:
                        result = [rpc.result(meth(i, *c_args, **c_kwargs))
                            for i in inst]
            except DatabaseOperationalError:
                # Retry writable calls while attempts remain; readonly
                # calls and the last attempt re-raise.
                if count and not rpc.readonly:
                    transaction.rollback()
                    continue
                logger.error(log_message, *log_args, exc_info=True)
                raise
            except (ConcurrencyException, UserError, UserWarning):
                # Expected business-level errors: log at debug only.
                logger.debug(log_message, *log_args, exc_info=True)
                raise
            except Exception:
                logger.error(log_message, *log_args, exc_info=True)
                raise
            # Need to commit to unlock SQLite database
            transaction.commit()
        Cache.resets(pool.database_name)
        if request.authorization.type == 'session':
            # Refresh the session timestamp in its own transaction;
            # a failure here must not fail the RPC call.
            try:
                with Transaction().start(
                        pool.database_name, 0) as transaction:
                    Session = pool.get('ir.session')
                    Session.reset(request.authorization.get('session'))
            except DatabaseOperationalError:
                logger.debug('Reset session failed', exc_info=True)
        logger.debug('Result: %s', result)
        try:
            PerfLog().on_leave(result)
        except Exception:
            perf_logger.exception('on_leave failed')
        return result
# Common field states/depends: fields become read-only once the record
# is inactive.
STATES = {
    'readonly': ~Eval('active', True),
    }
DEPENDS = ['active']

# Product kinds.
TYPES = [
    ('goods', 'Goods'),
    ('assets', 'Assets'),
    ('service', 'Service'),
    ]
# Supported cost price computation methods.
COST_PRICE_METHODS = [
    ('fixed', 'Fixed'),
    ('average', 'Average'),
    ]

# Digits for price fields: 16 integer digits, decimal places read from
# the [product] config section (defaults to 4).
price_digits = (16, config.getint('product', 'price_decimal', default=4))


class Template(ModelSQL, ModelView):
    "Product Template"
    __name__ = "product.template"

    name = fields.Char('Name', size=None, required=True, translate=True,
        select=True, states=STATES, depends=DEPENDS)
    type = fields.Selection(TYPES, 'Type', required=True, states=STATES,
        depends=DEPENDS)
    # Consumable only makes sense for goods; hidden for other types.
    consumable = fields.Boolean('Consumable', states={
            'readonly': ~Eval('active', True),
            'invisible': Eval('type', 'goods') != 'goods',
            }, depends=['active', 'type'])
def dispatch(host, port, protocol, database_name, user, session, object_type,
        object_name, method, *args, **kwargs):
    """Top-level RPC dispatcher.

    Handles the 'common' (login/logout/db administration) and 'system'
    (introspection) object types directly; any other call is checked
    against the session, resolved through the pool and executed inside a
    transaction, with retries on DatabaseOperationalError up to the
    configured [database] retry count for writable calls.
    """
    Database = backend.get('Database')
    DatabaseOperationalError = backend.get('DatabaseOperationalError')
    if object_type == 'common':
        if method == 'login':
            # Probe the database connection first; any failure is
            # reported as a plain failed login.
            try:
                database = Database(database_name).connect()
                cursor = database.cursor()
                cursor.close()
            except Exception:
                return False
            res = security.login(database_name, user, session)
            with Transaction().start(database_name, 0):
                Cache.clean(database_name)
                Cache.resets(database_name)
            msg = res and 'successful login' or 'bad login or password'
            logger.info('%s \'%s\' from %s:%d using %s on database \'%s\'',
                msg, user, host, port, protocol, database_name)
            return res or False
        elif method == 'logout':
            name = security.logout(database_name, user, session)
            logger.info('logout \'%s\' from %s:%d '
                'using %s on database \'%s\'',
                name, host, port, protocol, database_name)
            return True
        elif method == 'version':
            return __version__
        elif method == 'list_lang':
            # Static list of translatable languages (code, native name).
            return [
                ('bg_BG', 'Български'),
                ('ca_ES', 'Català'),
                ('cs_CZ', 'Čeština'),
                ('de_DE', 'Deutsch'),
                ('en_US', 'English'),
                ('es_AR', 'Español (Argentina)'),
                ('es_EC', 'Español (Ecuador)'),
                ('es_ES', 'Español (España)'),
                ('es_CO', 'Español (Colombia)'),
                ('es_MX', 'Español (México)'),
                ('fr_FR', 'Français'),
                ('hu_HU', 'Magyar'),
                ('it_IT', 'Italiano'),
                ('lt_LT', 'Lietuvių'),
                ('nl_NL', 'Nederlands'),
                ('pt_BR', 'Português (Brasil)'),
                ('ru_RU', 'Russian'),
                ('sl_SI', 'Slovenščina'),
                ]
        elif method == 'db_exist':
            # Existence test by attempting a connection.
            try:
                database = Database(*args, **kwargs).connect()
                cursor = database.cursor()
                cursor.close(close=True)
                return True
            except Exception:
                return False
        elif method == 'list':
            if not config.getboolean('database', 'list'):
                raise Exception('AccessDenied')
            with Transaction().start(None, 0, close=True) as transaction:
                return transaction.database.list(transaction.cursor)
        elif method == 'create':
            return create(*args, **kwargs)
        elif method == 'restore':
            return restore(*args, **kwargs)
        elif method == 'drop':
            return drop(*args, **kwargs)
        elif method == 'dump':
            return dump(*args, **kwargs)
        return
    elif object_type == 'system':
        database = Database(database_name).connect()
        database_list = Pool.database_list()
        pool = Pool(database_name)
        if database_name not in database_list:
            pool.init()
        if method == 'listMethods':
            # Enumerate every exposed RPC method as type.object.method.
            res = []
            for type in ('model', 'wizard', 'report'):
                for object_name, obj in pool.iterobject(type=type):
                    for method in obj.__rpc__:
                        res.append(type + '.' + object_name + '.' + method)
            return res
        elif method == 'methodSignature':
            return 'signatures not supported'
        elif method == 'methodHelp':
            # args[0] is a dotted 'type.object.name.method' path.
            res = []
            args_list = args[0].split('.')
            object_type = args_list[0]
            object_name = '.'.join(args_list[1:-1])
            method = args_list[-1]
            obj = pool.get(object_name, type=object_type)
            return pydoc.getdoc(getattr(obj, method))
    # Validate the session, retrying on transient database errors.
    for count in range(config.getint('database', 'retry'), -1, -1):
        try:
            user = security.check(database_name, user, session)
        except DatabaseOperationalError:
            if count:
                continue
            raise
        break
    database_list = Pool.database_list()
    pool = Pool(database_name)
    if database_name not in database_list:
        with Transaction().start(database_name, user,
                readonly=True) as transaction:
            pool.init()
    obj = pool.get(object_name, type=object_type)
    if method in obj.__rpc__:
        rpc = obj.__rpc__[method]
    else:
        raise UserError('Calling method %s on %s %s is not allowed!'
            % (method, object_type, object_name))
    log_message = '%s.%s.%s(*%s, **%s) from %s@%s:%d/%s'
    log_args = (object_type, object_name, method, args, kwargs,
        user, host, port, database_name)
    logger.info(log_message, *log_args)
    for count in range(config.getint('database', 'retry'), -1, -1):
        with Transaction().start(database_name, user,
                readonly=rpc.readonly) as transaction:
            Cache.clean(database_name)
            try:
                c_args, c_kwargs, transaction.context, transaction.timestamp \
                    = rpc.convert(obj, *args, **kwargs)
                meth = getattr(obj, method)
                if (rpc.instantiate is None
                        or not is_instance_method(obj, method)):
                    result = rpc.result(meth(*c_args, **c_kwargs))
                else:
                    # Instantiated call: first converted argument is the
                    # record (or iterable of records) to dispatch on.
                    assert rpc.instantiate == 0
                    inst = c_args.pop(0)
                    if hasattr(inst, method):
                        result = rpc.result(meth(inst, *c_args, **c_kwargs))
                    else:
                        result = [rpc.result(meth(i, *c_args, **c_kwargs))
                            for i in inst]
                if not rpc.readonly:
                    transaction.cursor.commit()
            except DatabaseOperationalError:
                # Retry writable calls while attempts remain.
                transaction.cursor.rollback()
                if count and not rpc.readonly:
                    continue
                raise
            except (NotLogged, ConcurrencyException, UserError, UserWarning):
                # Expected business-level errors: log at debug only.
                logger.debug(log_message, *log_args, exc_info=True)
                transaction.cursor.rollback()
                raise
            except Exception:
                logger.error(log_message, *log_args, exc_info=True)
                transaction.cursor.rollback()
                raise
        Cache.resets(database_name)
        # Refresh the session timestamp in its own transaction.
        with Transaction().start(database_name, 0) as transaction:
            pool = Pool(database_name)
            Session = pool.get('ir.session')
            try:
                Session.reset(session)
            except DatabaseOperationalError:
                logger.debug('Reset session failed', exc_info=True)
                # Silently fail when reseting session
                transaction.cursor.rollback()
            else:
                transaction.cursor.commit()
        logger.debug('Result: %s', result)
        return result