Coverage for /home/martinb/.local/share/virtualenvs/camcops/lib/python3.6/site-packages/sqlalchemy/pool/impl.py : 31%

Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1# sqlalchemy/pool.py
2# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
3# <see AUTHORS file>
4#
5# This module is part of SQLAlchemy and is released under
6# the MIT License: http://www.opensource.org/licenses/mit-license.php
9"""Pool implementation classes.
11"""
13import traceback
14import weakref
16from .base import _ConnectionFairy
17from .base import _ConnectionRecord
18from .base import Pool
19from .. import exc
20from .. import util
21from ..util import chop_traceback
22from ..util import queue as sqla_queue
23from ..util import threading
class QueuePool(Pool):

    """A :class:`_pool.Pool`
    that imposes a limit on the number of open connections.

    :class:`.QueuePool` is the default pooling implementation used for
    all :class:`_engine.Engine` objects, unless the SQLite dialect is in use.

    """

    def __init__(
        self,
        creator,
        pool_size=5,
        max_overflow=10,
        timeout=30,
        use_lifo=False,
        **kw
    ):
        r"""
        Construct a QueuePool.

        :param creator: a callable function that returns a DB-API
          connection object, same as that of :paramref:`_pool.Pool.creator`.

        :param pool_size: The size of the pool to be maintained,
          defaults to 5. This is the largest number of connections that
          will be kept persistently in the pool. Note that the pool
          begins with no connections; once this number of connections
          is requested, that number of connections will remain.
          ``pool_size`` can be set to 0 to indicate no size limit; to
          disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
          instead.

        :param max_overflow: The maximum overflow size of the
          pool. When the number of checked-out connections reaches the
          size set in pool_size, additional connections will be
          returned up to this limit. When those additional connections
          are returned to the pool, they are disconnected and
          discarded. It follows then that the total number of
          simultaneous connections the pool will allow is pool_size +
          `max_overflow`, and the total number of "sleeping"
          connections the pool will allow is pool_size. `max_overflow`
          can be set to -1 to indicate no overflow limit; no limit
          will be placed on the total number of concurrent
          connections. Defaults to 10.

        :param timeout: The number of seconds to wait before giving up
          on returning a connection. Defaults to 30.

        :param use_lifo: use LIFO (last-in-first-out) when retrieving
          connections instead of FIFO (first-in-first-out). Using LIFO, a
          server-side timeout scheme can reduce the number of connections used
          during non-peak periods of use. When planning for server-side
          timeouts, ensure that a recycle or pre-ping strategy is in use to
          gracefully handle stale connections.

          .. versionadded:: 1.3

          .. seealso::

            :ref:`pool_use_lifo`

            :ref:`pool_disconnects`

        :param \**kw: Other keyword arguments including
          :paramref:`_pool.Pool.recycle`, :paramref:`_pool.Pool.echo`,
          :paramref:`_pool.Pool.reset_on_return` and others are passed to the
          :class:`_pool.Pool` constructor.

        """
        Pool.__init__(self, creator, **kw)
        self._pool = sqla_queue.Queue(pool_size, use_lifo=use_lifo)
        # _overflow tracks connections beyond those currently sitting in the
        # queue.  It starts at -pool_size so that it only reaches
        # _max_overflow once pool_size + max_overflow connections exist;
        # checkedout() relies on this same baseline.
        self._overflow = 0 - pool_size
        self._max_overflow = max_overflow
        self._timeout = timeout
        # guards the read-modify-write of _overflow in _inc_overflow /
        # _dec_overflow (skipped entirely when max_overflow == -1)
        self._overflow_lock = threading.Lock()

    def _do_return_conn(self, conn):
        # Return a connection record to the queue without blocking; if the
        # queue is already at capacity this was an overflow connection, so
        # close it and give back its overflow slot instead.
        try:
            self._pool.put(conn, False)
        except sqla_queue.Full:
            try:
                conn.close()
            finally:
                # decrement even if close() raised, or the slot leaks
                self._dec_overflow()

    def _do_get(self):
        # max_overflow == -1 means "unlimited"; never block in that case
        use_overflow = self._max_overflow > -1

        try:
            # only block (up to self._timeout) when no further overflow
            # connections may be created
            wait = use_overflow and self._overflow >= self._max_overflow
            return self._pool.get(wait, self._timeout)
        except sqla_queue.Empty:
            # don't do things inside of "except Empty", because when we say
            # we timed out or can't connect and raise, Python 3 tells
            # people the real error is queue.Empty which it isn't.
            pass
        if use_overflow and self._overflow >= self._max_overflow:
            if not wait:
                # we didn't block above; another thread may have returned a
                # connection in the meantime, so retry from the top
                return self._do_get()
            else:
                # we blocked for the full timeout and got nothing
                raise exc.TimeoutError(
                    "QueuePool limit of size %d overflow %d reached, "
                    "connection timed out, timeout %d"
                    % (self.size(), self.overflow(), self._timeout),
                    code="3o7r",
                )

        if self._inc_overflow():
            try:
                return self._create_connection()
            except:
                # bare except is deliberate: give back the overflow slot on
                # *any* failure (incl. BaseException), then safe_reraise()
                # re-raises the original exception
                with util.safe_reraise():
                    self._dec_overflow()
        else:
            # lost the race for the last overflow slot; start over
            return self._do_get()

    def _inc_overflow(self):
        # Returns True if an overflow slot was claimed.  Unlimited overflow
        # skips the lock entirely; the counter is then only advisory.
        if self._max_overflow == -1:
            self._overflow += 1
            return True
        with self._overflow_lock:
            if self._overflow < self._max_overflow:
                self._overflow += 1
                return True
            else:
                return False

    def _dec_overflow(self):
        # Mirror of _inc_overflow; always succeeds.
        if self._max_overflow == -1:
            self._overflow -= 1
            return True
        with self._overflow_lock:
            self._overflow -= 1
            return True

    def recreate(self):
        # Produce a fresh pool with identical configuration (used after a
        # fork or on engine disposal); no connections are carried over.
        self.logger.info("Pool recreating")
        return self.__class__(
            self._creator,
            pool_size=self._pool.maxsize,
            max_overflow=self._max_overflow,
            timeout=self._timeout,
            recycle=self._recycle,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            use_threadlocal=self._use_threadlocal,
            reset_on_return=self._reset_on_return,
            _dispatch=self.dispatch,
            dialect=self._dialect,
        )

    def dispose(self):
        # Close every idle connection; checked-out connections are not
        # touched and will be discarded via the overflow path on return.
        while True:
            try:
                conn = self._pool.get(False)
                conn.close()
            except sqla_queue.Empty:
                break

        # reset the overflow baseline as in __init__
        self._overflow = 0 - self.size()
        self.logger.info("Pool disposed. %s", self.status())

    def status(self):
        return (
            "Pool size: %d  Connections in pool: %d "
            "Current Overflow: %d Current Checked out "
            "connections: %d"
            % (
                self.size(),
                self.checkedin(),
                self.overflow(),
                self.checkedout(),
            )
        )

    def size(self):
        return self._pool.maxsize

    def timeout(self):
        return self._timeout

    def checkedin(self):
        return self._pool.qsize()

    def overflow(self):
        return self._overflow

    def checkedout(self):
        # (persistent capacity - idle) + overflow == connections in use;
        # correct only because _overflow starts at -pool_size
        return self._pool.maxsize - self._pool.qsize() + self._overflow
class NullPool(Pool):

    """A Pool which does not pool connections.

    Instead it literally opens and closes the underlying DB-API connection
    per each connection open/close.

    Reconnect-related functions such as ``recycle`` and connection
    invalidation are not supported by this Pool implementation, since
    no connections are held persistently.

    """

    def status(self):
        return "NullPool"

    def _do_get(self):
        # every checkout is a brand-new DB-API connection
        return self._create_connection()

    def _do_return_conn(self, conn):
        # nothing is retained: a returned connection is simply closed
        conn.close()

    def recreate(self):
        # Build a configuration-identical copy of this pool.
        self.logger.info("Pool recreating")
        settings = dict(
            recycle=self._recycle,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            use_threadlocal=self._use_threadlocal,
            reset_on_return=self._reset_on_return,
            _dispatch=self.dispatch,
            dialect=self._dialect,
        )
        return self.__class__(self._creator, **settings)

    def dispose(self):
        # no held connections, so there is nothing to release
        pass
class SingletonThreadPool(Pool):

    """A Pool that maintains one connection per thread.

    Maintains one connection per each thread, never moving a connection to a
    thread other than the one which it was created in.

    .. warning::  the :class:`.SingletonThreadPool` will call ``.close()``
       on arbitrary connections that exist beyond the size setting of
       ``pool_size``, e.g. if more unique **thread identities**
       than what ``pool_size`` states are used. This cleanup is
       non-deterministic and not sensitive to whether or not the connections
       linked to those thread identities are currently in use.

       :class:`.SingletonThreadPool` may be improved in a future release,
       however in its current status it is generally used only for test
       scenarios using a SQLite ``:memory:`` database and is not recommended
       for production use.

    Options are the same as those of :class:`_pool.Pool`, as well as:

    :param pool_size: The number of threads in which to maintain connections
        at once.  Defaults to five.

    :class:`.SingletonThreadPool` is used by the SQLite dialect
    automatically when a memory-based database is used.
    See :ref:`sqlite_toplevel`.

    """

    def __init__(self, creator, pool_size=5, **kw):
        Pool.__init__(self, creator, **kw)
        # per-thread weakref to this thread's connection record
        self._conn = threading.local()
        # per-thread weakref to the checked-out _ConnectionFairy
        self._fairy = threading.local()
        # strong refs to every connection ever handed out, across all
        # threads; keeps them alive and lets dispose()/_cleanup() find them
        self._all_conns = set()
        # note: shadows Pool's size() method with a plain int attribute
        self.size = pool_size

    def recreate(self):
        # Build a configuration-identical copy of this pool.
        self.logger.info("Pool recreating")
        return self.__class__(
            self._creator,
            pool_size=self.size,
            recycle=self._recycle,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            use_threadlocal=self._use_threadlocal,
            reset_on_return=self._reset_on_return,
            _dispatch=self.dispatch,
            dialect=self._dialect,
        )

    def dispose(self):
        """Dispose of this pool."""

        for conn in self._all_conns:
            try:
                conn.close()
            except Exception:
                # pysqlite won't even let you close a conn from a thread
                # that didn't create it
                pass

        self._all_conns.clear()

    def _cleanup(self):
        # Evict arbitrary connections until we're back under pool_size.
        # set.pop() is unordered, so which threads lose their connection is
        # non-deterministic (see class-level warning).
        while len(self._all_conns) >= self.size:
            c = self._all_conns.pop()
            c.close()

    def status(self):
        return "SingletonThreadPool id:%d size: %d" % (
            id(self),
            len(self._all_conns),
        )

    def _do_return_conn(self, conn):
        # connection stays bound to its thread; nothing to return
        pass

    def _do_get(self):
        try:
            # dereference this thread's weakref; None if the record was
            # garbage-collected, which falls through to create a new one
            c = self._conn.current()
            if c:
                return c
        except AttributeError:
            # first checkout on this thread: no 'current' attribute yet
            pass
        c = self._create_connection()
        self._conn.current = weakref.ref(c)
        if len(self._all_conns) >= self.size:
            self._cleanup()
        self._all_conns.add(c)
        return c

    def connect(self):
        # vendored from Pool to include use_threadlocal behavior
        try:
            rec = self._fairy.current()
        except AttributeError:
            # no fairy stored for this thread yet
            pass
        else:
            if rec is not None:
                # re-use the live checkout rather than creating another
                return rec._checkout_existing()

        return _ConnectionFairy._checkout(self, self._fairy)

    def _return_conn(self, record):
        # drop this thread's fairy ref so the next connect() checks out fresh
        try:
            del self._fairy.current
        except AttributeError:
            pass
        self._do_return_conn(record)
class StaticPool(Pool):

    """A Pool of exactly one connection, used for all requests.

    Reconnect-related functions such as ``recycle`` and connection
    invalidation (which is also used to support auto-reconnect) are not
    currently supported by this Pool implementation but may be implemented
    in a future release.

    """

    @util.memoized_property
    def _conn(self):
        # the single DB-API connection, created lazily on first access and
        # then cached on the instance by memoized_property
        return self._creator()

    @util.memoized_property
    def connection(self):
        # the single _ConnectionRecord wrapping _conn, also created lazily
        return _ConnectionRecord(self)

    def status(self):
        return "StaticPool"

    def dispose(self):
        # only close if the memoized _conn was ever materialized; checking
        # __dict__ avoids triggering the lazy creation just to close it
        if "_conn" in self.__dict__:
            self._conn.close()
            self._conn = None

    def recreate(self):
        # Build a configuration-identical copy of this pool.
        self.logger.info("Pool recreating")
        return self.__class__(
            creator=self._creator,
            recycle=self._recycle,
            use_threadlocal=self._use_threadlocal,
            reset_on_return=self._reset_on_return,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            _dispatch=self.dispatch,
            dialect=self._dialect,
        )

    def _create_connection(self):
        # every "new" connection is the same shared one
        return self._conn

    def _do_return_conn(self, conn):
        # the shared connection is never closed on return
        pass

    def _do_get(self):
        return self.connection
class AssertionPool(Pool):

    """A :class:`_pool.Pool` that allows at most one checked out connection at
    any given time.

    This will raise an exception if more than one connection is checked out
    at a time.  Useful for debugging code that is using more connections
    than desired.

    """

    def __init__(self, *args, **kw):
        # single lazily-created connection plus a checked-out flag; a stack
        # trace of the last checkout is kept to make double-checkout
        # failures diagnosable
        self._conn = None
        self._checked_out = False
        self._store_traceback = kw.pop("store_traceback", True)
        self._checkout_traceback = None
        Pool.__init__(self, *args, **kw)

    def status(self):
        return "AssertionPool"

    def _do_return_conn(self, conn):
        # returning while nothing is checked out is itself an error
        if not self._checked_out:
            raise AssertionError("connection is not checked out")
        self._checked_out = False
        assert conn is self._conn

    def dispose(self):
        self._checked_out = False
        if self._conn:
            self._conn.close()

    def recreate(self):
        # Build a configuration-identical copy of this pool.
        self.logger.info("Pool recreating")
        return self.__class__(
            self._creator,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            _dispatch=self.dispatch,
            dialect=self._dialect,
        )

    def _do_get(self):
        # double checkout: report where the first checkout happened, if we
        # recorded it
        if self._checked_out:
            suffix = (
                " at:\n%s" % "".join(chop_traceback(self._checkout_traceback))
                if self._checkout_traceback
                else ""
            )
            raise AssertionError("connection is already checked out" + suffix)

        if not self._conn:
            self._conn = self._create_connection()

        self._checked_out = True
        if self._store_traceback:
            self._checkout_traceback = traceback.format_stack()
        return self._conn