mirror of https://github.com/psycopg/psycopg2.git
synced 2024-11-29 12:23:42 +03:00

Merge commit '2_4_6'
Commit: 7a1d1791d3

NEWS | 25 lines changed
@@ -1,3 +1,28 @@
+What's new in psycopg 2.4.6
+---------------------------
+
+- Fixed 'cursor()' arguments propagation in connection subclasses
+  and overriding of the 'cursor_factory' argument. Thanks to
+  Corry Haines for the report and the initial patch (ticket #105).
+- Dropped GIL release during string adaptation around a function call
+  invoking a Python API function, which could cause interpreter crash.
+  Thanks to Manu Cupcic for the report (ticket #110).
+- Close a green connection if there is an error in the callback.
+  Maybe a harsh solution but it leaves the program responsive
+  (ticket #113).
+- 'register_hstore()', 'register_composite()', 'tpc_recover()' work with
+  RealDictConnection and Cursor (ticket #114).
+- Fixed broken pool for Zope and connections re-init across ZSQL methods
+  in the same request (tickets #123, #125, #142).
+- connect() raises an exception instead of swallowing keyword arguments
+  when a connection string is specified as well (ticket #131).
+- Discard any result produced by 'executemany()' (ticket #133).
+- Fixed pickling of FixedOffsetTimezone objects (ticket #135).
+- Release the GIL around PQgetResult calls after COPY (ticket #140).
+- Fixed empty strings handling in composite caster (ticket #141).
+- Fixed pickling of DictRow and RealDictRow objects.
+
+
 What's new in psycopg 2.4.5
 ---------------------------
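For illustration, a minimal sketch of the ticket #105 fix (the DSN is
hypothetical): a 'cursor_factory' passed to cursor() now overrides the
default installed by connection subclasses such as DictConnection::

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect("dbname=test",  # hypothetical DSN
        connection_factory=psycopg2.extras.DictConnection)
    cur = conn.cursor()     # the subclass default: a DictCursor
    # before 2.4.6 the explicit factory below was ignored by subclasses
    ntcur = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)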
@@ -16,7 +16,7 @@
 # their work without bothering about the module dependencies.
 
-ALLOWED_PSYCOPG_VERSIONS = ('2.4-beta1', '2.4-beta2', '2.4', '2.4.1', '2.4.2', '2.4.3', '2.4.4', '2.4.5')
+ALLOWED_PSYCOPG_VERSIONS = ('2.4', '2.4.1', '2.4.4', '2.4.5', '2.4.6')
 
 import sys
 import time
@@ -52,6 +52,11 @@ class DB(TM, dbi_db.DB):
         # connection, so we avoid to (re)initialize it risking errors.
         conn = pool.getconn(self.dsn)
         if init:
+            # use set_session where available as in these versions
+            # set_isolation_level generates an extra query.
+            if psycopg2.__version__ >= '2.4.2':
+                conn.set_session(isolation_level=int(self.tilevel))
+            else:
                 conn.set_isolation_level(int(self.tilevel))
             conn.set_client_encoding(self.encoding)
             for tc in self.typecasts:
@@ -66,7 +71,7 @@ class DB(TM, dbi_db.DB):
         pool.putconn(self.dsn, conn, close)
 
     def getcursor(self):
-        conn = self.getconn()
+        conn = self.getconn(False)
         return conn.cursor()
 
     def _finish(self, *ignored):
@@ -78,8 +78,10 @@ or DSN for short) is a string... (TODO: finish docs)
 </td>
 <td align="left" valign="top">
   <select name="tilevel:int">
+    <option value="4">Read uncommitted</option>
     <option value="1">Read committed</option>
-    <option value="2" selected="YES">Serializable</option>
+    <option value="2" selected="YES">Repeatable read</option>
+    <option value="3">Serializable</option>
   </select>
 </td>
 </tr>
@@ -44,11 +44,17 @@
 </td>
 <td align="left" valign="top">
   <select name="tilevel:int">
+    <option value="4"
+            <dtml-if expr="tilevel==4">selected="YES"</dtml-if>>
+            Read uncommitted</option>
     <option value="1"
             <dtml-if expr="tilevel==1">selected="YES"</dtml-if>>
             Read committed</option>
     <option value="2"
             <dtml-if expr="tilevel==2">selected="YES"</dtml-if>>
-            Serializable</option>
+            Repeatable read</option>
+    <option value="3"
+            <dtml-if expr="tilevel==3">selected="YES"</dtml-if>>
+            Serializable</option>
   </select>
 </td>
@@ -19,7 +19,151 @@
 # ZPsycopgDA code in db.py.
 
 import threading
-import psycopg2.pool
+import psycopg2
+from psycopg2.pool import PoolError
+
+
+class AbstractConnectionPool(object):
+    """Generic key-based pooling code."""
+
+    def __init__(self, minconn, maxconn, *args, **kwargs):
+        """Initialize the connection pool.
+
+        New 'minconn' connections are created immediately calling 'connfunc'
+        with given parameters. The connection pool will support a maximum of
+        about 'maxconn' connections.
+        """
+        self.minconn = minconn
+        self.maxconn = maxconn
+        self.closed = False
+
+        self._args = args
+        self._kwargs = kwargs
+
+        self._pool = []
+        self._used = {}
+        self._rused = {}    # id(conn) -> key map
+        self._keys = 0
+
+        for i in range(self.minconn):
+            self._connect()
+
+    def _connect(self, key=None):
+        """Create a new connection and assign it to 'key' if not None."""
+        conn = psycopg2.connect(*self._args, **self._kwargs)
+        if key is not None:
+            self._used[key] = conn
+            self._rused[id(conn)] = key
+        else:
+            self._pool.append(conn)
+        return conn
+
+    def _getkey(self):
+        """Return a new unique key."""
+        self._keys += 1
+        return self._keys
+
+    def _getconn(self, key=None):
+        """Get a free connection and assign it to 'key' if not None."""
+        if self.closed: raise PoolError("connection pool is closed")
+        if key is None: key = self._getkey()
+
+        if key in self._used:
+            return self._used[key]
+
+        if self._pool:
+            self._used[key] = conn = self._pool.pop()
+            self._rused[id(conn)] = key
+            return conn
+        else:
+            if len(self._used) == self.maxconn:
+                raise PoolError("connection pool exhausted")
+            return self._connect(key)
+
+    def _putconn(self, conn, key=None, close=False):
+        """Put away a connection."""
+        if self.closed: raise PoolError("connection pool is closed")
+        if key is None: key = self._rused[id(conn)]
+
+        if not key:
+            raise PoolError("trying to put unkeyed connection")
+
+        if len(self._pool) < self.minconn and not close:
+            self._pool.append(conn)
+        else:
+            conn.close()
+
+        # here we check for the presence of key because it can happen that a
+        # thread tries to put back a connection after a call to close
+        if not self.closed or key in self._used:
+            del self._used[key]
+            del self._rused[id(conn)]
+
+    def _closeall(self):
+        """Close all connections.
+
+        Note that this can lead to some code failing badly when trying to use
+        an already closed connection. If you call .closeall() make sure
+        your code can deal with it.
+        """
+        if self.closed: raise PoolError("connection pool is closed")
+        for conn in self._pool + list(self._used.values()):
+            try:
+                conn.close()
+            except:
+                pass
+        self.closed = True
+
+
+class PersistentConnectionPool(AbstractConnectionPool):
+    """A pool that assigns persistent connections to different threads.
+
+    Note that this connection pool generates by itself the required keys
+    using the current thread id. This means that until a thread puts away
+    a connection it will always get the same connection object by successive
+    `!getconn()` calls. This also means that a thread can't use more than one
+    single connection from the pool.
+    """
+
+    def __init__(self, minconn, maxconn, *args, **kwargs):
+        """Initialize the threading lock."""
+        import threading
+        AbstractConnectionPool.__init__(
+            self, minconn, maxconn, *args, **kwargs)
+        self._lock = threading.Lock()
+
+        # we'll need the thread module, to determine thread ids, so we
+        # import it here and copy it in an instance variable
+        import thread
+        self.__thread = thread
+
+    def getconn(self):
+        """Generate thread id and return a connection."""
+        key = self.__thread.get_ident()
+        self._lock.acquire()
+        try:
+            return self._getconn(key)
+        finally:
+            self._lock.release()
+
+    def putconn(self, conn=None, close=False):
+        """Put away an unused connection."""
+        key = self.__thread.get_ident()
+        self._lock.acquire()
+        try:
+            if not conn: conn = self._used[key]
+            self._putconn(conn, key, close)
+        finally:
+            self._lock.release()
+
+    def closeall(self):
+        """Close all connections (even the one currently in use.)"""
+        self._lock.acquire()
+        try:
+            self._closeall()
+        finally:
+            self._lock.release()
+
+
 _connections_pool = {}
 _connections_lock = threading.Lock()
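A minimal usage sketch of the pool added above (the DSN is hypothetical):
connections are keyed on the calling thread's id, so repeated getconn()
calls from one thread return the same connection until it is put away::

    pool = PersistentConnectionPool(4, 200, "dbname=test")  # hypothetical DSN
    conn = pool.getconn()           # keyed on the current thread id
    try:
        curs = conn.cursor()
        curs.execute("SELECT 1")
    finally:
        pool.putconn(conn)          # pass close=True to discard instead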
@@ -29,7 +173,7 @@ def getpool(dsn, create=True):
     try:
         if not _connections_pool.has_key(dsn) and create:
             _connections_pool[dsn] = \
-                psycopg2.pool.PersistentConnectionPool(4, 200, dsn)
+                PersistentConnectionPool(4, 200, dsn)
     finally:
         _connections_lock.release()
     return _connections_pool[dsn]
@@ -61,9 +61,15 @@ The ``connection`` class
 
     .. method:: commit()
 
-        Commit any pending transaction to the database. Psycopg can be set to
-        perform automatic commits at each operation, see
-        `~connection.set_isolation_level()`.
+        Commit any pending transaction to the database.
+
+        By default, Psycopg opens a transaction before executing the first
+        command: if `!commit()` is not called, the effect of any data
+        manipulation will be lost.
+
+        The connection can be also set in "autocommit" mode: no transaction is
+        automatically open, commands have immediate effect. See
+        :ref:`transactions-control` for details.
 
     .. index::
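A sketch of the behaviour described above (DSN and table are hypothetical)::

    conn = psycopg2.connect("dbname=test")
    curs = conn.cursor()
    curs.execute("INSERT INTO test (num) VALUES (42)")
    conn.commit()   # without this, closing the connection discards the insert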
@@ -154,6 +154,15 @@ can be enabled using the `register_hstore()` function.
 
 .. autofunction:: register_hstore
 
+    .. versionchanged:: 2.4
+        added the *oid* parameter. If not specified, the typecaster is
+        installed also if |hstore| is not installed in the :sql:`public`
+        schema.
+
+    .. versionchanged:: 2.4.3
+        added support for |hstore| array.
+
+
 .. |hstore| replace:: :sql:`hstore`
 .. _hstore: http://www.postgresql.org/docs/current/static/hstore.html
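As a sketch of the *oid* parameter described above, the OIDs can also be
queried by hand, as suggested elsewhere in the function documentation
(cursor and connection are hypothetical)::

    curs.execute("SELECT 'hstore'::regtype::oid, 'hstore[]'::regtype::oid")
    oid, array_oid = curs.fetchone()
    psycopg2.extras.register_hstore(conn, oid=oid, array_oid=array_oid)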
@@ -7,7 +7,7 @@ import sys
 
 def main():
     if len(sys.argv) != 3:
-        print >>sys.stderr, "usage: %s index.rst text-dir"
+        sys.stderr.write("usage: %s index.rst text-dir\n")
         return 2
 
     _, index, txt_dir = sys.argv
@@ -18,7 +18,11 @@ def main():
     return 0
 
 def iter_file_base(fn):
-    have_line = iter(open(fn)).next
+    f = open(fn)
+    if sys.version_info[0] >= 3:
+        have_line = iter(f).__next__
+    else:
+        have_line = iter(f).next
 
     while not have_line().startswith('.. toctree'):
         pass
@@ -28,7 +32,7 @@ def iter_file_base(fn):
     yield os.path.splitext(os.path.basename(fn))[0]
 
     n = 0
-    while 1:
+    while True:
         line = have_line()
         if line.isspace():
             continue
@@ -37,18 +41,21 @@ def iter_file_base(fn):
         n += 1
         yield line.strip()
 
+    f.close()
+
     if n < 5:
         # maybe format changed?
         raise Exception("Not enough files found. Format change in index.rst?")
 
 def emit(basename, txt_dir):
-    for line in open(os.path.join(txt_dir, basename + ".txt")):
+    f = open(os.path.join(txt_dir, basename + ".txt"))
+    for line in f:
         line = line.replace("``", "'")
         sys.stdout.write(line)
+    f.close()
 
     # some space between sections
-    print
-    print
+    sys.stdout.write("\n\n")
 
 
 if __name__ == '__main__':
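As an aside, a version-neutral alternative to the branch above is the next()
builtin (available since Python 2.6); a sketch::

    from functools import partial
    f = open(fn)
    have_line = partial(next, iter(f))  # same callable on Python 2 and 3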
@@ -97,6 +97,9 @@ many placeholders can use the same values::
     ...     VALUES (%(int)s, %(date)s, %(date)s, %(str)s);""",
     ...     {'int': 10, 'str': "O'Reilly", 'date': datetime.date(2005, 11, 18)})
 
+When parameters are used, in order to include a literal ``%`` in the query you
+can use the ``%%`` string.
+
 While the mechanism resembles regular Python strings manipulation, there are a
 few subtle differences you should care about when passing parameters to a
 query:
|
||||||
Adaptation of Python values to SQL types
|
Adaptation of Python values to SQL types
|
||||||
----------------------------------------
|
----------------------------------------
|
||||||
|
|
||||||
Many standards Python types are adapted into SQL and returned as Python
|
Many standard Python types are adapted into SQL and returned as Python
|
||||||
objects when a query is executed.
|
objects when a query is executed.
|
||||||
|
|
||||||
If you need to convert other Python types to and from PostgreSQL data types,
|
If you need to convert other Python types to and from PostgreSQL data types,
|
||||||
|
@ -513,12 +516,12 @@ issued by all the cursors created by the same connection. Should any command
|
||||||
fail, the transaction will be aborted and no further command will be executed
|
fail, the transaction will be aborted and no further command will be executed
|
||||||
until a call to the `~connection.rollback()` method.
|
until a call to the `~connection.rollback()` method.
|
||||||
|
|
||||||
The connection is responsible to terminate its transaction, calling either the
|
The connection is responsible for terminating its transaction, calling either
|
||||||
`~connection.commit()` or `~connection.rollback()` method. Committed
|
the `~connection.commit()` or `~connection.rollback()` method. Committed
|
||||||
changes are immediately made persistent into the database. Closing the
|
changes are immediately made persistent into the database. Closing the
|
||||||
connection using the `~connection.close()` method or destroying the
|
connection using the `~connection.close()` method or destroying the
|
||||||
connection object (using `!del` or letting it fall out of scope)
|
connection object (using `!del` or letting it fall out of scope)
|
||||||
will result in an implicit `!rollback()` call.
|
will result in an implicit rollback.
|
||||||
|
|
||||||
It is possible to set the connection in *autocommit* mode: this way all the
|
It is possible to set the connection in *autocommit* mode: this way all the
|
||||||
commands executed will be immediately committed and no rollback is possible. A
|
commands executed will be immediately committed and no rollback is possible. A
|
||||||
|
|
|
@ -146,13 +146,9 @@ def connect(dsn=None,
|
||||||
Using *async*=True an asynchronous connection will be created.
|
Using *async*=True an asynchronous connection will be created.
|
||||||
|
|
||||||
Any other keyword parameter will be passed to the underlying client
|
Any other keyword parameter will be passed to the underlying client
|
||||||
library: the list of supported parameter depends on the library version.
|
library: the list of supported parameters depends on the library version.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if dsn is None:
|
|
||||||
# Note: reproducing the behaviour of the previous C implementation:
|
|
||||||
# keyword are silently swallowed if a DSN is specified. I would have
|
|
||||||
# raised an exception. File under "histerical raisins".
|
|
||||||
items = []
|
items = []
|
||||||
if database is not None:
|
if database is not None:
|
||||||
items.append(('dbname', database))
|
items.append(('dbname', database))
|
||||||
|
@ -162,21 +158,24 @@ def connect(dsn=None,
|
||||||
items.append(('password', password))
|
items.append(('password', password))
|
||||||
if host is not None:
|
if host is not None:
|
||||||
items.append(('host', host))
|
items.append(('host', host))
|
||||||
# Reproducing the previous C implementation behaviour: swallow a
|
if port is not None:
|
||||||
# negative port. The libpq would raise an exception for it.
|
|
||||||
if port is not None and int(port) > 0:
|
|
||||||
items.append(('port', port))
|
items.append(('port', port))
|
||||||
|
|
||||||
items.extend(
|
items.extend([(k, v) for (k, v) in kwargs.iteritems() if v is not None])
|
||||||
[(k, v) for (k, v) in kwargs.iteritems() if v is not None])
|
|
||||||
|
if dsn is not None and items:
|
||||||
|
raise TypeError(
|
||||||
|
"'%s' is an invalid keyword argument when the dsn is specified"
|
||||||
|
% items[0][0])
|
||||||
|
|
||||||
|
if dsn is None:
|
||||||
|
if not items:
|
||||||
|
raise TypeError('missing dsn and no parameters')
|
||||||
|
else:
|
||||||
dsn = " ".join(["%s=%s" % (k, _param_escape(str(v)))
|
dsn = " ".join(["%s=%s" % (k, _param_escape(str(v)))
|
||||||
for (k, v) in items])
|
for (k, v) in items])
|
||||||
|
|
||||||
if not dsn:
|
return _connect(dsn, connection_factory=connection_factory, async=async)
|
||||||
raise InterfaceError('missing dsn and no parameters')
|
|
||||||
|
|
||||||
return _connect(dsn,
|
|
||||||
connection_factory=connection_factory, async=async)
|
|
||||||
|
|
||||||
|
|
||||||
__all__ = filter(lambda k: not k.startswith('_'), locals().keys())
|
__all__ = filter(lambda k: not k.startswith('_'), locals().keys())
|
||||||
|
|
|
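The net effect of the rework above, sketched (DSNs hypothetical)::

    psycopg2.connect(database="test", host=None)  # None kwargs are dropped
    psycopg2.connect("dbname=test", user="bob")   # TypeError: 'user' is an
                                                  # invalid keyword argument
                                                  # when the dsn is specified
    psycopg2.connect()                            # TypeError: missing dsn
                                                  # and no parameters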
@@ -116,9 +116,9 @@ def register_adapter(typ, callable):
 # The SQL_IN class is the official adapter for tuples starting from 2.0.6.
 class SQL_IN(object):
     """Adapt any iterable to an SQL quotable object."""
-
     def __init__(self, seq):
         self._seq = seq
+        self._conn = None
 
     def prepare(self, conn):
         self._conn = conn
@@ -127,6 +127,7 @@ class SQL_IN(object):
         # this is the important line: note how every object in the
         # list is adapted and then how getquoted() is called on it
         pobjs = [adapt(o) for o in self._seq]
+        if self._conn is not None:
             for obj in pobjs:
                 if hasattr(obj, 'prepare'):
                     obj.prepare(self._conn)
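A sketch of the adapter in use; storing the connection in prepare() is what
lets getquoted() prepare connection-dependent elements such as strings::

    from psycopg2.extensions import adapt

    a = adapt((10, 20, 30))
    # a.prepare(conn) would be required for elements needing a connection
    print(a.getquoted())    # (10, 20, 30)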
lib/extras.py | 142 lines changed
@@ -54,46 +54,46 @@ class DictCursorBase(_cursor):
         else:
             raise NotImplementedError(
                 "DictCursorBase can't be instantiated without a row factory.")
-        _cursor.__init__(self, *args, **kwargs)
+        super(DictCursorBase, self).__init__(*args, **kwargs)
         self._query_executed = 0
         self._prefetch = 0
         self.row_factory = row_factory
 
     def fetchone(self):
         if self._prefetch:
-            res = _cursor.fetchone(self)
+            res = super(DictCursorBase, self).fetchone()
         if self._query_executed:
             self._build_index()
         if not self._prefetch:
-            res = _cursor.fetchone(self)
+            res = super(DictCursorBase, self).fetchone()
         return res
 
     def fetchmany(self, size=None):
         if self._prefetch:
-            res = _cursor.fetchmany(self, size)
+            res = super(DictCursorBase, self).fetchmany(size)
         if self._query_executed:
             self._build_index()
         if not self._prefetch:
-            res = _cursor.fetchmany(self, size)
+            res = super(DictCursorBase, self).fetchmany(size)
         return res
 
     def fetchall(self):
         if self._prefetch:
-            res = _cursor.fetchall(self)
+            res = super(DictCursorBase, self).fetchall()
         if self._query_executed:
             self._build_index()
         if not self._prefetch:
-            res = _cursor.fetchall(self)
+            res = super(DictCursorBase, self).fetchall()
         return res
 
     def __iter__(self):
         if self._prefetch:
-            res = _cursor.__iter__(self)
+            res = super(DictCursorBase, self).__iter__()
             first = res.next()
         if self._query_executed:
             self._build_index()
         if not self._prefetch:
-            res = _cursor.__iter__(self)
+            res = super(DictCursorBase, self).__iter__()
             first = res.next()
 
         yield first
@@ -103,29 +103,27 @@ class DictCursorBase(_cursor):
 
 class DictConnection(_connection):
     """A connection that uses `DictCursor` automatically."""
-    def cursor(self, name=None):
-        if name is None:
-            return _connection.cursor(self, cursor_factory=DictCursor)
-        else:
-            return _connection.cursor(self, name, cursor_factory=DictCursor)
+    def cursor(self, *args, **kwargs):
+        kwargs.setdefault('cursor_factory', DictCursor)
+        return super(DictConnection, self).cursor(*args, **kwargs)
 
 class DictCursor(DictCursorBase):
     """A cursor that keeps a list of column name -> index mappings."""
 
     def __init__(self, *args, **kwargs):
         kwargs['row_factory'] = DictRow
-        DictCursorBase.__init__(self, *args, **kwargs)
+        super(DictCursor, self).__init__(*args, **kwargs)
         self._prefetch = 1
 
     def execute(self, query, vars=None):
         self.index = {}
         self._query_executed = 1
-        return _cursor.execute(self, query, vars)
+        return super(DictCursor, self).execute(query, vars)
 
     def callproc(self, procname, vars=None):
         self.index = {}
         self._query_executed = 1
-        return _cursor.callproc(self, procname, vars)
+        return super(DictCursor, self).callproc(procname, vars)
 
     def _build_index(self):
         if self._query_executed == 1 and self.description:
|
||||||
def __contains__(self, x):
|
def __contains__(self, x):
|
||||||
return x in self._index
|
return x in self._index
|
||||||
|
|
||||||
# grop the crusty Py2 methods
|
def __getstate__(self):
|
||||||
|
return self[:], self._index.copy()
|
||||||
|
|
||||||
|
def __setstate__(self, data):
|
||||||
|
self[:] = data[0]
|
||||||
|
self._index = data[1]
|
||||||
|
|
||||||
|
# drop the crusty Py2 methods
|
||||||
if sys.version_info[0] > 2:
|
if sys.version_info[0] > 2:
|
||||||
items = iteritems; del iteritems
|
items = iteritems; del iteritems
|
||||||
keys = iterkeys; del iterkeys
|
keys = iterkeys; del iterkeys
|
||||||
|
@ -196,11 +201,9 @@ class DictRow(list):
|
||||||
|
|
||||||
class RealDictConnection(_connection):
|
class RealDictConnection(_connection):
|
||||||
"""A connection that uses `RealDictCursor` automatically."""
|
"""A connection that uses `RealDictCursor` automatically."""
|
||||||
def cursor(self, name=None):
|
def cursor(self, *args, **kwargs):
|
||||||
if name is None:
|
kwargs.setdefault('cursor_factory', RealDictCursor)
|
||||||
return _connection.cursor(self, cursor_factory=RealDictCursor)
|
return super(RealDictConnection, self).cursor(*args, **kwargs)
|
||||||
else:
|
|
||||||
return _connection.cursor(self, name, cursor_factory=RealDictCursor)
|
|
||||||
|
|
||||||
class RealDictCursor(DictCursorBase):
|
class RealDictCursor(DictCursorBase):
|
||||||
"""A cursor that uses a real dict as the base type for rows.
|
"""A cursor that uses a real dict as the base type for rows.
|
||||||
|
@@ -210,21 +213,20 @@ class RealDictCursor(DictCursorBase):
     to access database rows both as a dictionary and a list, then use
     the generic `DictCursor` instead of `!RealDictCursor`.
     """
-
     def __init__(self, *args, **kwargs):
         kwargs['row_factory'] = RealDictRow
-        DictCursorBase.__init__(self, *args, **kwargs)
+        super(RealDictCursor, self).__init__(*args, **kwargs)
         self._prefetch = 0
 
     def execute(self, query, vars=None):
         self.column_mapping = []
         self._query_executed = 1
-        return _cursor.execute(self, query, vars)
+        return super(RealDictCursor, self).execute(query, vars)
 
     def callproc(self, procname, vars=None):
         self.column_mapping = []
         self._query_executed = 1
-        return _cursor.callproc(self, procname, vars)
+        return super(RealDictCursor, self).callproc(procname, vars)
 
     def _build_index(self):
         if self._query_executed == 1 and self.description:
@@ -250,12 +252,19 @@ class RealDictRow(dict):
             name = self._column_mapping[name]
         return dict.__setitem__(self, name, value)
 
+    def __getstate__(self):
+        return (self.copy(), self._column_mapping[:])
+
+    def __setstate__(self, data):
+        self.update(data[0])
+        self._column_mapping = data[1]
+
 
 class NamedTupleConnection(_connection):
     """A connection that uses `NamedTupleCursor` automatically."""
     def cursor(self, *args, **kwargs):
-        kwargs['cursor_factory'] = NamedTupleCursor
-        return _connection.cursor(self, *args, **kwargs)
+        kwargs.setdefault('cursor_factory', NamedTupleCursor)
+        return super(NamedTupleConnection, self).cursor(*args, **kwargs)
 
 class NamedTupleCursor(_cursor):
     """A cursor that generates results as `~collections.namedtuple`.
@@ -277,18 +286,18 @@ class NamedTupleCursor(_cursor):
 
     def execute(self, query, vars=None):
         self.Record = None
-        return _cursor.execute(self, query, vars)
+        return super(NamedTupleCursor, self).execute(query, vars)
 
     def executemany(self, query, vars):
         self.Record = None
-        return _cursor.executemany(self, query, vars)
+        return super(NamedTupleCursor, self).executemany(query, vars)
 
     def callproc(self, procname, vars=None):
         self.Record = None
-        return _cursor.callproc(self, procname, vars)
+        return super(NamedTupleCursor, self).callproc(procname, vars)
 
     def fetchone(self):
-        t = _cursor.fetchone(self)
+        t = super(NamedTupleCursor, self).fetchone()
         if t is not None:
             nt = self.Record
             if nt is None:
|
||||||
return nt(*t)
|
return nt(*t)
|
||||||
|
|
||||||
def fetchmany(self, size=None):
|
def fetchmany(self, size=None):
|
||||||
ts = _cursor.fetchmany(self, size)
|
ts = super(NamedTupleCursor, self).fetchmany(size)
|
||||||
nt = self.Record
|
nt = self.Record
|
||||||
if nt is None:
|
if nt is None:
|
||||||
nt = self.Record = self._make_nt()
|
nt = self.Record = self._make_nt()
|
||||||
return [nt(*t) for t in ts]
|
return [nt(*t) for t in ts]
|
||||||
|
|
||||||
def fetchall(self):
|
def fetchall(self):
|
||||||
ts = _cursor.fetchall(self)
|
ts = super(NamedTupleCursor, self).fetchall()
|
||||||
nt = self.Record
|
nt = self.Record
|
||||||
if nt is None:
|
if nt is None:
|
||||||
nt = self.Record = self._make_nt()
|
nt = self.Record = self._make_nt()
|
||||||
return [nt(*t) for t in ts]
|
return [nt(*t) for t in ts]
|
||||||
|
|
||||||
def __iter__(self):
|
def __iter__(self):
|
||||||
it = _cursor.__iter__(self)
|
it = super(NamedTupleCursor, self).__iter__()
|
||||||
t = it.next()
|
t = it.next()
|
||||||
|
|
||||||
nt = self.Record
|
nt = self.Record
|
||||||
|
@ -372,25 +381,23 @@ class LoggingConnection(_connection):
|
||||||
raise self.ProgrammingError(
|
raise self.ProgrammingError(
|
||||||
"LoggingConnection object has not been initialize()d")
|
"LoggingConnection object has not been initialize()d")
|
||||||
|
|
||||||
def cursor(self, name=None):
|
def cursor(self, *args, **kwargs):
|
||||||
self._check()
|
self._check()
|
||||||
if name is None:
|
kwargs.setdefault('cursor_factory', LoggingCursor)
|
||||||
return _connection.cursor(self, cursor_factory=LoggingCursor)
|
return super(LoggingConnection, self).cursor(*args, **kwargs)
|
||||||
else:
|
|
||||||
return _connection.cursor(self, name, cursor_factory=LoggingCursor)
|
|
||||||
|
|
||||||
class LoggingCursor(_cursor):
|
class LoggingCursor(_cursor):
|
||||||
"""A cursor that logs queries using its connection logging facilities."""
|
"""A cursor that logs queries using its connection logging facilities."""
|
||||||
|
|
||||||
def execute(self, query, vars=None):
|
def execute(self, query, vars=None):
|
||||||
try:
|
try:
|
||||||
return _cursor.execute(self, query, vars)
|
return super(LoggingCursor, self).execute(query, vars)
|
||||||
finally:
|
finally:
|
||||||
self.connection.log(self.query, self)
|
self.connection.log(self.query, self)
|
||||||
|
|
||||||
def callproc(self, procname, vars=None):
|
def callproc(self, procname, vars=None):
|
||||||
try:
|
try:
|
||||||
return _cursor.callproc(self, procname, vars)
|
return super(LoggingCursor, self).callproc(procname, vars)
|
||||||
finally:
|
finally:
|
||||||
self.connection.log(self.query, self)
|
self.connection.log(self.query, self)
|
||||||
|
|
||||||
|
@ -415,12 +422,9 @@ class MinTimeLoggingConnection(LoggingConnection):
|
||||||
if t > self._mintime:
|
if t > self._mintime:
|
||||||
return msg + os.linesep + " (execution time: %d ms)" % t
|
return msg + os.linesep + " (execution time: %d ms)" % t
|
||||||
|
|
||||||
def cursor(self, name=None):
|
def cursor(self, *args, **kwargs):
|
||||||
self._check()
|
kwargs.setdefault('cursor_factory', MinTimeLoggingCursor)
|
||||||
if name is None:
|
return LoggingConnection.cursor(self, *args, **kwargs)
|
||||||
return _connection.cursor(self, cursor_factory=MinTimeLoggingCursor)
|
|
||||||
else:
|
|
||||||
return _connection.cursor(self, name, cursor_factory=MinTimeLoggingCursor)
|
|
||||||
|
|
||||||
class MinTimeLoggingCursor(LoggingCursor):
|
class MinTimeLoggingCursor(LoggingCursor):
|
||||||
"""The cursor sub-class companion to `MinTimeLoggingConnection`."""
|
"""The cursor sub-class companion to `MinTimeLoggingConnection`."""
|
||||||
|
@ -580,6 +584,18 @@ def wait_select(conn):
|
||||||
raise OperationalError("bad state from poll: %s" % state)
|
raise OperationalError("bad state from poll: %s" % state)
|
||||||
|
|
||||||
|
|
||||||
|
def _solve_conn_curs(conn_or_curs):
|
||||||
|
"""Return the connection and a DBAPI cursor from a connection or cursor."""
|
||||||
|
if hasattr(conn_or_curs, 'execute'):
|
||||||
|
conn = conn_or_curs.connection
|
||||||
|
curs = conn.cursor(cursor_factory=_cursor)
|
||||||
|
else:
|
||||||
|
conn = conn_or_curs
|
||||||
|
curs = conn.cursor(cursor_factory=_cursor)
|
||||||
|
|
||||||
|
return conn, curs
|
||||||
|
|
||||||
|
|
||||||
class HstoreAdapter(object):
|
class HstoreAdapter(object):
|
||||||
"""Adapt a Python dict to the hstore syntax."""
|
"""Adapt a Python dict to the hstore syntax."""
|
||||||
def __init__(self, wrapped):
|
def __init__(self, wrapped):
|
||||||
|
@ -688,12 +704,7 @@ class HstoreAdapter(object):
|
||||||
def get_oids(self, conn_or_curs):
|
def get_oids(self, conn_or_curs):
|
||||||
"""Return the lists of OID of the hstore and hstore[] types.
|
"""Return the lists of OID of the hstore and hstore[] types.
|
||||||
"""
|
"""
|
||||||
if hasattr(conn_or_curs, 'execute'):
|
conn, curs = _solve_conn_curs(conn_or_curs)
|
||||||
conn = conn_or_curs.connection
|
|
||||||
curs = conn_or_curs
|
|
||||||
else:
|
|
||||||
conn = conn_or_curs
|
|
||||||
curs = conn_or_curs.cursor()
|
|
||||||
|
|
||||||
# Store the transaction status of the connection to revert it after use
|
# Store the transaction status of the connection to revert it after use
|
||||||
conn_status = conn.status
|
conn_status = conn.status
|
||||||
|
@ -744,7 +755,6 @@ def register_hstore(conn_or_curs, globally=False, unicode=False,
|
||||||
'hstore'::regtype::oid`. Analogously you can obtain a value for *array_oid*
|
'hstore'::regtype::oid`. Analogously you can obtain a value for *array_oid*
|
||||||
using a query such as :sql:`SELECT 'hstore[]'::regtype::oid`.
|
using a query such as :sql:`SELECT 'hstore[]'::regtype::oid`.
|
||||||
|
|
||||||
|
|
||||||
Note that, when passing a dictionary from Python to the database, both
|
Note that, when passing a dictionary from Python to the database, both
|
||||||
strings and unicode keys and values are supported. Dictionaries returned
|
strings and unicode keys and values are supported. Dictionaries returned
|
||||||
from the database have keys/values according to the *unicode* parameter.
|
from the database have keys/values according to the *unicode* parameter.
|
||||||
|
@ -752,15 +762,6 @@ def register_hstore(conn_or_curs, globally=False, unicode=False,
|
||||||
The |hstore| contrib module must be already installed in the database
|
The |hstore| contrib module must be already installed in the database
|
||||||
(executing the ``hstore.sql`` script in your ``contrib`` directory).
|
(executing the ``hstore.sql`` script in your ``contrib`` directory).
|
||||||
Raise `~psycopg2.ProgrammingError` if the type is not found.
|
Raise `~psycopg2.ProgrammingError` if the type is not found.
|
||||||
|
|
||||||
.. versionchanged:: 2.4
|
|
||||||
added the *oid* parameter. If not specified, the typecaster is
|
|
||||||
installed also if |hstore| is not installed in the :sql:`public`
|
|
||||||
schema.
|
|
||||||
|
|
||||||
.. versionchanged:: 2.4.3
|
|
||||||
added support for |hstore| array.
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if oid is None:
|
if oid is None:
|
||||||
oid = HstoreAdapter.get_oids(conn_or_curs)
|
oid = HstoreAdapter.get_oids(conn_or_curs)
|
||||||
|
@ -874,9 +875,9 @@ class CompositeCaster(object):
|
||||||
for m in self._re_tokenize.finditer(s):
|
for m in self._re_tokenize.finditer(s):
|
||||||
if m is None:
|
if m is None:
|
||||||
raise psycopg2.InterfaceError("can't parse type: %r" % s)
|
raise psycopg2.InterfaceError("can't parse type: %r" % s)
|
||||||
if m.group(1):
|
if m.group(1) is not None:
|
||||||
rv.append(None)
|
rv.append(None)
|
||||||
elif m.group(2):
|
elif m.group(2) is not None:
|
||||||
rv.append(self._re_undouble.sub(r"\1", m.group(2)))
|
rv.append(self._re_undouble.sub(r"\1", m.group(2)))
|
||||||
else:
|
else:
|
||||||
rv.append(m.group(3))
|
rv.append(m.group(3))
|
||||||
|
@ -899,12 +900,7 @@ class CompositeCaster(object):
|
||||||
|
|
||||||
Raise `ProgrammingError` if the type is not found.
|
Raise `ProgrammingError` if the type is not found.
|
||||||
"""
|
"""
|
||||||
if hasattr(conn_or_curs, 'execute'):
|
conn, curs = _solve_conn_curs(conn_or_curs)
|
||||||
conn = conn_or_curs.connection
|
|
||||||
curs = conn_or_curs
|
|
||||||
else:
|
|
||||||
conn = conn_or_curs
|
|
||||||
curs = conn_or_curs.cursor()
|
|
||||||
|
|
||||||
# Store the transaction status of the connection to revert it after use
|
# Store the transaction status of the connection to revert it after use
|
||||||
conn_status = conn.status
|
conn_status = conn.status
|
||||||
|
|
|
@ -217,6 +217,10 @@ class PersistentConnectionPool(AbstractConnectionPool):
|
||||||
|
|
||||||
def __init__(self, minconn, maxconn, *args, **kwargs):
|
def __init__(self, minconn, maxconn, *args, **kwargs):
|
||||||
"""Initialize the threading lock."""
|
"""Initialize the threading lock."""
|
||||||
|
import warnings
|
||||||
|
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
|
||||||
|
DeprecationWarning)
|
||||||
|
|
||||||
import threading
|
import threading
|
||||||
AbstractConnectionPool.__init__(
|
AbstractConnectionPool.__init__(
|
||||||
self, minconn, maxconn, *args, **kwargs)
|
self, minconn, maxconn, *args, **kwargs)
|
||||||
|
|
|
@ -63,8 +63,7 @@ class FixedOffsetTimezone(datetime.tzinfo):
|
||||||
try:
|
try:
|
||||||
return cls._cache[key]
|
return cls._cache[key]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
tz = datetime.tzinfo.__new__(cls, offset, name)
|
tz = super(FixedOffsetTimezone, cls).__new__(cls, offset, name)
|
||||||
tz.__init__(offset, name)
|
|
||||||
cls._cache[key] = tz
|
cls._cache[key] = tz
|
||||||
return tz
|
return tz
|
||||||
|
|
||||||
|
@ -73,6 +72,10 @@ class FixedOffsetTimezone(datetime.tzinfo):
|
||||||
return "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=%r)" \
|
return "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=%r)" \
|
||||||
% (offset_mins, self._name)
|
% (offset_mins, self._name)
|
||||||
|
|
||||||
|
def __getinitargs__(self):
|
||||||
|
offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
|
||||||
|
return (offset_mins, self._name)
|
||||||
|
|
||||||
def utcoffset(self, dt):
|
def utcoffset(self, dt):
|
||||||
return self._offset
|
return self._offset
|
||||||
|
|
||||||
|
@ -104,7 +107,6 @@ class LocalTimezone(datetime.tzinfo):
|
||||||
|
|
||||||
This is the exact implementation from the Python 2.3 documentation.
|
This is the exact implementation from the Python 2.3 documentation.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def utcoffset(self, dt):
|
def utcoffset(self, dt):
|
||||||
if self._isdst(dt):
|
if self._isdst(dt):
|
||||||
return DSTOFFSET
|
return DSTOFFSET
|
||||||
|
|
|
@ -73,16 +73,7 @@ qstring_quote(qstringObject *self)
|
||||||
|
|
||||||
/* encode the string into buffer */
|
/* encode the string into buffer */
|
||||||
Bytes_AsStringAndSize(str, &s, &len);
|
Bytes_AsStringAndSize(str, &s, &len);
|
||||||
|
if (!(buffer = psycopg_escape_string(self->conn, s, len, NULL, &qlen))) {
|
||||||
/* Call qstring_escape with the GIL released, then reacquire the GIL
|
|
||||||
before verifying that the results can fit into a Python string; raise
|
|
||||||
an exception if not. */
|
|
||||||
|
|
||||||
Py_BEGIN_ALLOW_THREADS
|
|
||||||
buffer = psycopg_escape_string(self->conn, s, len, NULL, &qlen);
|
|
||||||
Py_END_ALLOW_THREADS
|
|
||||||
|
|
||||||
if (buffer == NULL) {
|
|
||||||
Py_DECREF(str);
|
Py_DECREF(str);
|
||||||
PyErr_NoMemory();
|
PyErr_NoMemory();
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
|
@@ -141,6 +141,7 @@ HIDDEN void conn_notifies_process(connectionObject *self);
 RAISES_NEG HIDDEN int conn_setup(connectionObject *self, PGconn *pgconn);
 HIDDEN int conn_connect(connectionObject *self, long int async);
 HIDDEN void conn_close(connectionObject *self);
+HIDDEN void conn_close_locked(connectionObject *self);
 RAISES_NEG HIDDEN int conn_commit(connectionObject *self);
 RAISES_NEG HIDDEN int conn_rollback(connectionObject *self);
 RAISES_NEG HIDDEN int conn_set_session(connectionObject *self, const char *isolevel,
@@ -896,7 +896,7 @@ conn_poll(connectionObject *self)
         /* fetch the tuples (if there are any) and build the result. We
          * don't care if pq_fetch return 0 or 1, but if there was an error,
          * we want to signal it to the caller. */
-        if (pq_fetch(curs) == -1) {
+        if (pq_fetch(curs, 0) == -1) {
             res = PSYCO_POLL_ERROR;
         }
 
@@ -922,12 +922,24 @@ conn_close(connectionObject *self)
         return;
     }
 
-    /* sets this connection as closed even for other threads; also note that
-       we need to check the value of pgconn, because we get called even when
-       the connection fails! */
+    /* sets this connection as closed even for other threads; */
     Py_BEGIN_ALLOW_THREADS;
     pthread_mutex_lock(&self->lock);
+
+    conn_close_locked(self);
+
+    pthread_mutex_unlock(&self->lock);
+    Py_END_ALLOW_THREADS;
+}
+
+/* conn_close_locked - shut down the connection with the lock already taken */
+
+void conn_close_locked(connectionObject *self)
+{
+    if (self->closed) {
+        return;
+    }
 
     /* We used to call pq_abort_locked here, but the idea of issuing a
      * rollback on close/GC has been considered inappropriate.
     *
@@ -937,9 +949,10 @@ conn_close(connectionObject *self)
      * transaction though: to avoid these problems the transaction should be
      * closed only in status CONN_STATUS_READY.
      */
-
    self->closed = 1;
 
+    /* we need to check the value of pgconn, because we get called even when
+     * the connection fails! */
     if (self->pgconn) {
         PQfinish(self->pgconn);
         self->pgconn = NULL;
@@ -947,9 +960,6 @@ conn_close(connectionObject *self)
         PQfreeCancel(self->cancel);
         self->cancel = NULL;
     }
-
-    pthread_mutex_unlock(&self->lock);
-    Py_END_ALLOW_THREADS;
 }
 
 /* conn_commit - commit on a connection */
@@ -42,7 +42,7 @@
 /* cursor method - allocate a new cursor */
 
 #define psyco_conn_cursor_doc \
-"cursor(name=None, cursor_factory=extensions.cursor, withhold=None) -- new cursor\n\n" \
+"cursor(name=None, cursor_factory=extensions.cursor, withhold=False) -- new cursor\n\n" \
 "Return a new cursor.\n\nThe ``cursor_factory`` argument can be used to\n" \
 "create non-standard cursors by passing a class different from the\n" \
 "default. Note that the new class *should* be a sub-class of\n" \
@@ -50,25 +50,25 @@
 ":rtype: `extensions.cursor`"
 
 static PyObject *
-psyco_conn_cursor(connectionObject *self, PyObject *args, PyObject *keywds)
+psyco_conn_cursor(connectionObject *self, PyObject *args, PyObject *kwargs)
 {
-    const char *name = NULL;
-    PyObject *obj, *factory = NULL, *withhold = NULL;
+    PyObject *obj;
+    PyObject *name = Py_None;
+    PyObject *factory = (PyObject *)&cursorType;
+    PyObject *withhold = Py_False;
 
     static char *kwlist[] = {"name", "cursor_factory", "withhold", NULL};
 
-    if (!PyArg_ParseTupleAndKeywords(args, keywds, "|sOO", kwlist,
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO", kwlist,
                                      &name, &factory, &withhold)) {
         return NULL;
     }
 
-    if (withhold != NULL) {
-        if (PyObject_IsTrue(withhold) && name == NULL) {
-            PyErr_SetString(ProgrammingError,
-                "'withhold=True can be specified only for named cursors");
-            return NULL;
-        }
+    if (PyObject_IsTrue(withhold) && (name == Py_None)) {
+        PyErr_SetString(ProgrammingError,
+            "'withhold=True can be specified only for named cursors");
+        return NULL;
     }
 
     EXC_IF_CONN_CLOSED(self);
@@ -80,23 +80,20 @@ psyco_conn_cursor(connectionObject *self, PyObject *args, PyObject *kwargs)
         return NULL;
     }
 
-    if (name != NULL && self->async == 1) {
+    if (name != Py_None && self->async == 1) {
         PyErr_SetString(ProgrammingError,
                         "asynchronous connections "
                         "cannot produce named cursors");
         return NULL;
    }
 
-    Dprintf("psyco_conn_cursor: new cursor for connection at %p", self);
-    Dprintf("psyco_conn_cursor: parameters: name = %s", name);
+    Dprintf("psyco_conn_cursor: new %s cursor for connection at %p",
+        (name == Py_None ? "unnamed" : "named"), self);
 
-    if (factory == NULL) factory = (PyObject *)&cursorType;
-    if (name)
-        obj = PyObject_CallFunction(factory, "Os", self, name);
-    else
-        obj = PyObject_CallFunctionObjArgs(factory, self, NULL);
-
-    if (obj == NULL) return NULL;
+    if (!(obj = PyObject_CallFunctionObjArgs(factory, self, name, NULL))) {
+        return NULL;
+    }
 
     if (PyObject_IsInstance(obj, (PyObject *)&cursorType) == 0) {
         PyErr_SetString(PyExc_TypeError,
             "cursor factory must be subclass of psycopg2._psycopg.cursor");
@@ -104,7 +101,7 @@ psyco_conn_cursor(connectionObject *self, PyObject *args, PyObject *kwargs)
         return NULL;
     }
 
-    if (withhold != NULL && PyObject_IsTrue(withhold))
+    if (PyObject_IsTrue(withhold))
         ((cursorObject*)obj)->withhold = 1;
 
     Dprintf("psyco_conn_cursor: new cursor at %p: refcnt = "
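The Python-visible contract after the rework above (connection hypothetical)::

    ncur = conn.cursor("mycur", withhold=True)   # fine: a named cursor
    conn.cursor(withhold=True)   # ProgrammingError: withhold=True can be
                                 # specified only for named cursors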
@@ -63,7 +63,7 @@ psyco_curs_close(cursorObject *self, PyObject *args)
 
         EXC_IF_NO_MARK(self);
         PyOS_snprintf(buffer, 127, "CLOSE \"%s\"", self->name);
-        if (pq_execute(self, buffer, 0) == -1) return NULL;
+        if (pq_execute(self, buffer, 0, 0) == -1) return NULL;
     }
 
     self->closed = 1;
@@ -365,7 +365,8 @@ _psyco_curs_merge_query_args(cursorObject *self,
 
 RAISES_NEG static int
 _psyco_curs_execute(cursorObject *self,
-                    PyObject *operation, PyObject *vars, long int async)
+                    PyObject *operation, PyObject *vars,
+                    long int async, int no_result)
 {
     int res = -1;
     int tmp;
@@ -432,7 +433,7 @@ _psyco_curs_execute(cursorObject *self,
 
     /* At this point, the SQL statement must be str, not unicode */
 
-    tmp = pq_execute(self, Bytes_AS_STRING(self->query), async);
+    tmp = pq_execute(self, Bytes_AS_STRING(self->query), async, no_result);
     Dprintf("psyco_curs_execute: res = %d, pgres = %p", tmp, self->pgres);
     if (tmp < 0) { goto exit; }
 
@@ -479,7 +480,7 @@ psyco_curs_execute(cursorObject *self, PyObject *args, PyObject *kwargs)
     EXC_IF_ASYNC_IN_PROGRESS(self, execute);
     EXC_IF_TPC_PREPARED(self->conn, execute);
 
-    if (0 > _psyco_curs_execute(self, operation, vars, self->conn->async)) {
+    if (0 > _psyco_curs_execute(self, operation, vars, self->conn->async, 0)) {
         return NULL;
     }
 
@@ -524,7 +525,7 @@ psyco_curs_executemany(cursorObject *self, PyObject *args, PyObject *kwargs)
     }
 
     while ((v = PyIter_Next(vars)) != NULL) {
-        if (0 > _psyco_curs_execute(self, operation, v, 0)) {
+        if (0 > _psyco_curs_execute(self, operation, v, 0, 1)) {
            Py_DECREF(v);
            Py_XDECREF(iter);
            return NULL;
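The extra no_result flag is what implements ticket #133; from Python the
change looks like this (table hypothetical)::

    curs.executemany("INSERT INTO test (num) VALUES (%s) RETURNING id",
                     [(1,), (2,)])
    curs.fetchone()   # raises: results of executemany() are now discarded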
@@ -655,7 +656,7 @@ _psyco_curs_prefetch(cursorObject *self)
     if (self->pgres == NULL) {
         Dprintf("_psyco_curs_prefetch: trying to fetch data");
         do {
-            i = pq_fetch(self);
+            i = pq_fetch(self, 0);
             Dprintf("_psycopg_curs_prefetch: result = %d", i);
         } while(i == 1);
     }
@@ -757,7 +758,7 @@ psyco_curs_fetchone(cursorObject *self, PyObject *args)
         EXC_IF_ASYNC_IN_PROGRESS(self, fetchone);
         EXC_IF_TPC_PREPARED(self->conn, fetchone);
         PyOS_snprintf(buffer, 127, "FETCH FORWARD 1 FROM \"%s\"", self->name);
-        if (pq_execute(self, buffer, 0) == -1) return NULL;
+        if (pq_execute(self, buffer, 0, 0) == -1) return NULL;
         if (_psyco_curs_prefetch(self) < 0) return NULL;
     }
 
@@ -808,7 +809,7 @@ psyco_curs_next_named(cursorObject *self)
 
         PyOS_snprintf(buffer, 127, "FETCH FORWARD %ld FROM \"%s\"",
             self->itersize, self->name);
-        if (pq_execute(self, buffer, 0) == -1) return NULL;
+        if (pq_execute(self, buffer, 0, 0) == -1) return NULL;
         if (_psyco_curs_prefetch(self) < 0) return NULL;
     }
 
@@ -877,7 +878,7 @@ psyco_curs_fetchmany(cursorObject *self, PyObject *args, PyObject *kwords)
         EXC_IF_TPC_PREPARED(self->conn, fetchone);
         PyOS_snprintf(buffer, 127, "FETCH FORWARD %d FROM \"%s\"",
             (int)size, self->name);
-        if (pq_execute(self, buffer, 0) == -1) { goto exit; }
+        if (pq_execute(self, buffer, 0, 0) == -1) { goto exit; }
         if (_psyco_curs_prefetch(self) < 0) { goto exit; }
     }
 
@@ -952,7 +953,7 @@ psyco_curs_fetchall(cursorObject *self, PyObject *args)
         EXC_IF_ASYNC_IN_PROGRESS(self, fetchall);
         EXC_IF_TPC_PREPARED(self->conn, fetchall);
         PyOS_snprintf(buffer, 127, "FETCH FORWARD ALL FROM \"%s\"", self->name);
-        if (pq_execute(self, buffer, 0) == -1) { goto exit; }
+        if (pq_execute(self, buffer, 0, 0) == -1) { goto exit; }
         if (_psyco_curs_prefetch(self) < 0) { goto exit; }
     }
 
@@ -1045,7 +1046,8 @@ psyco_curs_callproc(cursorObject *self, PyObject *args)
 
     if (!(operation = Bytes_FromString(sql))) { goto exit; }
 
-    if (0 <= _psyco_curs_execute(self, operation, parameters, self->conn->async)) {
+    if (0 <= _psyco_curs_execute(self, operation, parameters,
+            self->conn->async, 0)) {
         Py_INCREF(parameters);
         res = parameters;
     }
@@ -1172,7 +1174,7 @@ psyco_curs_scroll(cursorObject *self, PyObject *args, PyObject *kwargs)
     else {
         PyOS_snprintf(buffer, 127, "MOVE %d FROM \"%s\"", value, self->name);
     }
-    if (pq_execute(self, buffer, 0) == -1) return NULL;
+    if (pq_execute(self, buffer, 0, 0) == -1) return NULL;
     if (_psyco_curs_prefetch(self) < 0) return NULL;
 }
 
@@ -1352,7 +1354,7 @@ psyco_curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
     Py_INCREF(file);
     self->copyfile = file;
 
-    if (pq_execute(self, query, 0) >= 0) {
+    if (pq_execute(self, query, 0, 0) >= 0) {
         res = Py_None;
         Py_INCREF(Py_None);
     }
@@ -1448,7 +1450,7 @@ psyco_curs_copy_to(cursorObject *self, PyObject *args, PyObject *kwargs)
     Py_INCREF(file);
     self->copyfile = file;
 
-    if (pq_execute(self, query, 0) >= 0) {
+    if (pq_execute(self, query, 0, 0) >= 0) {
         res = Py_None;
         Py_INCREF(Py_None);
     }
@@ -1522,7 +1524,7 @@ psyco_curs_copy_expert(cursorObject *self, PyObject *args, PyObject *kwargs)
     self->copyfile = file;
 
     /* At this point, the SQL statement must be str, not unicode */
-    if (pq_execute(self, Bytes_AS_STRING(sql), 0) >= 0) {
+    if (pq_execute(self, Bytes_AS_STRING(sql), 0, 0) >= 0) {
         res = Py_None;
         Py_INCREF(res);
     }
@@ -1724,7 +1726,7 @@ cursor_setup(cursorObject *self, connectionObject *conn, const char *name)
|
||||||
|
|
||||||
if (name) {
|
if (name) {
|
||||||
if (!(self->name = psycopg_escape_identifier_easy(name, 0))) {
|
if (!(self->name = psycopg_escape_identifier_easy(name, 0))) {
|
||||||
return 1;
|
return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1733,7 +1735,7 @@ cursor_setup(cursorObject *self, connectionObject *conn, const char *name)
|
||||||
(PyObject *)&connectionType) == 0) {
|
(PyObject *)&connectionType) == 0) {
|
||||||
PyErr_SetString(PyExc_TypeError,
|
PyErr_SetString(PyExc_TypeError,
|
||||||
"argument 1 must be subclass of psycopg2._psycopg.connection");
|
"argument 1 must be subclass of psycopg2._psycopg.connection");
|
||||||
return 1;
|
return -1;
|
||||||
} */
|
} */
|
||||||
Py_INCREF(conn);
|
Py_INCREF(conn);
|
||||||
self->conn = conn;
|
self->conn = conn;
|
||||||
|
@ -1808,15 +1810,35 @@ cursor_dealloc(PyObject* obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
cursor_init(PyObject *obj, PyObject *args, PyObject *kwds)
|
cursor_init(PyObject *obj, PyObject *args, PyObject *kwargs)
|
||||||
{
|
{
|
||||||
const char *name = NULL;
|
|
||||||
PyObject *conn;
|
PyObject *conn;
|
||||||
|
PyObject *name = Py_None;
|
||||||
|
const char *cname;
|
||||||
|
|
||||||
if (!PyArg_ParseTuple(args, "O|s", &conn, &name))
|
static char *kwlist[] = {"conn", "name", NULL};
|
||||||
|
|
||||||
|
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O", kwlist,
|
||||||
|
&connectionType, &conn, &name)) {
|
||||||
return -1;
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
return cursor_setup((cursorObject *)obj, (connectionObject *)conn, name);
|
if (name == Py_None) {
|
||||||
|
cname = NULL;
|
||||||
|
} else {
|
||||||
|
Py_INCREF(name); /* for ensure_bytes */
|
||||||
|
if (!(name = psycopg_ensure_bytes(name))) {
|
||||||
|
/* name has had a ref stolen */
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
Py_DECREF(name);
|
||||||
|
|
||||||
|
if (!(cname = Bytes_AsString(name))) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return cursor_setup((cursorObject *)obj, (connectionObject *)conn, cname);
|
||||||
}
|
}
|
||||||
|
|
||||||
static PyObject *
|
static PyObject *
|
||||||
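The rewritten cursor_init() accepts the cursor name by keyword and as None instead of requiring a positional string. A minimal sketch of the behaviour this enables at the Python level (assuming a reachable database; 'dbname=test' is a placeholder DSN):

import psycopg2
import psycopg2.extras

conn = psycopg2.connect('dbname=test')   # placeholder DSN

curs = conn.cursor()                 # unnamed client-side cursor
curs = conn.cursor(None)             # None is now accepted as "no name"
curs = conn.cursor(name='mycursor')  # named (server-side) cursor, by keyword

# the name can be combined with a cursor_factory override
curs = conn.cursor('mycursor2', cursor_factory=psycopg2.extras.DictCursor)
print curs.name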
@@ -34,7 +34,7 @@
 HIDDEN PyObject *wait_callback = NULL;

 static PyObject *have_wait_callback(void);
-static void psyco_clear_result_blocking(connectionObject *conn);
+static void green_panic(connectionObject *conn);

 /* Register a callback function to block waiting for data.
  *

@@ -178,7 +178,7 @@ psyco_exec_green(connectionObject *conn, const char *command)
     conn->async_status = ASYNC_WRITE;

     if (0 != psyco_wait(conn)) {
-        psyco_clear_result_blocking(conn);
+        green_panic(conn);
         goto end;
     }

@@ -192,22 +192,21 @@ end:
 }


-/* Discard the result of the currently executed query, blocking.
- *
- * This function doesn't honour the wait callback: it can be used in case of
- * emergency if the callback fails in order to put the connection back into a
- * consistent state.
- *
- * If any command was issued before clearing the result, libpq would fail with
- * the error "another command is already in progress".
+/* There has been a communication error during query execution. It may have
+ * happened e.g. for a network error or an error in the callback, and we
+ * cannot tell the two apart.
+ * Trying to PQcancel or PQgetResult to put the connection back into a working
+ * state doesn't work nicely (issue #113): the program blocks and the
+ * interpreter won't even respond to SIGINT. PQreset could work async, but then
+ * the python program would have a connection made but not configured, which it
+ * is probably not designed to handle. So for the moment we do the kindest
+ * thing we can: we close the connection. A long-running program should
+ * already have a way to discard broken connections; a short-lived one would
+ * benefit from a working ctrl-c.
  */
 static void
-psyco_clear_result_blocking(connectionObject *conn)
+green_panic(connectionObject *conn)
 {
-    PGresult *res;
-
-    Dprintf("psyco_clear_result_blocking");
-    while (NULL != (res = PQgetResult(conn->pgconn))) {
-        PQclear(res);
-    }
+    Dprintf("green_panic: closing the connection");
+    conn_close_locked(conn);
 }
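As the green_panic() comment explains, after an error in the wait callback the connection is now closed rather than left blocking. A sketch of what a program using a deliberately broken callback observes, modelled on the test changes further down (placeholder DSN):

import psycopg2
import psycopg2.extensions
import psycopg2.extras

psycopg2.extensions.set_wait_callback(psycopg2.extras.wait_select)
conn = psycopg2.connect('dbname=test')   # placeholder DSN
curs = conn.cursor()

# a callback that fails on the next wait
psycopg2.extensions.set_wait_callback(lambda conn: 1 // 0)
try:
    curs.execute("select 1")
except ZeroDivisionError:
    # the connection has been closed instead of being left unresponsive
    assert conn.closed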
@@ -829,12 +829,16 @@ pq_flush(connectionObject *conn)
 }

 /* pq_execute - execute a query, possibly asynchronously
-
-   this fucntion locks the connection object
-   this function call Py_*_ALLOW_THREADS macros */
+ *
+ * With no_result an eventual query result is discarded.
+ * Currently only used to implement cursor.executemany().
+ *
+ * This function locks the connection object
+ * This function calls Py_*_ALLOW_THREADS macros
+ */

 RAISES_NEG int
-pq_execute(cursorObject *curs, const char *query, int async)
+pq_execute(cursorObject *curs, const char *query, int async, int no_result)
 {
     PGresult *pgres = NULL;
     char *error = NULL;

@@ -938,7 +942,7 @@ pq_execute(cursorObject *curs, const char *query, int async)
        to respect the old DBAPI-2.0 compatible behaviour */
     if (async == 0) {
         Dprintf("pq_execute: entering syncronous DBAPI compatibility mode");
-        if (pq_fetch(curs) < 0) return -1;
+        if (pq_fetch(curs, no_result) < 0) return -1;
     }
     else {
         PyObject *tmp;

@@ -976,7 +980,7 @@ pq_send_query(connectionObject *conn, const char *query)

 /* Return the last result available on the connection.
  *
- * The function will block will block only if a command is active and the
+ * The function will block only if a command is active and the
  * necessary response data has not yet been read by PQconsumeInput.
  *
 * The result should be disposed using PQclear()

@@ -1316,7 +1320,13 @@ _pq_copy_in_v3(cursorObject *curs)
     }
     else {
         /* and finally we grab the operation result from the backend */
-        while ((curs->pgres = PQgetResult(curs->conn->pgconn)) != NULL) {
+        for (;;) {
+            Py_BEGIN_ALLOW_THREADS;
+            curs->pgres = PQgetResult(curs->conn->pgconn);
+            Py_END_ALLOW_THREADS;
+
+            if (NULL == curs->pgres)
+                break;
             if (PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR)
                 pq_raise(curs->conn, curs, NULL);
             IFCLEARPGRES(curs->pgres);

@@ -1386,7 +1396,13 @@ _pq_copy_out_v3(cursorObject *curs)

     /* and finally we grab the operation result from the backend */
     IFCLEARPGRES(curs->pgres);
-    while ((curs->pgres = PQgetResult(curs->conn->pgconn)) != NULL) {
+    for (;;) {
+        Py_BEGIN_ALLOW_THREADS;
+        curs->pgres = PQgetResult(curs->conn->pgconn);
+        Py_END_ALLOW_THREADS;
+
+        if (NULL == curs->pgres)
+            break;
         if (PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR)
             pq_raise(curs->conn, curs, NULL);
         IFCLEARPGRES(curs->pgres);

@@ -1399,7 +1415,7 @@ exit:
 }

 int
-pq_fetch(cursorObject *curs)
+pq_fetch(cursorObject *curs, int no_result)
 {
     int pgstatus, ex = -1;
     const char *rowcount;

@@ -1463,10 +1479,18 @@ pq_fetch(cursorObject *curs)
         break;

     case PGRES_TUPLES_OK:
-        Dprintf("pq_fetch: data from a SELECT (got tuples)");
+        if (!no_result) {
+            Dprintf("pq_fetch: got tuples");
         curs->rowcount = PQntuples(curs->pgres);
         if (0 == _pq_fetch_tuples(curs)) { ex = 0; }
         /* don't clear curs->pgres, because it contains the results! */
+        }
+        else {
+            Dprintf("pq_fetch: got tuples, discarding them");
+            IFCLEARPGRES(curs->pgres);
+            curs->rowcount = -1;
+            ex = 0;
+        }
         break;

     case PGRES_EMPTY_QUERY:
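The no_result flag threaded through pq_execute() and pq_fetch() lets executemany() discard any result set the statements produce instead of materializing it. A sketch of the visible effect (placeholder DSN; the -1 rowcount follows from the discarding branch in pq_fetch() above):

import psycopg2

conn = psycopg2.connect('dbname=test')   # placeholder DSN
curs = conn.cursor()
curs.execute("create temp table t (n int)")

# each statement returns rows, but executemany() throws them away
curs.executemany("insert into t values (%s) returning n",
                 [(1,), (2,), (3,)])
print curs.rowcount    # -1: the results were discarded

curs.execute("select n from t order by n")
print curs.fetchall()  # [(1,), (2,), (3,)]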
@@ -35,8 +35,9 @@

 /* exported functions */
 HIDDEN PGresult *pq_get_last_result(connectionObject *conn);
-RAISES_NEG HIDDEN int pq_fetch(cursorObject *curs);
-RAISES_NEG HIDDEN int pq_execute(cursorObject *curs, const char *query, int async);
+RAISES_NEG HIDDEN int pq_fetch(cursorObject *curs, int no_result);
+RAISES_NEG HIDDEN int pq_execute(cursorObject *curs, const char *query,
+                                 int async, int no_result);
 HIDDEN int pq_send_query(connectionObject *conn, const char *query);
 HIDDEN int pq_begin_locked(connectionObject *conn, PGresult **pgres,
                            char **error, PyThreadState **tstate);
@@ -433,6 +433,8 @@ static struct {
 };


+#if PY_VERSION_HEX >= 0x02050000
+
 /* Error.__reduce_ex__
  *
  * The method is required to make exceptions picklable: set the cursor

@@ -484,6 +486,8 @@ error:
     return rv;
 }

+#endif  /* PY_VERSION_HEX >= 0x02050000 */
+
 static int
 psyco_errors_init(void)
 {

@@ -498,8 +502,10 @@ psyco_errors_init(void)
     PyObject *descr = NULL;
     int rv = -1;

+#if PY_VERSION_HEX >= 0x02050000
     static PyMethodDef psyco_error_reduce_ex_def =
         {"__reduce_ex__", psyco_error_reduce_ex, METH_VARARGS, "pickle helper"};
+#endif

     for (i=0; exctable[i].name; i++) {
         if (!(dict = PyDict_New())) { goto exit; }
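These PY_VERSION_HEX guards compile the Error.__reduce_ex__ pickle helper only on Python 2.5 and later, where the exception machinery it hooks into exists. A hedged sketch of what the helper enables, assuming a connection failure to produce an error (the database name below is a deliberate dummy; attributes that cannot travel through pickle, such as the cursor, are dropped):

import pickle
import psycopg2

try:
    psycopg2.connect('dbname=no_such_database_hopefully')
except psycopg2.OperationalError, e:
    # on Python >= 2.5 psycopg2 errors can round-trip through pickle
    e2 = pickle.loads(pickle.dumps(e))
    print type(e2), e2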
@@ -35,6 +35,11 @@
 # error "psycopg requires Python >= 2.4"
 #endif

+#if PY_VERSION_HEX < 0x02050000
+/* Function missing in Py 2.4 */
+#define PyErr_WarnEx(cat,msg,lvl) PyErr_Warn(cat,msg)
+#endif
+
 #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
 typedef int Py_ssize_t;
 #define PY_SSIZE_T_MIN INT_MIN
@@ -212,8 +212,10 @@ typecast_array_scan(const char *str, Py_ssize_t strlength,
             PyList_Append(array, sub);
             Py_DECREF(sub);

-            if (stack_index == MAX_DIMENSIONS)
+            if (stack_index == MAX_DIMENSIONS) {
+                PyErr_SetString(DataError, "excessive array dimensions");
                 return -1;
+            }

             stack[stack_index++] = array;
             array = sub;

@@ -224,9 +226,11 @@ typecast_array_scan(const char *str, Py_ssize_t strlength,
         }

         else if (state == ASCAN_END) {
-            if (--stack_index < 0)
+            if (stack_index == 0) {
+                PyErr_SetString(DataError, "unbalanced braces in array");
                 return -1;
-            array = stack[stack_index];
+            }
+            array = stack[--stack_index];
         }

         else if (state == ASCAN_EOF)

@@ -253,7 +257,11 @@ typecast_GENERIC_ARRAY_cast(const char *str, Py_ssize_t len, PyObject *curs)
     if (str[0] == '[')
         typecast_array_cleanup(&str, &len);
     if (str[0] != '{') {
-        PyErr_SetString(Error, "array does not start with '{'");
+        PyErr_SetString(DataError, "array does not start with '{'");
+        return NULL;
+    }
+    if (str[1] == '\0') {
+        PyErr_SetString(DataError, "malformed array: '{'");
         return NULL;
     }
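With these changes, over-nested or unbalanced array literals raise DataError instead of failing silently. Roughly what this looks like from Python, along the lines of the testArrayMalformed test later in the commit (placeholder DSN):

import psycopg2
import psycopg2.extensions

conn = psycopg2.connect('dbname=test')   # placeholder DSN
curs = conn.cursor()

# each of these malformed literals is now rejected with DataError
for s in ['', '{', '{}}', '{' * 20 + '}' * 20]:
    try:
        psycopg2.extensions.STRINGARRAY(s, curs)
    except psycopg2.DataError, e:
        print "rejected %r: %s" % (s, e)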
@@ -28,6 +28,7 @@
 #include "psycopg/psycopg.h"

 #include "psycopg/xid.h"
+#include "psycopg/cursor.h"


 static const char xid_doc[] =

@@ -660,8 +661,11 @@ xid_recover(PyObject *conn)
     PyObject *tmp;
     Py_ssize_t len, i;

-    /* curs = conn.cursor() */
-    if (!(curs = PyObject_CallMethod(conn, "cursor", NULL))) { goto exit; }
+    /* curs = conn.cursor()
+     * (sort of. Use the real cursor in case the connection returns
+     * something non-dbapi -- see ticket #114) */
+    if (!(curs = PyObject_CallFunctionObjArgs(
+            (PyObject *)&cursorType, conn, NULL))) { goto exit; }

     /* curs.execute(...) */
     if (!(tmp = PyObject_CallMethod(curs, "execute", "s",
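Building the cursor through cursorType directly means tpc_recover() no longer depends on the row format of whatever cursor class the connection would normally hand out. A sketch with a dict-returning connection, mirroring the new two-phase-commit test further down (placeholder DSN):

import psycopg2
from psycopg2.extras import RealDictConnection

conn = psycopg2.connect('dbname=test',   # placeholder DSN
                        connection_factory=RealDictConnection)
conn.tpc_begin('example-gtrid')
conn.tpc_prepare()
conn.reset()

# recovery works although RealDictConnection cursors return dicts
for xid in conn.tpc_recover():
    if xid.gtrid == 'example-gtrid':
        conn.tpc_rollback(xid)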
sandbox/test_green_error.py (new file, 81 lines)
@@ -0,0 +1,81 @@
#!/usr/bin/env python
"""Test for issue #113: test with error during green processing
"""

DSN = 'dbname=test'

import eventlet.patcher
eventlet.patcher.monkey_patch()

import os
import signal
from time import sleep

import psycopg2
from psycopg2 import extensions
from eventlet.hubs import trampoline


# register a test wait callback that fails if SIGHUP is received

panic = []

def wait_cb(conn):
    """A wait callback useful to allow eventlet to work with Psycopg."""
    while 1:
        if panic:
            raise Exception('whatever')

        state = conn.poll()
        if state == extensions.POLL_OK:
            break
        elif state == extensions.POLL_READ:
            trampoline(conn.fileno(), read=True)
        elif state == extensions.POLL_WRITE:
            trampoline(conn.fileno(), write=True)
        else:
            raise psycopg2.OperationalError(
                "Bad result from poll: %r" % state)

extensions.set_wait_callback(wait_cb)


# SIGHUP handler to inject a fail in the callback

def handler(signum, frame):
    panic.append(True)

signal.signal(signal.SIGHUP, handler)


# Simulate another green thread working

def worker():
    while 1:
        print "I'm working"
        sleep(1)

eventlet.spawn(worker)


# You can unplug the network cable etc. here.
# Kill -HUP will raise an exception in the callback.

print "PID", os.getpid()
conn = psycopg2.connect(DSN)
curs = conn.cursor()
try:
    for i in range(1000):
        curs.execute("select %s, pg_sleep(1)", (i,))
        r = curs.fetchone()
        print "selected", r

except BaseException, e:
    print "got exception:", e.__class__.__name__, e

if conn.closed:
    print "the connection is closed"
else:
    conn.rollback()
    curs.execute("select 1")
    print curs.fetchone()
setup.py
@@ -73,7 +73,7 @@ except ImportError:
 # Take a look at http://www.python.org/dev/peps/pep-0386/
 # for a consistent versioning pattern.

-PSYCOPG_VERSION = '2.4.5'
+PSYCOPG_VERSION = '2.4.6'

 version_flags = ['dt', 'dec']
@@ -458,8 +458,8 @@ class ConnectionTwoPhaseTests(unittest.TestCase):
         cnn.close()
         return rv

-    def connect(self):
-        conn = psycopg2.connect(dsn)
+    def connect(self, **kwargs):
+        conn = psycopg2.connect(dsn, **kwargs)
         self._conns.append(conn)
         return conn

@@ -760,6 +760,20 @@ class ConnectionTwoPhaseTests(unittest.TestCase):
         cnn.tpc_prepare()
         self.assertRaises(psycopg2.ProgrammingError, cnn.cancel)

+    def test_tpc_recover_non_dbapi_connection(self):
+        from psycopg2.extras import RealDictConnection
+        cnn = self.connect(connection_factory=RealDictConnection)
+        cnn.tpc_begin('dict-connection')
+        cnn.tpc_prepare()
+        cnn.reset()
+
+        xids = cnn.tpc_recover()
+        xid = [ xid for xid in xids if xid.database == dbname ][0]
+        self.assertEqual(None, xid.format_id)
+        self.assertEqual('dict-connection', xid.gtrid)
+        self.assertEqual(None, xid.bqual)
+

 from testutils import skip_if_tpc_disabled
 decorate_all_tests(ConnectionTwoPhaseTests, skip_if_tpc_disabled)
@@ -165,6 +165,10 @@ class CursorTests(unittest.TestCase):
         del curs
         self.assert_(w() is None)

+    def test_null_name(self):
+        curs = self.conn.cursor(None)
+        self.assertEqual(curs.name, None)
+
     def test_invalid_name(self):
         curs = self.conn.cursor()
         curs.execute("create temp table invname (data int);")
@@ -539,6 +539,24 @@ class FixedOffsetTimezoneTests(unittest.TestCase):
         self.assert_(FixedOffsetTimezone(9 * 60) is not FixedOffsetTimezone(9 * 60, 'FOO'))
         self.assert_(FixedOffsetTimezone(name='FOO') is not FixedOffsetTimezone(9 * 60, 'FOO'))

+    def test_pickle(self):
+        # ticket #135
+        import pickle
+
+        tz11 = FixedOffsetTimezone(60)
+        tz12 = FixedOffsetTimezone(120)
+        for proto in [-1, 0, 1, 2]:
+            tz21, tz22 = pickle.loads(pickle.dumps([tz11, tz12], proto))
+            self.assertEqual(tz11, tz21)
+            self.assertEqual(tz12, tz22)
+
+        tz11 = FixedOffsetTimezone(60, name='foo')
+        tz12 = FixedOffsetTimezone(120, name='bar')
+        for proto in [-1, 0, 1, 2]:
+            tz21, tz22 = pickle.loads(pickle.dumps([tz11, tz12], proto))
+            self.assertEqual(tz11, tz21)
+            self.assertEqual(tz12, tz22)
+
 def test_suite():
     return unittest.TestLoader().loadTestsFromName(__name__)
@@ -35,6 +35,16 @@ class ExtrasDictCursorTests(unittest.TestCase):
     def tearDown(self):
         self.conn.close()

+    def testDictConnCursorArgs(self):
+        self.conn.close()
+        self.conn = psycopg2.connect(dsn, connection_factory=psycopg2.extras.DictConnection)
+        cur = self.conn.cursor()
+        self.assert_(isinstance(cur, psycopg2.extras.DictCursor))
+        self.assertEqual(cur.name, None)
+        # overridable
+        cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.NamedTupleCursor)
+        self.assertEqual(cur.name, 'foo')
+        self.assert_(isinstance(cur, psycopg2.extras.NamedTupleCursor))
+
     def testDictCursorWithPlainCursorFetchOne(self):
         self._testWithPlainCursor(lambda curs: curs.fetchone())

@@ -195,6 +205,32 @@ class ExtrasDictCursorTests(unittest.TestCase):
         for i, r in enumerate(curs):
             self.assertEqual(i + 1, curs.rownumber)

+    def testPickleDictRow(self):
+        import pickle
+        curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
+        curs.execute("select 10 as a, 20 as b")
+        r = curs.fetchone()
+        d = pickle.dumps(r)
+        r1 = pickle.loads(d)
+        self.assertEqual(r, r1)
+        self.assertEqual(r[0], r1[0])
+        self.assertEqual(r[1], r1[1])
+        self.assertEqual(r['a'], r1['a'])
+        self.assertEqual(r['b'], r1['b'])
+        self.assertEqual(r._index, r1._index)
+
+    def testPickleRealDictRow(self):
+        import pickle
+        curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
+        curs.execute("select 10 as a, 20 as b")
+        r = curs.fetchone()
+        d = pickle.dumps(r)
+        r1 = pickle.loads(d)
+        self.assertEqual(r, r1)
+        self.assertEqual(r['a'], r1['a'])
+        self.assertEqual(r['b'], r1['b'])
+        self.assertEqual(r._column_mapping, r1._column_mapping)
+

 class NamedTupleCursorTest(unittest.TestCase):
     def setUp(self):

@@ -219,6 +255,12 @@ class NamedTupleCursorTest(unittest.TestCase):
         if self.conn is not None:
             self.conn.close()

+    @skip_if_no_namedtuple
+    def test_cursor_args(self):
+        cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.DictCursor)
+        self.assertEqual(cur.name, 'foo')
+        self.assert_(isinstance(cur, psycopg2.extras.DictCursor))
+
     @skip_if_no_namedtuple
     def test_fetchone(self):
         curs = self.conn.cursor()
@@ -79,6 +79,9 @@ class GreenTests(unittest.TestCase):
             warnings.warn("sending a large query didn't trigger block on write.")

     def test_error_in_callback(self):
+        # behaviour changed after issue #113: if there is an error in the
+        # callback for the moment we don't have a way to reset the connection
+        # without blocking (ticket #113) so just close it.
         conn = self.conn
         curs = conn.cursor()
         curs.execute("select 1") # have a BEGIN

@@ -88,11 +91,21 @@ class GreenTests(unittest.TestCase):
         psycopg2.extensions.set_wait_callback(lambda conn: 1//0)
         self.assertRaises(ZeroDivisionError, curs.execute, "select 2")

+        self.assert_(conn.closed)
+
+    def test_dont_freak_out(self):
+        # if there is an error in a green query, don't freak out and close
+        # the connection
+        conn = self.conn
+        curs = conn.cursor()
+        self.assertRaises(psycopg2.ProgrammingError,
+            curs.execute, "select the unselectable")
+
         # check that the connection is left in an usable state
-        psycopg2.extensions.set_wait_callback(psycopg2.extras.wait_select)
+        self.assert_(not conn.closed)
         conn.rollback()
-        curs.execute("select 2")
-        self.assertEqual(2, curs.fetchone()[0])
+        curs.execute("select 1")
+        self.assertEqual(curs.fetchone()[0], 1)


 def test_suite():
@@ -40,10 +40,10 @@ class ConnectTestCase(unittest.TestCase):
         psycopg2._connect = self._connect_orig

     def test_there_has_to_be_something(self):
-        self.assertRaises(psycopg2.InterfaceError, psycopg2.connect)
-        self.assertRaises(psycopg2.InterfaceError, psycopg2.connect,
+        self.assertRaises(TypeError, psycopg2.connect)
+        self.assertRaises(TypeError, psycopg2.connect,
             connection_factory=lambda dsn, async=False: None)
-        self.assertRaises(psycopg2.InterfaceError, psycopg2.connect,
+        self.assertRaises(TypeError, psycopg2.connect,
             async=True)

     def test_no_keywords(self):

@@ -127,6 +127,14 @@ class ConnectTestCase(unittest.TestCase):
         psycopg2.connect(database=r"\every thing'")
         self.assertEqual(self.args[0], r"dbname='\\every thing\''")

+    def test_no_kwargs_swallow(self):
+        self.assertRaises(TypeError,
+            psycopg2.connect, 'dbname=foo', database='foo')
+        self.assertRaises(TypeError,
+            psycopg2.connect, 'dbname=foo', user='postgres')
+        self.assertRaises(TypeError,
+            psycopg2.connect, 'dbname=foo', no_such_param='meh')
+

 class ExceptionsTestCase(unittest.TestCase):
     def setUp(self):
@@ -200,6 +200,13 @@ class TypesBasicTests(unittest.TestCase):
         r = self.execute("SELECT %s AS foo", (ss,))
         self.failUnlessEqual(ss, r)

+    def testArrayMalformed(self):
+        curs = self.conn.cursor()
+        ss = ['', '{', '{}}', '{' * 20 + '}' * 20]
+        for s in ss:
+            self.assertRaises(psycopg2.DataError,
+                psycopg2.extensions.STRINGARRAY, b(s), curs)
+
     @testutils.skip_from_python(3)
     def testTypeRoundtripBuffer(self):
         o1 = buffer("".join(map(chr, range(256))))
@@ -424,6 +424,30 @@ class HstoreTestCase(unittest.TestCase):
             psycopg2.extensions.string_types.pop(oid)
             psycopg2.extensions.string_types.pop(aoid)

+    @skip_if_no_hstore
+    def test_non_dbapi_connection(self):
+        from psycopg2.extras import RealDictConnection
+        from psycopg2.extras import register_hstore
+
+        conn = psycopg2.connect(dsn, connection_factory=RealDictConnection)
+        try:
+            register_hstore(conn)
+            curs = conn.cursor()
+            curs.execute("select ''::hstore as x")
+            self.assertEqual(curs.fetchone()['x'], {})
+        finally:
+            conn.close()
+
+        conn = psycopg2.connect(dsn, connection_factory=RealDictConnection)
+        try:
+            curs = conn.cursor()
+            register_hstore(curs)
+            curs.execute("select ''::hstore as x")
+            self.assertEqual(curs.fetchone()['x'], {})
+        finally:
+            conn.close()
+

 def skip_if_no_composite(f):
     def skip_if_no_composite_(self):
         if self.conn.server_version < 80000:

@@ -479,6 +503,7 @@ class AdaptTypeTestCase(unittest.TestCase):
             self.assertEqual(CompositeCaster.tokenize(s), v)

         ok("(,)", [None, None])
+        ok('(,"")', [None, ''])
         ok('(hello,,10.234,2010-11-11)', ['hello', None, '10.234', '2010-11-11'])
         ok('(10,"""")', ['10', '"'])
         ok('(10,",")', ['10', ','])

@@ -532,6 +557,26 @@ class AdaptTypeTestCase(unittest.TestCase):
         self.assertEqual(v.astring, "hello")
         self.assertEqual(v.adate, date(2011,1,2))

+    @skip_if_no_composite
+    def test_empty_string(self):
+        # issue #141
+        self._create_type("type_ss", [('s1', 'text'), ('s2', 'text')])
+        curs = self.conn.cursor()
+        psycopg2.extras.register_composite("type_ss", curs)
+
+        def ok(t):
+            curs.execute("select %s::type_ss", (t,))
+            rv = curs.fetchone()[0]
+            self.assertEqual(t, rv)
+
+        ok(('a', 'b'))
+        ok(('a', ''))
+        ok(('', 'b'))
+        ok(('a', None))
+        ok((None, 'b'))
+        ok(('', ''))
+        ok((None, None))
+
     @skip_if_no_composite
     def test_cast_nested(self):
         self._create_type("type_is",

@@ -712,6 +757,30 @@ class AdaptTypeTestCase(unittest.TestCase):
         self.assertEqual(r[0], (2, 'test2'))
         self.assertEqual(r[1], [(3, 'testc', 2), (4, 'testd', 2)])

+    @skip_if_no_hstore
+    def test_non_dbapi_connection(self):
+        from psycopg2.extras import RealDictConnection
+        from psycopg2.extras import register_composite
+        self._create_type("type_ii", [("a", "integer"), ("b", "integer")])
+
+        conn = psycopg2.connect(dsn, connection_factory=RealDictConnection)
+        try:
+            register_composite('type_ii', conn)
+            curs = conn.cursor()
+            curs.execute("select '(1,2)'::type_ii as x")
+            self.assertEqual(curs.fetchone()['x'], (1,2))
+        finally:
+            conn.close()
+
+        conn = psycopg2.connect(dsn, connection_factory=RealDictConnection)
+        try:
+            curs = conn.cursor()
+            register_composite('type_ii', conn)
+            curs.execute("select '(1,2)'::type_ii as x")
+            self.assertEqual(curs.fetchone()['x'], (1,2))
+        finally:
+            conn.close()
+
     def _create_type(self, name, fields):
         curs = self.conn.cursor()
         try: