mirror of https://github.com/psycopg/psycopg2.git (synced 2025-04-23 09:51:59 +03:00)

Compare commits
80 Commits
SHA1:
40c1eea15d, 12d114c731, 3b7f57b6d8, 06f64fbe70, e9335b08f8, 26e7f1d71d, 5a660b80f5, e3ecae75a0,
75b98b561b, 12c2fafa86, e5ab0b3987, f0c38f0b37, 221d0d66de, a201307185, cefef286a6, df7018a5d4,
ba71c05860, 25ae646dcf, 2245b56dc1, a516517a23, 1d786d0380, 65fbe9159a, e2bb7ff8da, d1e1243ba8,
f597c36f42, d6da4ed09f, 69b2fa282c, a13c72cf32, e840e00278, 98ea06d8b4, a8ef13620a, 211e949741,
0e86fc164f, d43e23ddc6, 6c27cdd20e, c10c1186a5, 5624ad4ec5, 1487800b6d, ca1845477d, f739576f0a,
434fbb02b1, 9ac01d060d, 9e8923b884, 3f6497d587, 4f1e4a03d1, 8aaa4eabca, 7e9e11ee27, 0442fd924f,
58c53025d1, 8d8cc38590, 429ebfc764, 865b36e005, 09cf64dda4, 0e08fbb20b, 96248d0f09, bf843fc5f0,
68d5d070fe, 9b2f4c7d77, 468951de25, 889b1d826e, 9f4b5b37a3, eb36e75b89, 8fd228dd28, 1e94018473,
cc4cabebf0, 3a13599a99, c862554fdc, 5f320e52f4, 0fc1e3a8c7, 361522cde8, ad5af45ba6, b8fbe599ac,
9a1dac6125, 244a58e5c7, 35086c9ef0, e93357ba17, ec4aa95554, 29e96179f2, 5d3a5c242e, 979c4fc1a6
INSTALL (2 lines changed)

@@ -93,7 +93,7 @@ You can compile psycopg under Windows platform with mingw32
 Dev-C++ (http://www.bloodshed.net/devcpp.html) and Code::Blocks
 (http://www.codeblocks.org). gcc binaries should be in your PATH.
 
-You need a PostgreSQL with include and libary files installed. At least v8.0
+You need a PostgreSQL with include and library files installed. At least v8.0
 is required.
 
 First you need to create a libpython2X.a as described in
MANIFEST.in (14 lines changed)

@@ -1,16 +1,12 @@
 recursive-include psycopg *.c *.h *.manifest
 recursive-include lib *.py
 recursive-include tests *.py
-recursive-include ZPsycopgDA *.py *.gif *.dtml
-recursive-include psycopg2da *
 recursive-include examples *.py somehackers.jpg whereareyou.jpg
-recursive-include debian *
-recursive-include doc README HACKING SUCCESS COPYING* ChangeLog-1.x pep-0249.txt
-recursive-include doc *.txt *.html *.css *.js Makefile
-recursive-include doc/src *.rst *.py *.css Makefile
-recursive-include doc/html *
-prune doc/src/_build
+include doc/Makefile doc/README doc/HACKING doc/SUCCESS doc/COPYING* doc/*.txt
+include doc/src/Makefile doc/src/conf.py doc/src/*.rst doc/src/_static/*
+recursive-include doc/src/tools *.py
+include doc/html/*.html doc/html/*.js doc/html/_sources/*.txt doc/html/_static/*
 recursive-include scripts *.py *.sh
 include scripts/maketypes.sh scripts/buildtypes.py
-include AUTHORS README INSTALL LICENSE NEWS ChangeLog
+include AUTHORS README INSTALL LICENSE NEWS
 include PKG-INFO MANIFEST.in MANIFEST setup.py setup.cfg Makefile
NEWS (67 lines changed)

@@ -1,3 +1,58 @@
+Current release
+---------------
+
+What's new in psycopg 2.5.3
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Work around `pip issue #1630 <https://github.com/pypa/pip/issues/1630>`__
+  making installation via ``pip -e git+url`` impossible (:ticket:`#18`).
+- Copy operations correctly set the `cursor.rowcount` attribute
+  (:ticket:`#180`).
+- It is now possible to call `get_transaction_status()` on closed connections.
+- Fixed unsafe access to object names causing assertion failures in
+  Python 3 debug builds (:ticket:`#188`).
+- Mark the connection closed if found broken on `poll()` (from :ticket:`#192`
+  discussion)
+- Fixed handling of dsn and closed attributes in connection subclasses
+  failing to connect (from :ticket:`#192` discussion).
+- Added arbitrary but stable order to `Range` objects, thanks to
+  Chris Withers (:ticket:`#193`).
+- Avoid blocking async connections on connect (:ticket:`#194`). Thanks to
+  Adam Petrovich for the bug report and diagnosis.
+- Don't segfault using poorly defined cursor subclasses which forgot to call
+  the superclass init (:ticket:`#195`).
+- Mark the connection closed when a Socket connection is broken, as it
+  happens for TCP connections instead (:ticket:`#196`).
+- Fixed overflow opening a lobject with an oid not fitting in a signed int
+  (:ticket:`#203`).
+- Fixed handling of explicit default ``cursor_factory=None`` in
+  `connection.cursor()` (:ticket:`#210`).
+- Fixed possible segfault in named cursors creation.
+- Fixed debug build on Windows, thanks to James Emerton.
+
+
+What's new in psycopg 2.5.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed segfault pickling the exception raised on connection error
+  (:ticket:`#170`).
+- Meaningful connection errors report a meaningful message, thanks to
+  Alexey Borzenkov (:ticket:`#173`).
+- Manually creating `lobject` with the wrong parameter doesn't segfault
+  (:ticket:`#187`).
+
+
+What's new in psycopg 2.5.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed build on Solaris 10 and 11 where the round() function is already
+  declared (:ticket:`#146`).
+- Fixed comparison of `Range` with non-range objects (:ticket:`#164`).
+  Thanks to Chris Withers for the patch.
+- Fixed double-free on connection dealloc (:ticket:`#166`). Thanks to
+  Gangadharan S.A. for the report and fix suggestion.
+
+
 What's new in psycopg 2.5
 -------------------------
 

@@ -181,7 +236,7 @@ New features and changes:
   ISO885916, LATIN10, SHIFT_JIS_2004.
 - Dropped repeated dictionary lookups with unicode query/parameters.
 
-- Improvements to the named cusors:
+- Improvements to the named cursors:
 
   - More efficient iteration on named cursors, fetching 'itersize'
     records at time from the backend.

@@ -244,7 +299,7 @@ Main new features:
 - `dict` to `hstore` adapter and `hstore` to `dict` typecaster, using both
   9.0 and pre-9.0 syntax.
 - Two-phase commit protocol support as per DBAPI specification.
-- Support for payload in notifications received from the backed.
+- Support for payload in notifications received from the backend.
 - `namedtuple`-returning cursor.
 - Query execution cancel.
 

@@ -284,7 +339,7 @@ Bux fixes:
   The old register_tstz_w_secs() function is deprecated and will raise a
   warning if called.
 - Exceptions raised by the column iterator are propagated.
-- Exceptions raised by executemany() interators are propagated.
+- Exceptions raised by executemany() iterators are propagated.
 
 
 What's new in psycopg 2.2.1

@@ -401,7 +456,7 @@ New features:
 
 Bug fixes:
 
-- Fixed exeception in setup.py.
+- Fixed exception in setup.py.
 - More robust detection of PostgreSQL development versions.
 - Fixed exception in RealDictCursor, introduced in 2.0.10.
 

@@ -783,7 +838,7 @@ What's new in psycopg 1.99.11
 
 * changed 'tuple_factory' cursor attribute name to 'row_factory'.
 
-* the .cursor attribute is gone and connections and cursors are propely
+* the .cursor attribute is gone and connections and cursors are properly
   gc-managed.
 
 * fixes to the async core.

@@ -832,7 +887,7 @@ What's new in psycopg 1.99.8
 * now cursors support .fileno() and .isready() methods, to be used in
   select() calls.
 * .copy_from() and .copy_in() methods are back in (still using the old
-  protocol, will be updated to use new one in next releasae.)
+  protocol, will be updated to use new one in next release.)
 * fixed memory corruption bug reported on win32 platform.
 
 What's new in psycopg 1.99.7
doc/ChangeLog-1.x (1744 lines changed)
File diff suppressed because it is too large
@@ -23,7 +23,7 @@ Date: 23 Oct 2001 09:53:11 +0600
 
 We use psycopg and psycopg zope adapter since fisrt public
 release (it seems version 0.4). Now it works on 3 our sites and in intranet
-applications. We had few problems, but all problems were quckly
+applications. We had few problems, but all problems were quickly
 solved. The strong side of psycopg is that it's code is well organized
 and easy to understand. When I found a problem with non-ISO datestyle in first
 version of psycopg, it took for me 15 or 20 minutes to learn code and
@@ -255,7 +255,7 @@ Cursor Objects
 display_size, internal_size, precision, scale,
 null_ok). The first two items (name and type_code) are
 mandatory, the other five are optional and must be set to
-None if meaningfull values are not provided.
+None if meaningful values are not provided.
 
 This attribute will be None for operations that
 do not return rows or if the cursor has not had an
@@ -295,8 +295,8 @@ The ``connection`` class
 
 .. attribute:: closed
 
-Read-only attribute reporting whether the database connection is open
-(0) or closed (1).
+Read-only integer attribute: 0 if the connection is open, nonzero if
+it is closed or broken.
 
 
 .. method:: cancel

@@ -348,7 +348,7 @@ The ``connection`` class
 pair: Transaction; Autocommit
 pair: Transaction; Isolation level
 
-.. method:: set_session([isolation_level,] [readonly,] [deferrable,] [autocommit])
+.. method:: set_session(isolation_level=None, readonly=None, deferrable=None, autocommit=None)
 
 Set one or more parameters for the next transactions or statements in
 the current session. See |SET TRANSACTION|_ for further details.

@@ -370,6 +370,7 @@ The ``connection`` class
 PostgreSQL session setting but an alias for setting the
 `autocommit` attribute.
 
+Parameter passed as `!None` (the default for all) will not be changed.
 The parameters *isolation_level*, *readonly* and *deferrable* also
 accept the string ``DEFAULT`` as a value: the effect is to reset the
 parameter to the server default.

@@ -613,6 +614,8 @@ The ``connection`` class
 `psycopg2.extensions`: see :ref:`connection-status-constants`
 for the available values.
 
+The status is undefined for `closed` connectons.
+
 
 .. method:: lobject([oid [, mode [, new_oid [, new_file [, lobject_factory]]]]])
 

@@ -678,7 +681,7 @@ The ``connection`` class
 
 Return one of the constants defined in :ref:`poll-constants`. If it
 returns `~psycopg2.extensions.POLL_OK` then the connection has been
-estabilished or the query results are available on the client.
+established or the query results are available on the client.
 Otherwise wait until the file descriptor returned by `fileno()` is
 ready to read or to write, as explained in :ref:`async-support`.
 `poll()` should be also used by the function installed by
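For illustration only, not part of the diff: a minimal sketch of the keyword form of `set_session()` documented above and of the integer `closed` attribute; the DSN is hypothetical::

    import psycopg2
    import psycopg2.extensions

    conn = psycopg2.connect("dbname=test")        # hypothetical DSN
    # keyword form introduced by the doc change above
    conn.set_session(isolation_level='SERIALIZABLE', readonly=True)

    print(conn.closed)                            # 0 while the connection is open
    print(conn.status == psycopg2.extensions.STATUS_READY)

    conn.close()
    print(conn.closed)                            # nonzero once closed or broken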
@@ -215,6 +215,8 @@ The ``cursor`` class
 exactly the one that would be sent to the database running the
 `~cursor.execute()` method or similar.
 
+The returned string is always a bytes string.
+
 >>> cur.mogrify("INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar'))
 "INSERT INTO test (num, data) VALUES (42, E'bar')"
 

@@ -332,10 +334,6 @@ The ``cursor`` class
 `~psycopg2.ProgrammingError` is raised and the cursor position is
 not changed.
 
-The method can be used both for client-side cursors and
-:ref:`server-side cursors <server-side-cursors>`. Server-side cursors
-can usually scroll backwards only if declared `~cursor.scrollable`.
-
 .. note::
 
 According to the |DBAPI|_, the exception raised for a cursor out

@@ -347,6 +345,13 @@ The ``cursor`` class
 except (ProgrammingError, IndexError), exc:
 deal_with_it(exc)
 
+The method can be used both for client-side cursors and
+:ref:`server-side cursors <server-side-cursors>`. Server-side cursors
+can usually scroll backwards only if declared `~cursor.scrollable`.
+Moving out-of-bound in a server-side cursor doesn't result in an
+exception, if the backend doesn't raise any (Postgres doesn't tell us
+in a reliable way if we went out of bound).
+
 
 .. attribute:: arraysize
 

@@ -424,8 +429,8 @@ The ``cursor`` class
 .. attribute:: query
 
 Read-only attribute containing the body of the last query sent to the
-backend (including bound arguments). `!None` if no query has been
-executed yet:
+backend (including bound arguments) as bytes string. `!None` if no
+query has been executed yet:
 
 >>> cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar'))
 >>> cur.query
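For illustration only, not part of the diff: a small sketch of the two behaviours documented above, `mogrify()` returning a bytes string and `scroll()` working on a scrollable named cursor; the DSN and table are hypothetical::

    import psycopg2

    conn = psycopg2.connect("dbname=test")                  # hypothetical DSN
    cur = conn.cursor()
    q = cur.mogrify("INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar'))
    print(type(q))                                          # a bytes string, not text

    ncur = conn.cursor("mycur", scrollable=True)            # named server-side cursor
    ncur.execute("SELECT * FROM generate_series(1, 100)")
    ncur.scroll(10, mode='absolute')                        # scroll works server-side too
    print(ncur.fetchone())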
@@ -25,7 +25,7 @@ functionalities defined by the |DBAPI|_.
 
 .. class:: cursor(conn, name=None)
 
-It is the class usually returnded by the `connection.cursor()`
+It is the class usually returned by the `connection.cursor()`
 method. It is exposed by the `extensions` module in order to allow
 subclassing to extend its behaviour: the subclass should be passed to the
 `!cursor()` method using the `cursor_factory` parameter. See

@@ -352,8 +352,8 @@ details.
 `register_type()` to be used.
 
 :param oids: tuple of OIDs of the PostgreSQL type to convert. It should
-probably be the oid of the array type (e.g. the ``typarray`` field in
-the ``pg_type`` table.
+probably contain the oid of the array type (e.g. the ``typarray``
+field in the ``pg_type`` table).
 :param name: the name of the new type adapter.
 :param base_caster: a Psycopg typecaster, e.g. created using the
 `new_type()` function. The caster should be able to parse a single

@@ -366,11 +366,12 @@ details.
 .. note::
 
 The function can be used to create a generic array typecaster,
-returning a list of strings: just use the `~psycopg2.STRING` as base
-typecaster. For instance, if you want to receive from the database an
-array of :sql:`macaddr`, each address represented by string, you can
-use::
+returning a list of strings: just use `psycopg2.STRING` as base
+typecaster. For instance, if you want to receive an array of
+:sql:`macaddr` from the database, each address represented by string,
+you can use::
 
+# select typarray from pg_type where typname = 'macaddr' -> 1040
 psycopg2.extensions.register_type(
 psycopg2.extensions.new_array_type(
 (1040,), 'MACADDR[]', psycopg2.STRING))

@@ -427,7 +428,7 @@ The module exports a few exceptions in addition to the :ref:`standard ones
 
 (subclasses `~psycopg2.OperationalError`)
 
-Error causing transaction rollback (deadlocks, serialisation failures,
+Error causing transaction rollback (deadlocks, serialization failures,
 etc). It can be trapped specifically to detect a deadlock.
 
 .. versionadded:: 2.0.7

@@ -515,7 +516,7 @@ set to one of the following constants:
 :sql:`SERIALIZABLE` isolation level. This is the strictest transactions
 isolation level, equivalent to having the transactions executed serially
 rather than concurrently. However applications using this level must be
-prepared to retry reansactions due to serialization failures.
+prepared to retry transactions due to serialization failures.
 
 Starting from PostgreSQL 9.1, this mode monitors for conditions which
 could make execution of a concurrent set of serializable transactions
@@ -41,7 +41,7 @@ If you want to use a `!connection` subclass you can pass it as the
 Dictionary-like cursor
 ^^^^^^^^^^^^^^^^^^^^^^
 
-The dict cursors allow to access to the retrieved records using an iterface
+The dict cursors allow to access to the retrieved records using an interface
 similar to the Python dictionaries instead of the tuples.
 
 >>> dict_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

@@ -189,6 +189,20 @@ the `Json` adapter::
 Reading from the database, |pgjson| values will be automatically converted to
 Python objects.
 
+.. note::
+
+If you are using the PostgreSQL :sql:`json` data type but you want to read
+it as string in Python instead of having it parsed, your can either cast
+the column to :sql:`text` in the query (it is an efficient operation, that
+doesn't involve a copy)::
+
+cur.execute("select jsondata::text from mytable")
+
+or you can register a no-op `!loads()` function with
+`register_default_json()`::
+
+psycopg2.extras.register_default_json(loads=lambda x: x)
+
 .. note::
 
 You can use `~psycopg2.extensions.register_adapter()` to adapt any Python

@@ -204,7 +218,7 @@ Python objects.
 effects.
 
 If you want to customize the adaptation from Python to PostgreSQL you can
-either provide a custom `!dumps()` function to `!Json`::
+either provide a custom `!dumps()` function to `Json`::
 
 curs.execute("insert into mytable (jsondata) values (%s)",
 [Json({'a': 100}, dumps=simplejson.dumps)])

@@ -423,8 +437,16 @@ user-defined |range| types can be adapted using `register_range()`.
 
 `!Range` objects are immutable, hashable, and support the ``in`` operator
 (checking if an element is within the range). They can be tested for
-equivalence but not for ordering. Empty ranges evaluate to `!False` in
-boolean context, nonempty evaluate to `!True`.
+equivalence. Empty ranges evaluate to `!False` in boolean context,
+nonempty evaluate to `!True`.
 
+.. versionchanged:: 2.5.3
+
+`!Range` objects can be sorted although, as on the server-side, this
+ordering is not particularly meangingful. It is only meant to be used
+by programs assuming objects using `!Range` as primary key can be
+sorted on them. In previous versions comparing `!Range`\s raises
+`!TypeError`.
+
 Although it is possible to instantiate `!Range` objects, the class doesn't
 have an adapter registered, so you cannot normally pass these instances as

@@ -453,6 +475,17 @@ automatically casted into instances of these classes.
 .. autoclass:: DateTimeRange
 .. autoclass:: DateTimeTZRange
 
+.. note::
+
+Python lacks a representation for :sql:`infinity` date so Psycopg converts
+the value to `date.max` and such. When written into the database these
+dates will assume their literal value (e.g. :sql:`9999-12-31` instead of
+:sql:`infinity`). Check :ref:`infinite-dates-handling` for an example of
+an alternative adapter to map `date.max` to :sql:`infinity`. An
+alternative dates adapter will be used automatically by the `DateRange`
+adapter and so on.
+
+
 Custom |range| types (created with |CREATE TYPE|_ :sql:`... AS RANGE`) can be
 adapted to a custom `Range` subclass:
 
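For illustration only, not part of the diff: the two alternatives described in the json note above, casting to :sql:`text` in SQL or registering a no-op `loads()`; the DSN, table and column names are made up::

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect("dbname=test")                  # hypothetical DSN
    cur = conn.cursor()

    # 1) cast in the query: the value arrives as a plain string
    cur.execute("select jsondata::text from mytable")

    # 2) or disable parsing for the json type globally
    psycopg2.extras.register_default_json(loads=lambda x: x)
    cur.execute("select jsondata from mytable")
    raw = cur.fetchone()[0]                                 # raw json string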
@@ -121,10 +121,22 @@ Psycopg converts :sql:`decimal`\/\ :sql:`numeric` database types into Python `!D
 psycopg2.extensions.register_type(DEC2FLOAT)
 
 See :ref:`type-casting-from-sql-to-python` to read the relevant
-documentation. If you find `!psycopg2.extensions.DECIMAL` not avalable, use
+documentation. If you find `!psycopg2.extensions.DECIMAL` not available, use
 `!psycopg2._psycopg.DECIMAL` instead.
 
 
+.. _faq-json-adapt:
+.. cssclass:: faq
+
+Psycopg automatically converts PostgreSQL :sql:`json` data into Python objects. How can I receive strings instead?
+The easiest way to avoid JSON parsing is to register a no-op function with
+`~psycopg2.extras.register_default_json()`::
+
+psycopg2.extras.register_default_json(loads=lambda x: x)
+
+See :ref:`adapt-json` for further details.
+
+
 .. _faq-bytea-9.0:
 .. cssclass:: faq
 
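For illustration only, not part of the diff: one way to build the DEC2FLOAT typecaster that the FAQ entry above registers, following the usual `new_type()` recipe; whether float is acceptable depends on your precision needs::

    import psycopg2
    import psycopg2.extensions

    DEC2FLOAT = psycopg2.extensions.new_type(
        psycopg2.extensions.DECIMAL.values,   # oids handled by the stock DECIMAL caster
        'DEC2FLOAT',
        lambda value, curs: float(value) if value is not None else None)
    psycopg2.extensions.register_type(DEC2FLOAT)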
@@ -15,16 +15,10 @@ Psycopg 2 is mostly implemented in C as a libpq_ wrapper, resulting in being
 both efficient and secure. It features client-side and :ref:`server-side
 <server-side-cursors>` cursors, :ref:`asynchronous communication
 <async-support>` and :ref:`notifications <async-notify>`, |COPY-TO-FROM|__
-support, and a flexible :ref:`objects adaptation system
-<python-types-adaptation>`. Many basic Python types are supported
-out-of-the-box and mapped to matching PostgreSQL data types, such as strings
-(both byte strings and Unicode), numbers (ints, longs, floats, decimals),
-booleans and date/time objects (both built-in and `mx.DateTime`_), several
-types of :ref:`binary objects <adapt-binary>`. Also available are mappings
-between lists and PostgreSQL arrays of any supported type, between
-:ref:`dictionaries and PostgreSQL hstore <adapt-hstore>`, between
-:ref:`tuples/namedtuples and PostgreSQL composite types <adapt-composite>`,
-and between Python objects and :ref:`JSON <adapt-json>`.
+support. Many Python types are supported out-of-the-box and :ref:`adapted to
+matching PostgreSQL data types <python-types-adaptation>`; adaptation can be
+extended and customized thanks to a flexible :ref:`objects adaptation system
+<adapting-new-types>`.
 
 Psycopg 2 is both Unicode and Python 3 friendly.
 
@@ -100,7 +100,8 @@ many placeholders can use the same values::
 ... {'int': 10, 'str': "O'Reilly", 'date': datetime.date(2005, 11, 18)})
 
 When parameters are used, in order to include a literal ``%`` in the query you
-can use the ``%%`` string.
+can use the ``%%`` string. Using characters ``%``, ``(``, ``)`` in the
+argument names is not supported.
 
 While the mechanism resembles regular Python strings manipulation, there are a
 few subtle differences you should care about when passing parameters to a

@@ -298,8 +299,8 @@ proper SQL literals::
 Numbers adaptation
 ^^^^^^^^^^^^^^^^^^
 
-Numeric objects: `int`, `long`, `float`, `~decimal.Decimal` are converted in
-the PostgreSQL numerical representation::
+Python numeric objects `int`, `long`, `float`, `~decimal.Decimal` are
+converted into a PostgreSQL numerical representation::
 
 >>> cur.mogrify("SELECT %s, %s, %s, %s;", (10, 10L, 10.0, Decimal("10.00")))
 'SELECT 10, 10, 10.0, 10.00;'

@@ -311,7 +312,7 @@ converted into `!Decimal`.
 .. note::
 
 Sometimes you may prefer to receive :sql:`numeric` data as `!float`
-insted, for performance reason or ease of manipulation: you can configure
+instead, for performance reason or ease of manipulation: you can configure
 an adapter to :ref:`cast PostgreSQL numeric to Python float <faq-float>`.
 This of course may imply a loss of precision.
 

@@ -422,7 +423,7 @@ the connection or globally: see the function
 Binary adaptation
 ^^^^^^^^^^^^^^^^^
 
-Binary types: Python types representing binary objects are converted into
+Python types representing binary objects are converted into
 PostgreSQL binary string syntax, suitable for :sql:`bytea` fields. Such
 types are `buffer` (only available in Python 2), `memoryview` (available
 from Python 2.7), `bytearray` (available from Python 2.6) and `bytes`

@@ -477,7 +478,7 @@ or `!memoryview` (in Python 3).
 Date/Time objects adaptation
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Date and time objects: builtin `~datetime.datetime`, `~datetime.date`,
+Python builtin `~datetime.datetime`, `~datetime.date`,
 `~datetime.time`, `~datetime.timedelta` are converted into PostgreSQL's
 :sql:`timestamp[tz]`, :sql:`date`, :sql:`time`, :sql:`interval` data types.
 Time zones are supported too. The Egenix `mx.DateTime`_ objects are adapted

@@ -496,6 +497,7 @@ the same way::
 .. seealso:: `PostgreSQL date/time types
 <http://www.postgresql.org/docs/current/static/datatype-datetime.html>`__
 
+
 .. index::
 single: Time Zones
 

@@ -530,6 +532,40 @@ rounded to the nearest minute, with an error of up to 30 seconds.
 versions use `psycopg2.extras.register_tstz_w_secs()`.
 
 
+.. index::
+double: Date objects; Infinite
+
+.. _infinite-dates-handling:
+
+Infinite dates handling
+'''''''''''''''''''''''
+
+PostgreSQL can store the representation of an "infinite" date, timestamp, or
+interval. Infinite dates are not available to Python, so these objects are
+mapped to `!date.max`, `!datetime.max`, `!interval.max`. Unfortunately the
+mapping cannot be bidirectional so these dates will be stored back into the
+database with their values, such as :sql:`9999-12-31`.
+
+It is possible to create an alternative adapter for dates and other objects
+to map `date.max` to :sql:`infinity`, for instance::
+
+    class InfDateAdapter:
+        def __init__(self, wrapped):
+            self.wrapped = wrapped
+        def getquoted(self):
+            if self.wrapped == datetime.date.max:
+                return b"'infinity'::date"
+            elif self.wrapped == datetime.date.min:
+                return b"'-infinity'::date"
+            else:
+                return psycopg2.extensions.DateFromPy(self.wrapped).getquoted()
+
+    psycopg2.extensions.register_adapter(datetime.date, InfDateAdapter)
+
+Of course it will not be possible to write the value of `date.max` in the
+database anymore: :sql:`infinity` will be stored instead.
+
+
 .. _adapt-list:
 
 Lists adaptation

@@ -560,7 +596,7 @@ Python lists are converted into PostgreSQL :sql:`ARRAY`\ s::
 .. note::
 
 Reading back from PostgreSQL, arrays are converted to lists of Python
-objects as expected, but only if the items are of a known known type.
+objects as expected, but only if the items are of a known type.
 Arrays of unknown types are returned as represented by the database (e.g.
 ``{a,b,c}``). If you want to convert the items into Python objects you can
 easily create a typecaster for :ref:`array of unknown types

@@ -576,7 +612,7 @@ Tuples adaptation
 double: Tuple; Adaptation
 single: IN operator
 
-Python tuples are converted in a syntax suitable for the SQL :sql:`IN`
+Python tuples are converted into a syntax suitable for the SQL :sql:`IN`
 operator and to represent a composite type::
 
 >>> cur.mogrify("SELECT %s IN %s;", (10, (10, 20, 30)))
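For illustration only, not part of the diff: the ``%%`` rule stated above in practice; a literal percent in the query text is doubled, while a ``%`` inside a bound parameter value needs no escaping (DSN, table and columns are made up)::

    import psycopg2

    conn = psycopg2.connect("dbname=test")        # hypothetical DSN
    cur = conn.cursor()
    cur.execute(
        "SELECT * FROM products WHERE note LIKE '100%% cotton' AND name LIKE %s",
        ('shirt%',))                              # bound values need no %% doubling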
@@ -24,7 +24,7 @@ import psycopg2
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Encoding for this connection is", conn.encoding
 

@@ -79,7 +79,7 @@ for row in curs.fetchall():
 print "done"
 print " python type of image data is", type(row[0])
 
-# this rollback is required because we can't drop a table with a binary cusor
+# this rollback is required because we can't drop a table with a binary cursor
 # declared and still open
 conn.rollback()
 

@@ -27,7 +27,7 @@ import psycopg2
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Encoding for this connection is", conn.encoding
 

@@ -27,7 +27,7 @@ import psycopg2
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Encoding for this connection is", conn.encoding
 

@@ -28,7 +28,7 @@ from psycopg2.extensions import adapt
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 curs = conn.cursor()
 

@@ -1,4 +1,4 @@
-# encoding.py - show to change client enkoding (and test it works)
+# encoding.py - show to change client encoding (and test it works)
 # -*- encoding: utf8 -*-
 #
 # Copyright (C) 2004-2010 Federico Di Gregorio <fog@debian.org>

@@ -26,7 +26,7 @@ import psycopg2.extensions
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Initial encoding for this connection is", conn.encoding
 

@@ -24,7 +24,7 @@ import psycopg2
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Encoding for this connection is", conn.encoding
 

@@ -23,7 +23,7 @@ import sys, psycopg2
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 curs = conn.cursor()
 

@@ -24,7 +24,7 @@ import psycopg2
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Encoding for this connection is", conn.encoding
 

@@ -24,7 +24,7 @@ import sys, psycopg2
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 
 conn = psycopg2.connect(DSN)
 print "Encoding for this connection is", conn.encoding

@@ -26,7 +26,7 @@ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Encoding for this connection is", conn.encoding
 

@@ -30,7 +30,7 @@ import psycopg2
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Encoding for this connection is", conn.encoding
 

@@ -45,7 +45,7 @@ if len(sys.argv) > 1:
 if len(sys.argv) > 2:
     MODE = int(sys.argv[2])
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 curs = conn.cursor()
 

@@ -29,7 +29,7 @@ import psycopg2.extensions
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Encoding for this connection is", conn.encoding
 

@@ -28,7 +28,7 @@ from psycopg2.tz import ZERO, LOCAL, FixedOffsetTimezone
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 curs = conn.cursor()
 

@@ -33,7 +33,7 @@ import psycopg2.extras
 if len(sys.argv) > 1:
     DSN = sys.argv[1]
 
-print "Opening connection using dns:", DSN
+print "Opening connection using dsn:", DSN
 conn = psycopg2.connect(DSN)
 print "Initial encoding for this connection is", conn.encoding
 
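For reference, not part of the diff: the corrected examples all build the connection from a DSN string taken from argv; the libpq-style DSN string and the keyword form below are interchangeable (values are hypothetical)::

    import sys
    import psycopg2

    DSN = 'dbname=test'
    if len(sys.argv) > 1:
        DSN = sys.argv[1]

    conn = psycopg2.connect(DSN)              # DSN string form
    # conn = psycopg2.connect(dbname='test')  # equivalent keyword form
    print(conn.encoding)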
@@ -121,6 +121,8 @@ class Range(object):
         return self._bounds is not None
 
     def __eq__(self, other):
+        if not isinstance(other, Range):
+            return False
         return (self._lower == other._lower
                 and self._upper == other._upper
                 and self._bounds == other._bounds)

@@ -131,12 +133,43 @@ class Range(object):
     def __hash__(self):
         return hash((self._lower, self._upper, self._bounds))
 
-    def __lt__(self, other):
-        raise TypeError(
-            'Range objects cannot be ordered; please refer to the PostgreSQL'
-            ' documentation to perform this operation in the database')
-
-    __le__ = __gt__ = __ge__ = __lt__
+    # as the postgres docs describe for the server-side stuff,
+    # ordering is rather arbitrary, but will remain stable
+    # and consistent.
+
+    def __lt__(self, other):
+        if not isinstance(other, Range):
+            return NotImplemented
+        for attr in ('_lower', '_upper', '_bounds'):
+            self_value = getattr(self, attr)
+            other_value = getattr(other, attr)
+            if self_value == other_value:
+                pass
+            elif self_value is None:
+                return True
+            elif other_value is None:
+                return False
+            else:
+                return self_value < other_value
+        return False
+
+    def __le__(self, other):
+        if self == other:
+            return True
+        else:
+            return self.__lt__(other)
+
+    def __gt__(self, other):
+        if isinstance(other, Range):
+            return other.__lt__(self)
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if self == other:
+            return True
+        else:
+            return self.__gt__(other)
 
 
 def register_range(pgrange, pyrange, conn_or_curs, globally=False):

@@ -354,7 +387,7 @@ where typname = %s and ns.nspname = %s;
 
         m = self._re_range.match(s)
         if m is None:
-            raise InterfaceError("failed to parse range: %s")
+            raise InterfaceError("failed to parse range: '%s'" % s)
 
         lower = m.group(3)
         if lower is None:

@@ -415,7 +448,7 @@ class NumberRangeAdapter(RangeAdapter):
     def getquoted(self):
         r = self.adapted
         if r.isempty:
-            return "'empty'"
+            return b("'empty'")
 
         if not r.lower_inf:
             # not exactly: we are relying that none of these object is really

@@ -431,8 +464,8 @@ class NumberRangeAdapter(RangeAdapter):
         else:
             upper = ''
 
-        return b("'%s%s,%s%s'" % (
-            r._bounds[0], lower, upper, r._bounds[1]))
+        return ("'%s%s,%s%s'" % (
+            r._bounds[0], lower, upper, r._bounds[1])).encode('ascii')
 
 # TODO: probably won't work with infs, nans and other tricky cases.
 register_adapter(NumericRange, NumberRangeAdapter)
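Usage sketch, not part of the diff, for the ordering added above: since 2.5.3 `Range` objects compare field by field in an arbitrary but stable way, so they can be sorted instead of raising `TypeError`::

    from psycopg2.extras import NumericRange

    ranges = [NumericRange(10, 20), NumericRange(None, 5), NumericRange(1, 3, '[]')]
    print(sorted(ranges))                            # no TypeError since 2.5.3
    print(NumericRange(1, 3) < NumericRange(1, 4))   # True: lower ties, upper decides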
@@ -82,7 +82,7 @@ STATUS_SYNC = 3 # currently unused
 STATUS_ASYNC = 4 # currently unused
 STATUS_PREPARED = 5
 
-# This is a usefull mnemonic to check if the connection is in a transaction
+# This is a useful mnemonic to check if the connection is in a transaction
 STATUS_IN_TRANSACTION = STATUS_BEGIN
 
 """psycopg asynchronous connection polling values"""
@@ -1,7 +1,7 @@
 """Miscellaneous goodies for psycopg2
 
 This module is a generic place used to hold little helper functions
-and classes untill a better place in the distribution is found.
+and classes until a better place in the distribution is found.
 """
 # psycopg/extras.py - miscellaneous extra goodies for psycopg
 #

@@ -131,7 +131,7 @@ class DictCursor(DictCursorBase):
         self._query_executed = 0
 
 class DictRow(list):
-    """A row object that allow by-colmun-name access to data."""
+    """A row object that allow by-column-name access to data."""
 
     __slots__ = ('_index',)
 

@@ -406,7 +406,7 @@ class MinTimeLoggingConnection(LoggingConnection):
 
     This is just an example of how to sub-class `LoggingConnection` to
     provide some extra filtering for the logged queries. Both the
-    `inizialize()` and `filter()` methods are overwritten to make sure
+    `initialize()` and `filter()` methods are overwritten to make sure
     that only queries executing for more than ``mintime`` ms are logged.
 
     Note that this connection uses the specialized cursor

@@ -449,13 +449,15 @@ class UUID_adapter(object):
     def __init__(self, uuid):
        self._uuid = uuid
 
-    def prepare(self, conn):
-        pass
+    def __conform__(self, proto):
+        if proto is _ext.ISQLQuote:
+            return self
 
     def getquoted(self):
-        return "'"+str(self._uuid)+"'::uuid"
+        return b("'%s'::uuid" % self._uuid)
 
-    __str__ = getquoted
+    def __str__(self):
+        return "'%s'::uuid" % self._uuid
 
 def register_uuid(oids=None, conn_or_curs=None):
     """Create the UUID type and an uuid.UUID adapter.

@@ -514,8 +516,8 @@ class Inet(object):
         obj.prepare(self._conn)
         return obj.getquoted() + b("::inet")
 
-    def __conform__(self, foo):
-        if foo is _ext.ISQLQuote:
+    def __conform__(self, proto):
+        if proto is _ext.ISQLQuote:
             return self
 
     def __str__(self):
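For illustration only, not part of the diff: the adapter protocol the `UUID_adapter` change above relies on; an object can adapt itself by answering `__conform__(ISQLQuote)` and returning SQL bytes from `getquoted()` (the `Point` class is hypothetical)::

    import psycopg2.extensions

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

        def __conform__(self, proto):
            # same check used by UUID_adapter and Inet above
            if proto is psycopg2.extensions.ISQLQuote:
                return self

        def getquoted(self):
            # getquoted() must return a bytes string with the SQL literal
            return ("'(%g,%g)'::point" % (self.x, self.y)).encode('ascii')

    # cur.execute("INSERT INTO atable (apoint) VALUES (%s)", (Point(1.2, 3.4),))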
@@ -45,7 +45,7 @@ typedef struct {
 
 HIDDEN PyObject *psyco_Binary(PyObject *module, PyObject *args);
 #define psyco_Binary_doc \
     "Binary(buffer) -> new binary object\n\n" \
-    "Build an object capable to hold a bynary string value."
+    "Build an object capable to hold a binary string value."
 
 #ifdef __cplusplus
 }
@@ -141,7 +141,10 @@ static int pthread_mutex_init(pthread_mutex_t *mutex, void* fake)
 #endif
 #endif
 
-#if (defined(__FreeBSD__) && __FreeBSD_version < 503000) || (defined(_WIN32) && !defined(__GNUC__)) || defined(__sun__) || defined(sun)
+#if (defined(__FreeBSD__) && __FreeBSD_version < 503000) \
+    || (defined(_WIN32) && !defined(__GNUC__)) \
+    || (defined(sun) || defined(__sun__)) \
+    && (defined(__SunOS_5_8) || defined(__SunOS_5_9))
 /* what's this, we have no round function either? */
 static double round(double num)
 {
@ -226,7 +226,7 @@ conn_get_standard_conforming_strings(PGconn *pgconn)
|
||||||
* The presence of the 'standard_conforming_strings' parameter
|
* The presence of the 'standard_conforming_strings' parameter
|
||||||
* means that the server _accepts_ the E'' quote.
|
* means that the server _accepts_ the E'' quote.
|
||||||
*
|
*
|
||||||
* If the paramer is off, the PQescapeByteaConn returns
|
* If the parameter is off, the PQescapeByteaConn returns
|
||||||
* backslash escaped strings (e.g. '\001' -> "\\001"),
|
* backslash escaped strings (e.g. '\001' -> "\\001"),
|
||||||
* so the E'' quotes are required to avoid warnings
|
* so the E'' quotes are required to avoid warnings
|
||||||
* if 'escape_string_warning' is set.
|
* if 'escape_string_warning' is set.
|
||||||
|
@ -506,10 +506,6 @@ conn_setup(connectionObject *self, PGconn *pgconn)
|
||||||
pthread_mutex_lock(&self->lock);
|
pthread_mutex_lock(&self->lock);
|
||||||
Py_BLOCK_THREADS;
|
Py_BLOCK_THREADS;
|
||||||
|
|
||||||
if (psyco_green() && (0 > pq_set_non_blocking(self, 1))) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!conn_is_datestyle_ok(self->pgconn)) {
|
if (!conn_is_datestyle_ok(self->pgconn)) {
|
||||||
int res;
|
int res;
|
||||||
Py_UNBLOCK_THREADS;
|
Py_UNBLOCK_THREADS;
|
||||||
|
@ -573,6 +569,9 @@ _conn_sync_connect(connectionObject *self)
|
||||||
|
|
||||||
/* if the connection is green, wait to finish connection */
|
/* if the connection is green, wait to finish connection */
|
||||||
if (green) {
|
if (green) {
|
||||||
|
if (0 > pq_set_non_blocking(self, 1)) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
if (0 != psyco_wait(self)) {
|
if (0 != psyco_wait(self)) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -614,6 +613,11 @@ _conn_async_connect(connectionObject *self)
|
||||||
|
|
||||||
PQsetNoticeProcessor(pgconn, conn_notice_callback, (void*)self);
|
PQsetNoticeProcessor(pgconn, conn_notice_callback, (void*)self);
|
||||||
|
|
||||||
|
/* Set the connection to nonblocking now. */
|
||||||
|
if (pq_set_non_blocking(self, 1) != 0) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
/* The connection will be completed banging on poll():
|
/* The connection will be completed banging on poll():
|
||||||
* First with _conn_poll_connecting() that will finish connection,
|
* First with _conn_poll_connecting() that will finish connection,
|
||||||
* then with _conn_poll_setup_async() that will do the same job
|
* then with _conn_poll_setup_async() that will do the same job
|
||||||
|
@@ -625,14 +629,23 @@ _conn_async_connect(connectionObject *self)
 int
 conn_connect(connectionObject *self, long int async)
 {
+    int rv;
+
     if (async == 1) {
         Dprintf("con_connect: connecting in ASYNC mode");
-        return _conn_async_connect(self);
+        rv = _conn_async_connect(self);
     }
     else {
         Dprintf("con_connect: connecting in SYNC mode");
-        return _conn_sync_connect(self);
+        rv = _conn_sync_connect(self);
     }
+
+    if (rv != 0) {
+        /* connection failed, so let's close ourselves */
+        self->closed = 2;
+    }
+
+    return rv;
 }
@@ -642,6 +655,7 @@ static int
 _conn_poll_connecting(connectionObject *self)
 {
     int res = PSYCO_POLL_ERROR;
+    const char *msg;
 
     Dprintf("conn_poll: poll connecting");
     switch (PQconnectPoll(self->pgconn)) {
@@ -656,7 +670,11 @@ _conn_poll_connecting(connectionObject *self)
         break;
     case PGRES_POLLING_FAILED:
     case PGRES_POLLING_ACTIVE:
-        PyErr_SetString(OperationalError, "asynchronous connection failed");
+        msg = PQerrorMessage(self->pgconn);
+        if (!(msg && *msg)) {
+            msg = "asynchronous connection failed";
+        }
+        PyErr_SetString(OperationalError, msg);
         res = PSYCO_POLL_ERROR;
         break;
     }
|
@ -783,11 +801,6 @@ _conn_poll_setup_async(connectionObject *self)
|
||||||
|
|
||||||
switch (self->status) {
|
switch (self->status) {
|
||||||
case CONN_STATUS_CONNECTING:
|
case CONN_STATUS_CONNECTING:
|
||||||
/* Set the connection to nonblocking now. */
|
|
||||||
if (pq_set_non_blocking(self, 1) != 0) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
self->equote = conn_get_standard_conforming_strings(self->pgconn);
|
self->equote = conn_get_standard_conforming_strings(self->pgconn);
|
||||||
self->protocol = conn_get_protocol_version(self->pgconn);
|
self->protocol = conn_get_protocol_version(self->pgconn);
|
||||||
self->server_version = conn_get_server_version(self->pgconn);
|
self->server_version = conn_get_server_version(self->pgconn);
|
||||||
|
@@ -1177,7 +1190,7 @@ conn_set_client_encoding(connectionObject *self, const char *enc)
         goto endlock;
     }
 
-    /* no error, we can proceeed and store the new encoding */
+    /* no error, we can proceed and store the new encoding */
    {
        char *tmp = self->encoding;
        self->encoding = clean_enc;
@@ -55,7 +55,7 @@ psyco_conn_cursor(connectionObject *self, PyObject *args, PyObject *kwargs)
     PyObject *obj = NULL;
     PyObject *rv = NULL;
     PyObject *name = Py_None;
-    PyObject *factory = (PyObject *)&cursorType;
+    PyObject *factory = Py_None;
     PyObject *withhold = Py_False;
     PyObject *scrollable = Py_None;
 
@@ -64,16 +64,21 @@ psyco_conn_cursor(connectionObject *self, PyObject *args, PyObject *kwargs)
 
     EXC_IF_CONN_CLOSED(self);
 
-    if (self->cursor_factory && self->cursor_factory != Py_None) {
-        factory = self->cursor_factory;
-    }
-
     if (!PyArg_ParseTupleAndKeywords(
             args, kwargs, "|OOOO", kwlist,
             &name, &factory, &withhold, &scrollable)) {
         goto exit;
     }
 
+    if (factory == Py_None) {
+        if (self->cursor_factory && self->cursor_factory != Py_None) {
+            factory = self->cursor_factory;
+        }
+        else {
+            factory = (PyObject *)&cursorType;
+        }
+    }
+
     if (self->status != CONN_STATUS_READY &&
         self->status != CONN_STATUS_BEGIN &&
         self->status != CONN_STATUS_PREPARED) {
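For reference, the fallback chain introduced above (explicit argument, then connection.cursor_factory, then the default cursor) is what test_cursor_factory_none further down exercises; a sketch with a placeholder DSN:

    import psycopg2
    import psycopg2.extensions
    import psycopg2.extras

    conn = psycopg2.connect("dbname=test")                  # placeholder DSN
    cur = conn.cursor(cursor_factory=None)                  # falls back to the default cursor
    assert type(cur) is psycopg2.extensions.cursor

    conn = psycopg2.connect("dbname=test",
                            cursor_factory=psycopg2.extras.DictCursor)
    cur = conn.cursor(cursor_factory=None)                  # falls back to the connection factory
    assert type(cur) is psycopg2.extras.DictCursor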
@ -700,8 +705,6 @@ psyco_conn_set_client_encoding(connectionObject *self, PyObject *args)
|
||||||
static PyObject *
|
static PyObject *
|
||||||
psyco_conn_get_transaction_status(connectionObject *self)
|
psyco_conn_get_transaction_status(connectionObject *self)
|
||||||
{
|
{
|
||||||
EXC_IF_CONN_CLOSED(self);
|
|
||||||
|
|
||||||
return PyInt_FromLong((long)PQtransactionStatus(self->pgconn));
|
return PyInt_FromLong((long)PQtransactionStatus(self->pgconn));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -748,7 +751,7 @@ psyco_conn_get_parameter_status(connectionObject *self, PyObject *args)
|
||||||
static PyObject *
|
static PyObject *
|
||||||
psyco_conn_lobject(connectionObject *self, PyObject *args, PyObject *keywds)
|
psyco_conn_lobject(connectionObject *self, PyObject *args, PyObject *keywds)
|
||||||
{
|
{
|
||||||
int oid = (int)InvalidOid, new_oid = (int)InvalidOid;
|
Oid oid = InvalidOid, new_oid = InvalidOid;
|
||||||
const char *new_file = NULL;
|
const char *new_file = NULL;
|
||||||
const char *smode = "";
|
const char *smode = "";
|
||||||
PyObject *factory = (PyObject *)&lobjectType;
|
PyObject *factory = (PyObject *)&lobjectType;
|
||||||
|
@ -757,7 +760,7 @@ psyco_conn_lobject(connectionObject *self, PyObject *args, PyObject *keywds)
|
||||||
static char *kwlist[] = {"oid", "mode", "new_oid", "new_file",
|
static char *kwlist[] = {"oid", "mode", "new_oid", "new_file",
|
||||||
"cursor_factory", NULL};
|
"cursor_factory", NULL};
|
||||||
|
|
||||||
if (!PyArg_ParseTupleAndKeywords(args, keywds, "|izizO", kwlist,
|
if (!PyArg_ParseTupleAndKeywords(args, keywds, "|IzIzO", kwlist,
|
||||||
&oid, &smode, &new_oid, &new_file,
|
&oid, &smode, &new_oid, &new_file,
|
||||||
&factory)) {
|
&factory)) {
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -769,16 +772,16 @@ psyco_conn_lobject(connectionObject *self, PyObject *args, PyObject *keywds)
|
||||||
EXC_IF_TPC_PREPARED(self, lobject);
|
EXC_IF_TPC_PREPARED(self, lobject);
|
||||||
|
|
||||||
Dprintf("psyco_conn_lobject: new lobject for connection at %p", self);
|
Dprintf("psyco_conn_lobject: new lobject for connection at %p", self);
|
||||||
Dprintf("psyco_conn_lobject: parameters: oid = %d, mode = %s",
|
Dprintf("psyco_conn_lobject: parameters: oid = %u, mode = %s",
|
||||||
oid, smode);
|
oid, smode);
|
||||||
Dprintf("psyco_conn_lobject: parameters: new_oid = %d, new_file = %s",
|
Dprintf("psyco_conn_lobject: parameters: new_oid = %d, new_file = %s",
|
||||||
new_oid, new_file);
|
new_oid, new_file);
|
||||||
|
|
||||||
if (new_file)
|
if (new_file)
|
||||||
obj = PyObject_CallFunction(factory, "Oisis",
|
obj = PyObject_CallFunction(factory, "OIsIs",
|
||||||
self, oid, smode, new_oid, new_file);
|
self, oid, smode, new_oid, new_file);
|
||||||
else
|
else
|
||||||
obj = PyObject_CallFunction(factory, "Oisi",
|
obj = PyObject_CallFunction(factory, "OIsI",
|
||||||
self, oid, smode, new_oid);
|
self, oid, smode, new_oid);
|
||||||
|
|
||||||
if (obj == NULL) return NULL;
|
if (obj == NULL) return NULL;
|
||||||
|
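For reference, parsing the oid arguments as unsigned ("I" instead of "i") lets oids above 2**31 through, as test_large_oid further down checks; a sketch assuming an open connection named conn:

    import psycopg2

    try:
        conn.lobject(0xFFFFFFFE)    # no longer overflows while parsing the argument
    except psycopg2.OperationalError:
        pass                        # expected: the large object simply doesn't exist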
@ -1099,6 +1102,7 @@ connection_setup(connectionObject *self, const char *dsn, long int async)
|
||||||
res = 0;
|
res = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
exit:
|
||||||
/* here we obfuscate the password even if there was a connection error */
|
/* here we obfuscate the password even if there was a connection error */
|
||||||
pos = strstr(self->dsn, "password");
|
pos = strstr(self->dsn, "password");
|
||||||
if (pos != NULL) {
|
if (pos != NULL) {
|
||||||
|
@ -1106,7 +1110,6 @@ connection_setup(connectionObject *self, const char *dsn, long int async)
|
||||||
*pos = 'x';
|
*pos = 'x';
|
||||||
}
|
}
|
||||||
|
|
||||||
exit:
|
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1128,10 +1131,13 @@ connection_dealloc(PyObject* obj)
|
||||||
{
|
{
|
||||||
connectionObject *self = (connectionObject *)obj;
|
connectionObject *self = (connectionObject *)obj;
|
||||||
|
|
||||||
conn_close(self);
|
/* Make sure to untrack the connection before calling conn_close, which may
|
||||||
|
* allow a different thread to try and dealloc the connection again,
|
||||||
|
* resulting in a double-free segfault (ticket #166). */
|
||||||
PyObject_GC_UnTrack(self);
|
PyObject_GC_UnTrack(self);
|
||||||
|
|
||||||
|
conn_close(self);
|
||||||
|
|
||||||
if (self->weakreflist) {
|
if (self->weakreflist) {
|
||||||
PyObject_ClearWeakRefs(obj);
|
PyObject_ClearWeakRefs(obj);
|
||||||
}
|
}
|
||||||
|
|
|
@@ -97,30 +97,44 @@ HIDDEN int psyco_curs_scrollable_set(cursorObject *self, PyObject *pyvalue);
 
 /* exception-raising macros */
 #define EXC_IF_CURS_CLOSED(self) \
-if ((self)->closed || ((self)->conn && (self)->conn->closed)) { \
+do { \
+    if (!(self)->conn) { \
+        PyErr_SetString(InterfaceError, "the cursor has no connection"); \
+        return NULL; } \
+    if ((self)->closed || (self)->conn->closed) { \
     PyErr_SetString(InterfaceError, "cursor already closed"); \
-    return NULL; }
+    return NULL; } \
+} while (0)
 
 #define EXC_IF_NO_TUPLES(self) \
+do \
 if ((self)->notuples && (self)->name == NULL) { \
     PyErr_SetString(ProgrammingError, "no results to fetch"); \
-    return NULL; }
+    return NULL; } \
+while (0)
 
 #define EXC_IF_NO_MARK(self) \
+do \
 if ((self)->mark != (self)->conn->mark && (self)->withhold == 0) { \
     PyErr_SetString(ProgrammingError, "named cursor isn't valid anymore"); \
-    return NULL; }
+    return NULL; } \
+while (0)
 
-#define EXC_IF_CURS_ASYNC(self, cmd) if ((self)->conn->async == 1) { \
-    PyErr_SetString(ProgrammingError, #cmd " cannot be used " \
-    "in asynchronous mode"); \
-    return NULL; }
+#define EXC_IF_CURS_ASYNC(self, cmd) \
+do \
+if ((self)->conn->async == 1) { \
+    PyErr_SetString(ProgrammingError, \
+        #cmd " cannot be used in asynchronous mode"); \
+    return NULL; } \
+while (0)
 
 #define EXC_IF_ASYNC_IN_PROGRESS(self, cmd) \
+do \
 if ((self)->conn->async_cursor != NULL) { \
-    PyErr_SetString(ProgrammingError, #cmd " cannot be used " \
-    "while an asynchronous query is underway"); \
-    return NULL; }
+    PyErr_SetString(ProgrammingError, \
+        #cmd " cannot be used while an asynchronous query is underway"); \
+    return NULL; } \
+while (0)
 
 #ifdef __cplusplus
 }
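For reference, the new "the cursor has no connection" branch is what turns a badly written cursor subclass into a clean InterfaceError, as test_bad_subclass further down checks:

    import psycopg2
    import psycopg2.extensions

    class StupidCursor(psycopg2.extensions.cursor):
        def __init__(self, *args, **kwargs):
            pass        # never calls the superclass __init__, so no connection is set

    cur = StupidCursor()
    try:
        cur.execute('select 1')
    except psycopg2.InterfaceError:
        pass            # "the cursor has no connection" instead of a segfault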
|
|
@ -109,7 +109,7 @@ _mogrify(PyObject *var, PyObject *fmt, cursorObject *curs, PyObject **new)
|
||||||
/* if we find '%(' then this is a dictionary, we:
|
/* if we find '%(' then this is a dictionary, we:
|
||||||
1/ find the matching ')' and extract the key name
|
1/ find the matching ')' and extract the key name
|
||||||
2/ locate the value in the dictionary (or return an error)
|
2/ locate the value in the dictionary (or return an error)
|
||||||
3/ mogrify the value into something usefull (quoting)...
|
3/ mogrify the value into something useful (quoting)...
|
||||||
4/ ...and add it to the new dictionary to be used as argument
|
4/ ...and add it to the new dictionary to be used as argument
|
||||||
*/
|
*/
|
||||||
case '(':
|
case '(':
|
||||||
|
@ -314,7 +314,7 @@ _psyco_curs_merge_query_args(cursorObject *self,
|
||||||
"not all arguments converted"
|
"not all arguments converted"
|
||||||
|
|
||||||
and return the appropriate ProgrammingError. we do that by grabbing
|
and return the appropriate ProgrammingError. we do that by grabbing
|
||||||
the curren exception (we will later restore it if the type or the
|
the current exception (we will later restore it if the type or the
|
||||||
strings do not match.) */
|
strings do not match.) */
|
||||||
|
|
||||||
if (!(fquery = Bytes_Format(query, args))) {
|
if (!(fquery = Bytes_Format(query, args))) {
|
||||||
|
@ -506,7 +506,7 @@ psyco_curs_executemany(cursorObject *self, PyObject *args, PyObject *kwargs)
|
||||||
{
|
{
|
||||||
PyObject *operation = NULL, *vars = NULL;
|
PyObject *operation = NULL, *vars = NULL;
|
||||||
PyObject *v, *iter = NULL;
|
PyObject *v, *iter = NULL;
|
||||||
int rowcount = 0;
|
long rowcount = 0;
|
||||||
|
|
||||||
static char *kwlist[] = {"query", "vars_list", NULL};
|
static char *kwlist[] = {"query", "vars_list", NULL};
|
||||||
|
|
||||||
|
@ -1169,7 +1169,7 @@ psyco_curs_scroll(cursorObject *self, PyObject *args, PyObject *kwargs)
|
||||||
char buffer[128];
|
char buffer[128];
|
||||||
|
|
||||||
EXC_IF_NO_MARK(self);
|
EXC_IF_NO_MARK(self);
|
||||||
EXC_IF_ASYNC_IN_PROGRESS(self, scroll)
|
EXC_IF_ASYNC_IN_PROGRESS(self, scroll);
|
||||||
EXC_IF_TPC_PREPARED(self->conn, scroll);
|
EXC_IF_TPC_PREPARED(self->conn, scroll);
|
||||||
|
|
||||||
if (strcmp(mode, "absolute") == 0) {
|
if (strcmp(mode, "absolute") == 0) {
|
||||||
|
@ -1822,7 +1822,7 @@ cursor_setup(cursorObject *self, connectionObject *conn, const char *name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* FIXME: why does this raise an excpetion on the _next_ line of code?
|
/* FIXME: why does this raise an exception on the _next_ line of code?
|
||||||
if (PyObject_IsInstance((PyObject*)conn,
|
if (PyObject_IsInstance((PyObject*)conn,
|
||||||
(PyObject *)&connectionType) == 0) {
|
(PyObject *)&connectionType) == 0) {
|
||||||
PyErr_SetString(PyExc_TypeError,
|
PyErr_SetString(PyExc_TypeError,
|
||||||
|
@ -1899,31 +1899,34 @@ cursor_init(PyObject *obj, PyObject *args, PyObject *kwargs)
|
||||||
{
|
{
|
||||||
PyObject *conn;
|
PyObject *conn;
|
||||||
PyObject *name = Py_None;
|
PyObject *name = Py_None;
|
||||||
const char *cname;
|
PyObject *bname = NULL;
|
||||||
|
const char *cname = NULL;
|
||||||
|
int rv = -1;
|
||||||
|
|
||||||
static char *kwlist[] = {"conn", "name", NULL};
|
static char *kwlist[] = {"conn", "name", NULL};
|
||||||
|
|
||||||
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O", kwlist,
|
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O", kwlist,
|
||||||
&connectionType, &conn, &name)) {
|
&connectionType, &conn, &name)) {
|
||||||
return -1;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (name == Py_None) {
|
if (name != Py_None) {
|
||||||
cname = NULL;
|
|
||||||
} else {
|
|
||||||
Py_INCREF(name); /* for ensure_bytes */
|
Py_INCREF(name); /* for ensure_bytes */
|
||||||
if (!(name = psycopg_ensure_bytes(name))) {
|
if (!(bname = psycopg_ensure_bytes(name))) {
|
||||||
/* name has had a ref stolen */
|
/* name has had a ref stolen */
|
||||||
return -1;
|
goto exit;
|
||||||
}
|
}
|
||||||
Py_DECREF(name);
|
|
||||||
|
|
||||||
if (!(cname = Bytes_AsString(name))) {
|
if (!(cname = Bytes_AsString(bname))) {
|
||||||
return -1;
|
goto exit;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return cursor_setup((cursorObject *)obj, (connectionObject *)conn, cname);
|
rv = cursor_setup((cursorObject *)obj, (connectionObject *)conn, cname);
|
||||||
|
|
||||||
|
exit:
|
||||||
|
Py_XDECREF(bname);
|
||||||
|
return rv;
|
||||||
}
|
}
|
||||||
|
|
||||||
static PyObject *
|
static PyObject *
|
||||||
|
|
|
@ -163,8 +163,16 @@ psyco_error_reduce(errorObject *self)
|
||||||
if (2 != PyTuple_GET_SIZE(tuple)) { goto exit; }
|
if (2 != PyTuple_GET_SIZE(tuple)) { goto exit; }
|
||||||
|
|
||||||
if (!(dict = PyDict_New())) { goto error; }
|
if (!(dict = PyDict_New())) { goto error; }
|
||||||
if (0 != PyDict_SetItemString(dict, "pgerror", self->pgerror)) { goto error; }
|
if (self->pgerror) {
|
||||||
if (0 != PyDict_SetItemString(dict, "pgcode", self->pgcode)) { goto error; }
|
if (0 != PyDict_SetItemString(dict, "pgerror", self->pgerror)) {
|
||||||
|
goto error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (self->pgcode) {
|
||||||
|
if (0 != PyDict_SetItemString(dict, "pgcode", self->pgcode)) {
|
||||||
|
goto error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
PyObject *newtuple;
|
PyObject *newtuple;
|
||||||
|
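For reference, guarding against NULL pgerror/pgcode is what makes connection errors picklable, as test_pickle_connection_error further down checks (Python 2 syntax, as in the test suite; the database name is a placeholder):

    import pickle
    import psycopg2

    try:
        psycopg2.connect('dbname=nosuchdatabasemate')
    except psycopg2.Error, e:
        e1 = pickle.loads(pickle.dumps(e))      # no segfault even if pgcode is unset
        assert e1.pgcode == e.pgcode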
|
|
@ -253,7 +253,7 @@ lobject_close_locked(lobjectObject *self, char **error)
|
||||||
return 0;
|
return 0;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
PyErr_SetString(OperationalError, "the connection is broken");
|
*error = strdup("the connection is broken");
|
||||||
return -1;
|
return -1;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
|
@ -355,9 +355,11 @@ lobject_dealloc(PyObject* obj)
|
||||||
{
|
{
|
||||||
lobjectObject *self = (lobjectObject *)obj;
|
lobjectObject *self = (lobjectObject *)obj;
|
||||||
|
|
||||||
|
if (self->conn && self->fd != -1) {
|
||||||
if (lobject_close(self) < 0)
|
if (lobject_close(self) < 0)
|
||||||
PyErr_Print();
|
PyErr_Print();
|
||||||
Py_XDECREF((PyObject*)self->conn);
|
Py_XDECREF((PyObject*)self->conn);
|
||||||
|
}
|
||||||
PyMem_Free(self->smode);
|
PyMem_Free(self->smode);
|
||||||
|
|
||||||
Dprintf("lobject_dealloc: deleted lobject object at %p, refcnt = "
|
Dprintf("lobject_dealloc: deleted lobject object at %p, refcnt = "
|
||||||
|
@ -369,17 +371,18 @@ lobject_dealloc(PyObject* obj)
|
||||||
static int
|
static int
|
||||||
lobject_init(PyObject *obj, PyObject *args, PyObject *kwds)
|
lobject_init(PyObject *obj, PyObject *args, PyObject *kwds)
|
||||||
{
|
{
|
||||||
int oid = (int)InvalidOid, new_oid = (int)InvalidOid;
|
Oid oid = InvalidOid, new_oid = InvalidOid;
|
||||||
const char *smode = "";
|
const char *smode = "";
|
||||||
const char *new_file = NULL;
|
const char *new_file = NULL;
|
||||||
PyObject *conn;
|
PyObject *conn = NULL;
|
||||||
|
|
||||||
if (!PyArg_ParseTuple(args, "O|iziz",
|
if (!PyArg_ParseTuple(args, "O!|IzIz",
|
||||||
&conn, &oid, &smode, &new_oid, &new_file))
|
&connectionType, &conn,
|
||||||
|
&oid, &smode, &new_oid, &new_file))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
return lobject_setup((lobjectObject *)obj,
|
return lobject_setup((lobjectObject *)obj,
|
||||||
(connectionObject *)conn, (Oid)oid, smode, (Oid)new_oid, new_file);
|
(connectionObject *)conn, oid, smode, new_oid, new_file);
|
||||||
}
|
}
|
||||||
|
|
||||||
static PyObject *
|
static PyObject *
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
/* microporotocols_proto.h - definiton for psycopg's protocols
|
/* microporotocols_proto.h - definition for psycopg's protocols
|
||||||
*
|
*
|
||||||
* Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
|
* Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
|
||||||
*
|
*
|
||||||
|
|
|
@ -25,7 +25,7 @@
|
||||||
|
|
||||||
/* IMPORTANT NOTE: no function in this file do its own connection locking
|
/* IMPORTANT NOTE: no function in this file do its own connection locking
|
||||||
except for pg_execute and pq_fetch (that are somehow high-level). This means
|
except for pg_execute and pq_fetch (that are somehow high-level). This means
|
||||||
that all the othe functions should be called while holding a lock to the
|
that all the other functions should be called while holding a lock to the
|
||||||
connection.
|
connection.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -153,7 +153,7 @@ exception_from_sqlstate(const char *sqlstate)
|
||||||
This function should be called while holding the GIL.
|
This function should be called while holding the GIL.
|
||||||
|
|
||||||
The function passes the ownership of the pgres to the returned exception,
|
The function passes the ownership of the pgres to the returned exception,
|
||||||
wherer the pgres was the explicit argument or taken from the cursor.
|
where the pgres was the explicit argument or taken from the cursor.
|
||||||
So, after calling it curs->pgres will be set to null */
|
So, after calling it curs->pgres will be set to null */
|
||||||
|
|
||||||
RAISES static void
|
RAISES static void
|
||||||
|
@ -417,11 +417,21 @@ pq_complete_error(connectionObject *conn, PGresult **pgres, char **error)
|
||||||
pq_raise(conn, NULL, pgres);
|
pq_raise(conn, NULL, pgres);
|
||||||
/* now *pgres is null */
|
/* now *pgres is null */
|
||||||
}
|
}
|
||||||
else if (*error != NULL) {
|
else {
|
||||||
|
if (*error != NULL) {
|
||||||
PyErr_SetString(OperationalError, *error);
|
PyErr_SetString(OperationalError, *error);
|
||||||
} else {
|
} else {
|
||||||
PyErr_SetString(OperationalError, "unknown error");
|
PyErr_SetString(OperationalError, "unknown error");
|
||||||
}
|
}
|
||||||
|
/* Trivia: with a broken socket connection PQexec returns NULL, so we
|
||||||
|
* end up here. With a TCP connection we get a pgres with an error
|
||||||
|
* instead, and the connection gets closed in the pq_raise call above
|
||||||
|
* (see ticket #196)
|
||||||
|
*/
|
||||||
|
if (CONNECTION_BAD == PQstatus(conn->pgconn)) {
|
||||||
|
conn->closed = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (*error) {
|
if (*error) {
|
||||||
free(*error);
|
free(*error);
|
||||||
|
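For reference, the closed = 2 state set here is observable from Python; a minimal sketch, assuming a connection conn whose backend has gone away (the situation of ticket #196):

    cur = conn.cursor()
    try:
        cur.execute("select 1")
    except psycopg2.OperationalError:
        # the wrapper now marks the connection as broken, not merely errored
        assert conn.closed == 2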
@ -781,7 +791,7 @@ exit:
|
||||||
means that there is data available to be collected. -1 means an error, the
|
means that there is data available to be collected. -1 means an error, the
|
||||||
exception will be set accordingly.
|
exception will be set accordingly.
|
||||||
|
|
||||||
this fucntion locks the connection object
|
this function locks the connection object
|
||||||
this function call Py_*_ALLOW_THREADS macros */
|
this function call Py_*_ALLOW_THREADS macros */
|
||||||
|
|
||||||
int
|
int
|
||||||
|
@ -797,6 +807,12 @@ pq_is_busy(connectionObject *conn)
|
||||||
Dprintf("pq_is_busy: PQconsumeInput() failed");
|
Dprintf("pq_is_busy: PQconsumeInput() failed");
|
||||||
pthread_mutex_unlock(&(conn->lock));
|
pthread_mutex_unlock(&(conn->lock));
|
||||||
Py_BLOCK_THREADS;
|
Py_BLOCK_THREADS;
|
||||||
|
|
||||||
|
/* if the libpq says pgconn is lost, close the py conn */
|
||||||
|
if (CONNECTION_BAD == PQstatus(conn->pgconn)) {
|
||||||
|
conn->closed = 2;
|
||||||
|
}
|
||||||
|
|
||||||
PyErr_SetString(OperationalError, PQerrorMessage(conn->pgconn));
|
PyErr_SetString(OperationalError, PQerrorMessage(conn->pgconn));
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -826,6 +842,12 @@ pq_is_busy_locked(connectionObject *conn)
|
||||||
|
|
||||||
if (PQconsumeInput(conn->pgconn) == 0) {
|
if (PQconsumeInput(conn->pgconn) == 0) {
|
||||||
Dprintf("pq_is_busy_locked: PQconsumeInput() failed");
|
Dprintf("pq_is_busy_locked: PQconsumeInput() failed");
|
||||||
|
|
||||||
|
/* if the libpq says pgconn is lost, close the py conn */
|
||||||
|
if (CONNECTION_BAD == PQstatus(conn->pgconn)) {
|
||||||
|
conn->closed = 2;
|
||||||
|
}
|
||||||
|
|
||||||
PyErr_SetString(OperationalError, PQerrorMessage(conn->pgconn));
|
PyErr_SetString(OperationalError, PQerrorMessage(conn->pgconn));
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -974,7 +996,7 @@ pq_execute(cursorObject *curs, const char *query, int async, int no_result)
|
||||||
/* if the execute was sync, we call pq_fetch() immediately,
|
/* if the execute was sync, we call pq_fetch() immediately,
|
||||||
to respect the old DBAPI-2.0 compatible behaviour */
|
to respect the old DBAPI-2.0 compatible behaviour */
|
||||||
if (async == 0) {
|
if (async == 0) {
|
||||||
Dprintf("pq_execute: entering syncronous DBAPI compatibility mode");
|
Dprintf("pq_execute: entering synchronous DBAPI compatibility mode");
|
||||||
if (pq_fetch(curs, no_result) < 0) return -1;
|
if (pq_fetch(curs, no_result) < 0) return -1;
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
|
@ -1041,7 +1063,7 @@ pq_get_last_result(connectionObject *conn)
|
||||||
|
|
||||||
/* pq_fetch - fetch data after a query
|
/* pq_fetch - fetch data after a query
|
||||||
|
|
||||||
this fucntion locks the connection object
|
this function locks the connection object
|
||||||
this function call Py_*_ALLOW_THREADS macros
|
this function call Py_*_ALLOW_THREADS macros
|
||||||
|
|
||||||
return value:
|
return value:
|
||||||
|
@ -1134,9 +1156,8 @@ _pq_fetch_tuples(cursorObject *curs)
|
||||||
cast = psyco_default_cast;
|
cast = psyco_default_cast;
|
||||||
}
|
}
|
||||||
|
|
||||||
Dprintf("_pq_fetch_tuples: using cast at %p (%s) for type %d",
|
Dprintf("_pq_fetch_tuples: using cast at %p for type %d",
|
||||||
cast, Bytes_AS_STRING(((typecastObject*)cast)->name),
|
cast, PQftype(curs->pgres,i));
|
||||||
PQftype(curs->pgres,i));
|
|
||||||
Py_INCREF(cast);
|
Py_INCREF(cast);
|
||||||
PyTuple_SET_ITEM(casts, i, cast);
|
PyTuple_SET_ITEM(casts, i, cast);
|
||||||
|
|
||||||
|
@ -1243,6 +1264,20 @@ exit:
|
||||||
return rv;
|
return rv;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
_read_rowcount(cursorObject *curs)
|
||||||
|
{
|
||||||
|
const char *rowcount;
|
||||||
|
|
||||||
|
rowcount = PQcmdTuples(curs->pgres);
|
||||||
|
Dprintf("_read_rowcount: PQcmdTuples = %s", rowcount);
|
||||||
|
if (!rowcount || !rowcount[0]) {
|
||||||
|
curs->rowcount = -1;
|
||||||
|
} else {
|
||||||
|
curs->rowcount = atol(rowcount);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
_pq_copy_in_v3(cursorObject *curs)
|
_pq_copy_in_v3(cursorObject *curs)
|
||||||
{
|
{
|
||||||
|
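For reference, _read_rowcount() is what makes cursor.rowcount meaningful after a COPY, as test_copy_rowcount further down verifies (needs PostgreSQL >= 8.2, which reports the count); a sketch assuming a table tcopy(data text) and an open connection conn:

    from StringIO import StringIO      # Python 2, as in the test suite

    cur = conn.cursor()
    cur.copy_from(StringIO('aaa\nbbb\nccc\n'), 'tcopy', columns=['data'])
    assert cur.rowcount == 3           # was -1 before this change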
@ -1335,7 +1370,7 @@ _pq_copy_in_v3(cursorObject *curs)
|
||||||
else if (error == 2)
|
else if (error == 2)
|
||||||
res = PQputCopyEnd(curs->conn->pgconn, "error in PQputCopyData() call");
|
res = PQputCopyEnd(curs->conn->pgconn, "error in PQputCopyData() call");
|
||||||
else
|
else
|
||||||
/* XXX would be nice to propagate the exeption */
|
/* XXX would be nice to propagate the exception */
|
||||||
res = PQputCopyEnd(curs->conn->pgconn, "error in .read() call");
|
res = PQputCopyEnd(curs->conn->pgconn, "error in .read() call");
|
||||||
|
|
||||||
CLEARPGRES(curs->pgres);
|
CLEARPGRES(curs->pgres);
|
||||||
|
@ -1343,7 +1378,7 @@ _pq_copy_in_v3(cursorObject *curs)
|
||||||
Dprintf("_pq_copy_in_v3: copy ended; res = %d", res);
|
Dprintf("_pq_copy_in_v3: copy ended; res = %d", res);
|
||||||
|
|
||||||
/* if the result is -1 we should not even try to get a result from the
|
/* if the result is -1 we should not even try to get a result from the
|
||||||
bacause that will lock the current thread forever */
|
because that will lock the current thread forever */
|
||||||
if (res == -1) {
|
if (res == -1) {
|
||||||
pq_raise(curs->conn, curs, NULL);
|
pq_raise(curs->conn, curs, NULL);
|
||||||
/* FIXME: pq_raise check the connection but for some reason even
|
/* FIXME: pq_raise check the connection but for some reason even
|
||||||
|
@ -1360,6 +1395,7 @@ _pq_copy_in_v3(cursorObject *curs)
|
||||||
|
|
||||||
if (NULL == curs->pgres)
|
if (NULL == curs->pgres)
|
||||||
break;
|
break;
|
||||||
|
_read_rowcount(curs);
|
||||||
if (PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR)
|
if (PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR)
|
||||||
pq_raise(curs->conn, curs, NULL);
|
pq_raise(curs->conn, curs, NULL);
|
||||||
CLEARPGRES(curs->pgres);
|
CLEARPGRES(curs->pgres);
|
||||||
|
@ -1436,6 +1472,7 @@ _pq_copy_out_v3(cursorObject *curs)
|
||||||
|
|
||||||
if (NULL == curs->pgres)
|
if (NULL == curs->pgres)
|
||||||
break;
|
break;
|
||||||
|
_read_rowcount(curs);
|
||||||
if (PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR)
|
if (PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR)
|
||||||
pq_raise(curs->conn, curs, NULL);
|
pq_raise(curs->conn, curs, NULL);
|
||||||
CLEARPGRES(curs->pgres);
|
CLEARPGRES(curs->pgres);
|
||||||
|
@ -1451,7 +1488,6 @@ int
|
||||||
pq_fetch(cursorObject *curs, int no_result)
|
pq_fetch(cursorObject *curs, int no_result)
|
||||||
{
|
{
|
||||||
int pgstatus, ex = -1;
|
int pgstatus, ex = -1;
|
||||||
const char *rowcount;
|
|
||||||
|
|
||||||
/* even if we fail, we remove any information about the previous query */
|
/* even if we fail, we remove any information about the previous query */
|
||||||
curs_reset(curs);
|
curs_reset(curs);
|
||||||
|
@ -1483,11 +1519,7 @@ pq_fetch(cursorObject *curs, int no_result)
|
||||||
|
|
||||||
case PGRES_COMMAND_OK:
|
case PGRES_COMMAND_OK:
|
||||||
Dprintf("pq_fetch: command returned OK (no tuples)");
|
Dprintf("pq_fetch: command returned OK (no tuples)");
|
||||||
rowcount = PQcmdTuples(curs->pgres);
|
_read_rowcount(curs);
|
||||||
if (!rowcount || !rowcount[0])
|
|
||||||
curs->rowcount = -1;
|
|
||||||
else
|
|
||||||
curs->rowcount = atoi(rowcount);
|
|
||||||
curs->lastoid = PQoidValue(curs->pgres);
|
curs->lastoid = PQoidValue(curs->pgres);
|
||||||
CLEARPGRES(curs->pgres);
|
CLEARPGRES(curs->pgres);
|
||||||
ex = 1;
|
ex = 1;
|
||||||
|
@ -1495,8 +1527,8 @@ pq_fetch(cursorObject *curs, int no_result)
|
||||||
|
|
||||||
case PGRES_COPY_OUT:
|
case PGRES_COPY_OUT:
|
||||||
Dprintf("pq_fetch: data from a COPY TO (no tuples)");
|
Dprintf("pq_fetch: data from a COPY TO (no tuples)");
|
||||||
ex = _pq_copy_out_v3(curs);
|
|
||||||
curs->rowcount = -1;
|
curs->rowcount = -1;
|
||||||
|
ex = _pq_copy_out_v3(curs);
|
||||||
/* error caught by out glorious notice handler */
|
/* error caught by out glorious notice handler */
|
||||||
if (PyErr_Occurred()) ex = -1;
|
if (PyErr_Occurred()) ex = -1;
|
||||||
CLEARPGRES(curs->pgres);
|
CLEARPGRES(curs->pgres);
|
||||||
|
@ -1504,8 +1536,8 @@ pq_fetch(cursorObject *curs, int no_result)
|
||||||
|
|
||||||
case PGRES_COPY_IN:
|
case PGRES_COPY_IN:
|
||||||
Dprintf("pq_fetch: data from a COPY FROM (no tuples)");
|
Dprintf("pq_fetch: data from a COPY FROM (no tuples)");
|
||||||
ex = _pq_copy_in_v3(curs);
|
|
||||||
curs->rowcount = -1;
|
curs->rowcount = -1;
|
||||||
|
ex = _pq_copy_in_v3(curs);
|
||||||
/* error caught by out glorious notice handler */
|
/* error caught by out glorious notice handler */
|
||||||
if (PyErr_Occurred()) ex = -1;
|
if (PyErr_Occurred()) ex = -1;
|
||||||
CLEARPGRES(curs->pgres);
|
CLEARPGRES(curs->pgres);
|
||||||
|
|
|
@ -59,7 +59,7 @@ extern "C" {
|
||||||
HIDDEN psyco_errors_fill_RETURN psyco_errors_fill psyco_errors_fill_PROTO;
|
HIDDEN psyco_errors_fill_RETURN psyco_errors_fill psyco_errors_fill_PROTO;
|
||||||
HIDDEN psyco_errors_set_RETURN psyco_errors_set psyco_errors_set_PROTO;
|
HIDDEN psyco_errors_set_RETURN psyco_errors_set psyco_errors_set_PROTO;
|
||||||
|
|
||||||
/* global excpetions */
|
/* global exceptions */
|
||||||
extern HIDDEN PyObject *Error, *Warning, *InterfaceError, *DatabaseError,
|
extern HIDDEN PyObject *Error, *Warning, *InterfaceError, *DatabaseError,
|
||||||
*InternalError, *OperationalError, *ProgrammingError,
|
*InternalError, *OperationalError, *ProgrammingError,
|
||||||
*IntegrityError, *DataError, *NotSupportedError;
|
*IntegrityError, *DataError, *NotSupportedError;
|
||||||
|
@ -169,7 +169,7 @@ STEALS(1) HIDDEN PyObject * psycopg_ensure_text(PyObject *obj);
|
||||||
"Error related to SQL query cancellation."
|
"Error related to SQL query cancellation."
|
||||||
|
|
||||||
#define TransactionRollbackError_doc \
|
#define TransactionRollbackError_doc \
|
||||||
"Error causing transaction rollback (deadlocks, serialisation failures, etc)."
|
"Error causing transaction rollback (deadlocks, serialization failures, etc)."
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
|
|
|
@ -437,7 +437,7 @@ static struct {
|
||||||
static int
|
static int
|
||||||
psyco_errors_init(void)
|
psyco_errors_init(void)
|
||||||
{
|
{
|
||||||
/* the names of the exceptions here reflect the oranization of the
|
/* the names of the exceptions here reflect the organization of the
|
||||||
psycopg2 module and not the fact the the original error objects
|
psycopg2 module and not the fact the the original error objects
|
||||||
live in _psycopg */
|
live in _psycopg */
|
||||||
|
|
||||||
|
|
|
@ -61,7 +61,7 @@ chunk_getreadbuffer(chunkObject *self, Py_ssize_t segment, void **ptr)
|
||||||
if (segment != 0)
|
if (segment != 0)
|
||||||
{
|
{
|
||||||
PyErr_SetString(PyExc_SystemError,
|
PyErr_SetString(PyExc_SystemError,
|
||||||
"acessing non-existant buffer segment");
|
"accessing non-existant buffer segment");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
*ptr = self->base;
|
*ptr = self->base;
|
||||||
|
@ -160,7 +160,7 @@ typecast_BINARY_cast(const char *s, Py_ssize_t l, PyObject *curs)
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
/* This is a buffer in the classic bytea format. So we can handle it
|
/* This is a buffer in the classic bytea format. So we can handle it
|
||||||
* to the PQunescapeBytea to have it parsed, rignt? ...Wrong. We
|
* to the PQunescapeBytea to have it parsed, right? ...Wrong. We
|
||||||
* could, but then we'd have to record whether buffer was allocated by
|
* could, but then we'd have to record whether buffer was allocated by
|
||||||
* Python or by the libpq to dispose it properly. Furthermore the
|
* Python or by the libpq to dispose it properly. Furthermore the
|
||||||
* PQunescapeBytea interface is not the most brilliant as it wants a
|
* PQunescapeBytea interface is not the most brilliant as it wants a
|
||||||
|
|
|
@ -74,7 +74,7 @@ typecastObject_initlist typecast_builtins[] = {
|
||||||
FOOTER = """ {NULL, NULL, NULL, NULL}\n};\n"""
|
FOOTER = """ {NULL, NULL, NULL, NULL}\n};\n"""
|
||||||
|
|
||||||
|
|
||||||
# usefull error reporting function
|
# useful error reporting function
|
||||||
def error(msg):
|
def error(msg):
|
||||||
"""Report an error on stderr."""
|
"""Report an error on stderr."""
|
||||||
sys.stderr.write(msg+'\n')
|
sys.stderr.write(msg+'\n')
|
||||||
|
|
|
@ -28,5 +28,5 @@ have_ssl=0
|
||||||
# Statically link against the postgresql client library.
|
# Statically link against the postgresql client library.
|
||||||
#static_libpq=1
|
#static_libpq=1
|
||||||
|
|
||||||
# Add here eventual extra libreries required to link the module.
|
# Add here eventual extra libraries required to link the module.
|
||||||
#libraries=
|
#libraries=
|
||||||
|
|
setup.py (34 changed lines)
@ -21,7 +21,7 @@ and stable as a rock.
|
||||||
psycopg2 is different from the other database adapter because it was
|
psycopg2 is different from the other database adapter because it was
|
||||||
designed for heavily multi-threaded applications that create and destroy
|
designed for heavily multi-threaded applications that create and destroy
|
||||||
lots of cursors and make a conspicuous number of concurrent INSERTs or
|
lots of cursors and make a conspicuous number of concurrent INSERTs or
|
||||||
UPDATEs. psycopg2 also provide full asycronous operations and support
|
UPDATEs. psycopg2 also provide full asynchronous operations and support
|
||||||
for coroutine libraries.
|
for coroutine libraries.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
@ -31,7 +31,14 @@ Intended Audience :: Developers
|
||||||
License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
|
License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
|
||||||
License :: OSI Approved :: Zope Public License
|
License :: OSI Approved :: Zope Public License
|
||||||
Programming Language :: Python
|
Programming Language :: Python
|
||||||
|
Programming Language :: Python :: 2.5
|
||||||
|
Programming Language :: Python :: 2.6
|
||||||
|
Programming Language :: Python :: 2.7
|
||||||
Programming Language :: Python :: 3
|
Programming Language :: Python :: 3
|
||||||
|
Programming Language :: Python :: 3.1
|
||||||
|
Programming Language :: Python :: 3.2
|
||||||
|
Programming Language :: Python :: 3.3
|
||||||
|
Programming Language :: Python :: 3.4
|
||||||
Programming Language :: C
|
Programming Language :: C
|
||||||
Programming Language :: SQL
|
Programming Language :: SQL
|
||||||
Topic :: Database
|
Topic :: Database
|
||||||
|
@ -76,8 +83,7 @@ except ImportError:
|
||||||
|
|
||||||
# Take a look at http://www.python.org/dev/peps/pep-0386/
|
# Take a look at http://www.python.org/dev/peps/pep-0386/
|
||||||
# for a consistent versioning pattern.
|
# for a consistent versioning pattern.
|
||||||
|
PSYCOPG_VERSION = '2.5.3'
|
||||||
PSYCOPG_VERSION = '2.5'
|
|
||||||
|
|
||||||
version_flags = ['dt', 'dec']
|
version_flags = ['dt', 'dec']
|
||||||
|
|
||||||
|
@ -210,7 +216,7 @@ or with the pg_config option in 'setup.cfg'.
|
||||||
class psycopg_build_ext(build_ext):
|
class psycopg_build_ext(build_ext):
|
||||||
"""Conditionally complement the setup.cfg options file.
|
"""Conditionally complement the setup.cfg options file.
|
||||||
|
|
||||||
This class configures the include_dirs, libray_dirs, libraries
|
This class configures the include_dirs, library_dirs, libraries
|
||||||
options as required by the system. Most of the configuration happens
|
options as required by the system. Most of the configuration happens
|
||||||
in finalize_options() method.
|
in finalize_options() method.
|
||||||
|
|
||||||
|
@ -287,12 +293,15 @@ class psycopg_build_ext(build_ext):
|
||||||
manifest = '_psycopg.vc9.x86.manifest'
|
manifest = '_psycopg.vc9.x86.manifest'
|
||||||
if platform == 'win-amd64':
|
if platform == 'win-amd64':
|
||||||
manifest = '_psycopg.vc9.amd64.manifest'
|
manifest = '_psycopg.vc9.amd64.manifest'
|
||||||
|
try:
|
||||||
|
ext_path = self.get_ext_fullpath(extension.name)
|
||||||
|
except AttributeError:
|
||||||
|
ext_path = os.path.join(self.build_lib,
|
||||||
|
'psycopg2', '_psycopg.pyd')
|
||||||
self.compiler.spawn(
|
self.compiler.spawn(
|
||||||
['mt.exe', '-nologo', '-manifest',
|
['mt.exe', '-nologo', '-manifest',
|
||||||
os.path.join('psycopg', manifest),
|
os.path.join('psycopg', manifest),
|
||||||
'-outputresource:%s;2' % (
|
'-outputresource:%s;2' % ext_path])
|
||||||
os.path.join(self.build_lib,
|
|
||||||
'psycopg2', '_psycopg.pyd'))])
|
|
||||||
|
|
||||||
def finalize_win32(self):
|
def finalize_win32(self):
|
||||||
"""Finalize build system configuration on win32 platform."""
|
"""Finalize build system configuration on win32 platform."""
|
||||||
|
@ -362,7 +371,7 @@ class psycopg_build_ext(build_ext):
|
||||||
finalize_linux3 = finalize_linux
|
finalize_linux3 = finalize_linux
|
||||||
|
|
||||||
def finalize_options(self):
|
def finalize_options(self):
|
||||||
"""Complete the build system configuation."""
|
"""Complete the build system configuration."""
|
||||||
build_ext.finalize_options(self)
|
build_ext.finalize_options(self)
|
||||||
|
|
||||||
pg_config_helper = PostgresConfig(self)
|
pg_config_helper = PostgresConfig(self)
|
||||||
|
@ -499,9 +508,11 @@ you probably need to install its companion -dev or -devel package."""
|
||||||
|
|
||||||
# generate a nice version string to avoid confusion when users report bugs
|
# generate a nice version string to avoid confusion when users report bugs
|
||||||
version_flags.append('pq3') # no more a choice
|
version_flags.append('pq3') # no more a choice
|
||||||
|
|
||||||
for have in parser.get('build_ext', 'define').split(','):
|
for have in parser.get('build_ext', 'define').split(','):
|
||||||
if have == 'PSYCOPG_EXTENSIONS':
|
if have == 'PSYCOPG_EXTENSIONS':
|
||||||
version_flags.append('ext')
|
version_flags.append('ext')
|
||||||
|
|
||||||
if version_flags:
|
if version_flags:
|
||||||
PSYCOPG_VERSION_EX = PSYCOPG_VERSION + " (%s)" % ' '.join(version_flags)
|
PSYCOPG_VERSION_EX = PSYCOPG_VERSION + " (%s)" % ' '.join(version_flags)
|
||||||
else:
|
else:
|
||||||
|
@ -522,6 +533,13 @@ if parser.has_option('build_ext', 'static_libpq'):
|
||||||
else:
|
else:
|
||||||
static_libpq = 0
|
static_libpq = 0
|
||||||
|
|
||||||
|
# And now... explicitly add the defines from the .cfg files.
|
||||||
|
# Looks like setuptools or some other cog doesn't add them to the command line
|
||||||
|
# when called e.g. with "pip -e git+url'. This results in declarations
|
||||||
|
# duplicate on the commandline, which I hope is not a problem.
|
||||||
|
for define in parser.get('build_ext', 'define').split(','):
|
||||||
|
define_macros.append((define, '1'))
|
||||||
|
|
||||||
# build the extension
|
# build the extension
|
||||||
|
|
||||||
sources = [ os.path.join('psycopg', x) for x in sources]
|
sources = [ os.path.join('psycopg', x) for x in sources]
|
||||||
|
|
|
@ -60,7 +60,7 @@ import sys
|
||||||
# - Now a subclass of TestCase, to avoid requiring the driver stub
|
# - Now a subclass of TestCase, to avoid requiring the driver stub
|
||||||
# to use multiple inheritance
|
# to use multiple inheritance
|
||||||
# - Reversed the polarity of buggy test in test_description
|
# - Reversed the polarity of buggy test in test_description
|
||||||
# - Test exception heirarchy correctly
|
# - Test exception hierarchy correctly
|
||||||
# - self.populate is now self._populate(), so if a driver stub
|
# - self.populate is now self._populate(), so if a driver stub
|
||||||
# overrides self.ddl1 this change propogates
|
# overrides self.ddl1 this change propogates
|
||||||
# - VARCHAR columns now have a width, which will hopefully make the
|
# - VARCHAR columns now have a width, which will hopefully make the
|
||||||
|
@ -188,7 +188,7 @@ class DatabaseAPI20Test(unittest.TestCase):
|
||||||
|
|
||||||
def test_Exceptions(self):
|
def test_Exceptions(self):
|
||||||
# Make sure required exceptions exist, and are in the
|
# Make sure required exceptions exist, and are in the
|
||||||
# defined heirarchy.
|
# defined hierarchy.
|
||||||
if sys.version[0] == '3': #under Python 3 StardardError no longer exists
|
if sys.version[0] == '3': #under Python 3 StardardError no longer exists
|
||||||
self.failUnless(issubclass(self.driver.Warning,Exception))
|
self.failUnless(issubclass(self.driver.Warning,Exception))
|
||||||
self.failUnless(issubclass(self.driver.Error,Exception))
|
self.failUnless(issubclass(self.driver.Error,Exception))
|
||||||
|
@ -504,7 +504,7 @@ class DatabaseAPI20Test(unittest.TestCase):
|
||||||
self.assertRaises(self.driver.Error,cur.fetchone)
|
self.assertRaises(self.driver.Error,cur.fetchone)
|
||||||
|
|
||||||
# cursor.fetchone should raise an Error if called after
|
# cursor.fetchone should raise an Error if called after
|
||||||
# executing a query that cannnot return rows
|
# executing a query that cannot return rows
|
||||||
self.executeDDL1(cur)
|
self.executeDDL1(cur)
|
||||||
self.assertRaises(self.driver.Error,cur.fetchone)
|
self.assertRaises(self.driver.Error,cur.fetchone)
|
||||||
|
|
||||||
|
@ -516,7 +516,7 @@ class DatabaseAPI20Test(unittest.TestCase):
|
||||||
self.failUnless(cur.rowcount in (-1,0))
|
self.failUnless(cur.rowcount in (-1,0))
|
||||||
|
|
||||||
# cursor.fetchone should raise an Error if called after
|
# cursor.fetchone should raise an Error if called after
|
||||||
# executing a query that cannnot return rows
|
# executing a query that cannot return rows
|
||||||
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
|
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
|
||||||
self.table_prefix
|
self.table_prefix
|
||||||
))
|
))
|
||||||
|
|
|
@ -449,6 +449,16 @@ class AsyncTests(ConnectingTestCase):
|
||||||
self.wait(self.conn)
|
self.wait(self.conn)
|
||||||
self.assertEqual(cur.fetchone(), (42,))
|
self.assertEqual(cur.fetchone(), (42,))
|
||||||
|
|
||||||
|
def test_async_connection_error_message(self):
|
||||||
|
try:
|
||||||
|
cnn = psycopg2.connect('dbname=thisdatabasedoesntexist', async=True)
|
||||||
|
self.wait(cnn)
|
||||||
|
except psycopg2.Error, e:
|
||||||
|
self.assertNotEqual(str(e), "asynchronous connection failed",
|
||||||
|
"connection error reason lost")
|
||||||
|
else:
|
||||||
|
self.fail("no exception raised")
|
||||||
|
|
||||||
|
|
||||||
def test_suite():
|
def test_suite():
|
||||||
return unittest.TestLoader().loadTestsFromName(__name__)
|
return unittest.TestLoader().loadTestsFromName(__name__)
|
||||||
|
|
|
@ -249,6 +249,28 @@ class ConnectionTests(ConnectingTestCase):
|
||||||
cur.execute("select 1 as a")
|
cur.execute("select 1 as a")
|
||||||
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
|
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
|
||||||
|
|
||||||
|
def test_cursor_factory_none(self):
|
||||||
|
# issue #210
|
||||||
|
conn = self.connect()
|
||||||
|
cur = conn.cursor(cursor_factory=None)
|
||||||
|
self.assertEqual(type(cur), psycopg2.extensions.cursor)
|
||||||
|
|
||||||
|
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
|
||||||
|
cur = conn.cursor(cursor_factory=None)
|
||||||
|
self.assertEqual(type(cur), psycopg2.extras.DictCursor)
|
||||||
|
|
||||||
|
def test_failed_init_status(self):
|
||||||
|
class SubConnection(psycopg2.extensions.connection):
|
||||||
|
def __init__(self, dsn):
|
||||||
|
try:
|
||||||
|
super(SubConnection, self).__init__(dsn)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
c = SubConnection("dbname=thereisnosuchdatabasemate password=foobar")
|
||||||
|
self.assert_(c.closed, "connection failed so it must be closed")
|
||||||
|
self.assert_('foobar' not in c.dsn, "password was not obscured")
|
||||||
|
|
||||||
|
|
||||||
class IsolationLevelsTestCase(ConnectingTestCase):
|
class IsolationLevelsTestCase(ConnectingTestCase):
|
||||||
|
|
||||||
|
|
|
@ -25,7 +25,7 @@
|
||||||
import sys
|
import sys
|
||||||
import string
|
import string
|
||||||
from testutils import unittest, ConnectingTestCase, decorate_all_tests
|
from testutils import unittest, ConnectingTestCase, decorate_all_tests
|
||||||
from testutils import skip_if_no_iobase
|
from testutils import skip_if_no_iobase, skip_before_postgres
|
||||||
from cStringIO import StringIO
|
from cStringIO import StringIO
|
||||||
from itertools import cycle, izip
|
from itertools import cycle, izip
|
||||||
|
|
||||||
|
@ -199,6 +199,20 @@ class CopyTests(ConnectingTestCase):
|
||||||
f.seek(0)
|
f.seek(0)
|
||||||
self.assertEqual(f.readline().rstrip(), about)
|
self.assertEqual(f.readline().rstrip(), about)
|
||||||
|
|
||||||
|
# same tests with setting size
|
||||||
|
f = io.StringIO()
|
||||||
|
f.write(about)
|
||||||
|
f.seek(0)
|
||||||
|
exp_size = 123
|
||||||
|
# hack here to leave file as is, only check size when reading
|
||||||
|
real_read = f.read
|
||||||
|
def read(_size, f=f, exp_size=exp_size):
|
||||||
|
self.assertEqual(_size, exp_size)
|
||||||
|
return real_read(_size)
|
||||||
|
f.read = read
|
||||||
|
curs.copy_expert('COPY tcopy (data) FROM STDIN', f, size=exp_size)
|
||||||
|
curs.execute("select data from tcopy;")
|
||||||
|
self.assertEqual(curs.fetchone()[0], abin)
|
||||||
|
|
||||||
def _copy_from(self, curs, nrecs, srec, copykw):
|
def _copy_from(self, curs, nrecs, srec, copykw):
|
||||||
f = StringIO()
|
f = StringIO()
|
||||||
|
@ -258,6 +272,35 @@ class CopyTests(ConnectingTestCase):
|
||||||
curs.execute("select count(*) from manycols;")
|
curs.execute("select count(*) from manycols;")
|
||||||
self.assertEqual(curs.fetchone()[0], 2)
|
self.assertEqual(curs.fetchone()[0], 2)
|
||||||
|
|
||||||
|
@skip_before_postgres(8, 2) # they don't send the count
|
||||||
|
def test_copy_rowcount(self):
|
||||||
|
curs = self.conn.cursor()
|
||||||
|
|
||||||
|
curs.copy_from(StringIO('aaa\nbbb\nccc\n'), 'tcopy', columns=['data'])
|
||||||
|
self.assertEqual(curs.rowcount, 3)
|
||||||
|
|
||||||
|
curs.copy_expert(
|
||||||
|
"copy tcopy (data) from stdin",
|
||||||
|
StringIO('ddd\neee\n'))
|
||||||
|
self.assertEqual(curs.rowcount, 2)
|
||||||
|
|
||||||
|
curs.copy_to(StringIO(), "tcopy")
|
||||||
|
self.assertEqual(curs.rowcount, 5)
|
||||||
|
|
||||||
|
curs.execute("insert into tcopy (data) values ('fff')")
|
||||||
|
curs.copy_expert("copy tcopy to stdout", StringIO())
|
||||||
|
self.assertEqual(curs.rowcount, 6)
|
||||||
|
|
||||||
|
def test_copy_rowcount_error(self):
|
||||||
|
curs = self.conn.cursor()
|
||||||
|
|
||||||
|
curs.execute("insert into tcopy (data) values ('fff')")
|
||||||
|
self.assertEqual(curs.rowcount, 1)
|
||||||
|
|
||||||
|
self.assertRaises(psycopg2.DataError,
|
||||||
|
curs.copy_from, StringIO('aaa\nbbb\nccc\n'), 'tcopy')
|
||||||
|
self.assertEqual(curs.rowcount, -1)
|
||||||
|
|
||||||
|
|
||||||
decorate_all_tests(CopyTests, skip_copy_if_green)
|
decorate_all_tests(CopyTests, skip_copy_if_green)
|
||||||
|
|
||||||
|
|
|
@ -27,7 +27,7 @@ import psycopg2
|
||||||
import psycopg2.extensions
|
import psycopg2.extensions
|
||||||
from psycopg2.extensions import b
|
from psycopg2.extensions import b
|
||||||
from testutils import unittest, ConnectingTestCase, skip_before_postgres
|
from testutils import unittest, ConnectingTestCase, skip_before_postgres
|
||||||
from testutils import skip_if_no_namedtuple
|
from testutils import skip_if_no_namedtuple, skip_if_no_getrefcount
|
||||||
|
|
||||||
class CursorTests(ConnectingTestCase):
|
class CursorTests(ConnectingTestCase):
|
||||||
|
|
||||||
|
@ -97,6 +97,7 @@ class CursorTests(ConnectingTestCase):
|
||||||
self.assertEqual(b('SELECT 10.3;'),
|
self.assertEqual(b('SELECT 10.3;'),
|
||||||
cur.mogrify("SELECT %s;", (Decimal("10.3"),)))
|
cur.mogrify("SELECT %s;", (Decimal("10.3"),)))
|
||||||
|
|
||||||
|
@skip_if_no_getrefcount
|
||||||
def test_mogrify_leak_on_multiple_reference(self):
|
def test_mogrify_leak_on_multiple_reference(self):
|
||||||
# issue #81: reference leak when a parameter value is referenced
|
# issue #81: reference leak when a parameter value is referenced
|
||||||
# more than once from a dict.
|
# more than once from a dict.
|
||||||
|
@ -157,6 +158,7 @@ class CursorTests(ConnectingTestCase):
|
||||||
curs = self.conn.cursor()
|
curs = self.conn.cursor()
|
||||||
w = ref(curs)
|
w = ref(curs)
|
||||||
del curs
|
del curs
|
||||||
|
import gc; gc.collect()
|
||||||
self.assert_(w() is None)
|
self.assert_(w() is None)
|
||||||
|
|
||||||
def test_null_name(self):
|
def test_null_name(self):
|
||||||
|
@ -400,7 +402,7 @@ class CursorTests(ConnectingTestCase):
|
||||||
|
|
||||||
@skip_before_postgres(8, 0)
|
@skip_before_postgres(8, 0)
|
||||||
def test_scroll_named(self):
|
def test_scroll_named(self):
|
||||||
cur = self.conn.cursor()
|
cur = self.conn.cursor('tmp', scrollable=True)
|
||||||
cur.execute("select generate_series(0,9)")
|
cur.execute("select generate_series(0,9)")
|
||||||
cur.scroll(2)
|
cur.scroll(2)
|
||||||
self.assertEqual(cur.fetchone(), (2,))
|
self.assertEqual(cur.fetchone(), (2,))
|
||||||
|
@ -410,8 +412,20 @@ class CursorTests(ConnectingTestCase):
|
||||||
self.assertEqual(cur.fetchone(), (8,))
|
self.assertEqual(cur.fetchone(), (8,))
|
||||||
cur.scroll(9, mode='absolute')
|
cur.scroll(9, mode='absolute')
|
||||||
self.assertEqual(cur.fetchone(), (9,))
|
self.assertEqual(cur.fetchone(), (9,))
|
||||||
self.assertRaises((IndexError, psycopg2.ProgrammingError),
|
|
||||||
cur.scroll, 10, mode='absolute')
|
def test_bad_subclass(self):
|
||||||
|
# check that we get an error message instead of a segfault
|
||||||
|
# for badly written subclasses.
|
||||||
|
# see http://stackoverflow.com/questions/22019341/
|
||||||
|
class StupidCursor(psycopg2.extensions.cursor):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
# I am stupid so not calling superclass init
|
||||||
|
pass
|
||||||
|
|
||||||
|
cur = StupidCursor()
|
||||||
|
self.assertRaises(psycopg2.InterfaceError, cur.execute, 'select 1')
|
||||||
|
self.assertRaises(psycopg2.InterfaceError, cur.executemany,
|
||||||
|
'select 1', [])
|
||||||
|
|
||||||
|
|
||||||
def test_suite():
|
def test_suite():
|
||||||
|
|
|
@ -213,6 +213,14 @@ class DatetimeTests(ConnectingTestCase, CommonDatetimeTestsMixin):
|
||||||
self.assertEqual(value.seconds, 41103)
|
self.assertEqual(value.seconds, 41103)
|
||||||
self.assertEqual(value.microseconds, 876544)
|
self.assertEqual(value.microseconds, 876544)
|
||||||
|
|
||||||
|
def test_parse_infinity(self):
|
||||||
|
value = self.DATETIME('-infinity', self.curs)
|
||||||
|
self.assertEqual(str(value), '0001-01-01 00:00:00')
|
||||||
|
value = self.DATETIME('infinity', self.curs)
|
||||||
|
self.assertEqual(str(value), '9999-12-31 23:59:59.999999')
|
||||||
|
value = self.DATE('infinity', self.curs)
|
||||||
|
self.assertEqual(str(value), '9999-12-31')
|
||||||
|
|
||||||
def test_adapt_date(self):
|
def test_adapt_date(self):
|
||||||
from datetime import date
|
from datetime import date
|
||||||
value = self.execute('select (%s)::date::text',
|
value = self.execute('select (%s)::date::text',
|
||||||
|
@ -240,7 +248,7 @@ class DatetimeTests(ConnectingTestCase, CommonDatetimeTestsMixin):
|
||||||
self.assertEqual(seconds, 3674096)
|
self.assertEqual(seconds, 3674096)
|
||||||
self.assertEqual(int(round((value - seconds) * 1000000)), 123456)
|
self.assertEqual(int(round((value - seconds) * 1000000)), 123456)
|
||||||
|
|
||||||
def test_adapt_megative_timedelta(self):
|
def test_adapt_negative_timedelta(self):
|
||||||
from datetime import timedelta
|
from datetime import timedelta
|
||||||
value = self.execute('select extract(epoch from (%s)::interval)',
|
value = self.execute('select extract(epoch from (%s)::interval)',
|
||||||
[timedelta(days=-42, seconds=45296,
|
[timedelta(days=-42, seconds=45296,
|
||||||
|
@ -428,7 +436,7 @@ class mxDateTimeTests(ConnectingTestCase, CommonDatetimeTestsMixin):
|
||||||
self.assertEqual(seconds, 3674096)
|
self.assertEqual(seconds, 3674096)
|
||||||
self.assertEqual(int(round((value - seconds) * 1000000)), 123456)
|
self.assertEqual(int(round((value - seconds) * 1000000)), 123456)
|
||||||
|
|
||||||
def test_adapt_megative_timedelta(self):
|
def test_adapt_negative_timedelta(self):
|
||||||
from mx.DateTime import DateTimeDeltaFrom
|
from mx.DateTime import DateTimeDeltaFrom
|
||||||
value = self.execute('select extract(epoch from (%s)::interval)',
|
value = self.execute('select extract(epoch from (%s)::interval)',
|
||||||
[DateTimeDeltaFrom(days=-42,
|
[DateTimeDeltaFrom(days=-42,
|
||||||
|
|
|
@ -77,6 +77,10 @@ class LargeObjectTests(LargeObjectTestCase):
|
||||||
self.assertNotEqual(lo, None)
|
self.assertNotEqual(lo, None)
|
||||||
self.assertEqual(lo.mode[0], "w")
|
self.assertEqual(lo.mode[0], "w")
|
||||||
|
|
||||||
|
def test_connection_needed(self):
|
||||||
|
self.assertRaises(TypeError,
|
||||||
|
psycopg2.extensions.lobject, [])
|
||||||
|
|
||||||
def test_open_non_existent(self):
|
def test_open_non_existent(self):
|
||||||
# By creating then removing a large object, we get an Oid that
|
# By creating then removing a large object, we get an Oid that
|
||||||
# should be unused.
|
# should be unused.
|
||||||
|
@ -126,6 +130,7 @@ class LargeObjectTests(LargeObjectTestCase):
|
||||||
|
|
||||||
self.assertRaises(psycopg2.OperationalError,
|
self.assertRaises(psycopg2.OperationalError,
|
||||||
self.conn.lobject, 0, "w", lo.oid)
|
self.conn.lobject, 0, "w", lo.oid)
|
||||||
|
self.assert_(not self.conn.closed)
|
||||||
|
|
||||||
def test_import(self):
|
def test_import(self):
|
||||||
self.tmpdir = tempfile.mkdtemp()
|
self.tmpdir = tempfile.mkdtemp()
|
||||||
|
@ -369,6 +374,12 @@ class LargeObjectTests(LargeObjectTestCase):
|
||||||
finally:
|
finally:
|
||||||
self.conn.tpc_commit()
|
self.conn.tpc_commit()
|
||||||
|
|
||||||
|
def test_large_oid(self):
|
||||||
|
# Test we don't overflow with an oid not fitting a signed int
|
||||||
|
try:
|
||||||
|
self.conn.lobject(0xFFFFFFFE)
|
||||||
|
except psycopg2.OperationalError:
|
||||||
|
pass
|
||||||
|
|
||||||
decorate_all_tests(LargeObjectTests, skip_if_no_lo, skip_lo_if_green)
|
decorate_all_tests(LargeObjectTests, skip_if_no_lo, skip_lo_if_green)
|
||||||
|
|
||||||
|
|
|
@@ -199,7 +199,7 @@ class ExceptionsTestCase(ConnectingTestCase):
         self.assertEqual(diag.sqlstate, '42P01')
 
         del diag
-        gc.collect()
+        gc.collect(); gc.collect()
         assert(w() is None)
 
     @skip_copy_if_green
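Calling gc.collect() twice makes the weakref check above more robust on interpreters whose collectors may need more than one pass before the dead object is actually finalized. A minimal standalone sketch of the pattern (the Diag class here is illustrative, not taken from the test suite):

    import gc
    import weakref

    class Diag(object):
        pass

    diag = Diag()
    w = weakref.ref(diag)
    del diag
    gc.collect(); gc.collect()   # a second pass helps non-refcounting collectors
    assert w() is None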
@@ -279,6 +279,21 @@ class ExceptionsTestCase(ConnectingTestCase):
         self.assertEqual(e.pgcode, e1.pgcode)
         self.assert_(e1.cursor is None)
 
+    @skip_before_python(2, 5)
+    def test_pickle_connection_error(self):
+        # segfaults on psycopg 2.5.1 - see ticket #170
+        import pickle
+        try:
+            psycopg2.connect('dbname=nosuchdatabasemate')
+        except psycopg2.Error, exc:
+            e = exc
+
+        e1 = pickle.loads(pickle.dumps(e))
+
+        self.assertEqual(e.pgerror, e1.pgerror)
+        self.assertEqual(e.pgcode, e1.pgcode)
+        self.assert_(e1.cursor is None)
+
 
 def test_suite():
     return unittest.TestLoader().loadTestsFromName(__name__)
@@ -13,6 +13,7 @@
 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
 # License for more details.
+from __future__ import with_statement
 
 import re
 import sys
@@ -22,6 +23,7 @@ from functools import wraps
 
 from testutils import unittest, skip_if_no_uuid, skip_before_postgres
 from testutils import ConnectingTestCase, decorate_all_tests
+from testutils import py3_raises_typeerror
 
 import psycopg2
 import psycopg2.extras
@@ -1212,12 +1214,86 @@ class RangeTestCase(unittest.TestCase):
         assert_not_equal(Range(10, 20), Range(11, 20))
         assert_not_equal(Range(10, 20, '[)'), Range(10, 20, '[]'))
 
-    def test_not_ordered(self):
+    def test_eq_wrong_type(self):
         from psycopg2.extras import Range
-        self.assertRaises(TypeError, lambda: Range(empty=True) < Range(0,4))
-        self.assertRaises(TypeError, lambda: Range(1,2) > Range(0,4))
-        self.assertRaises(TypeError, lambda: Range(1,2) <= Range())
-        self.assertRaises(TypeError, lambda: Range(1,2) >= Range())
+        self.assertNotEqual(Range(10, 20), ())
+
+    def test_eq_subclass(self):
+        from psycopg2.extras import Range, NumericRange
+
+        class IntRange(NumericRange): pass
+        class PositiveIntRange(IntRange): pass
+
+        self.assertEqual(Range(10, 20), IntRange(10, 20))
+        self.assertEqual(PositiveIntRange(10, 20), IntRange(10, 20))
+
+    # as the postgres docs describe for the server-side stuff,
+    # ordering is rather arbitrary, but will remain stable
+    # and consistent.
+
+    def test_lt_ordering(self):
+        from psycopg2.extras import Range
+        self.assert_(Range(empty=True) < Range(0, 4))
+        self.assert_(not Range(1, 2) < Range(0, 4))
+        self.assert_(Range(0, 4) < Range(1, 2))
+        self.assert_(not Range(1, 2) < Range())
+        self.assert_(Range() < Range(1, 2))
+        self.assert_(not Range(1) < Range(upper=1))
+        self.assert_(not Range() < Range())
+        self.assert_(not Range(empty=True) < Range(empty=True))
+        self.assert_(not Range(1, 2) < Range(1, 2))
+        with py3_raises_typeerror():
+            self.assert_(1 < Range(1, 2))
+        with py3_raises_typeerror():
+            self.assert_(not Range(1, 2) < 1)
+
+    def test_gt_ordering(self):
+        from psycopg2.extras import Range
+        self.assert_(not Range(empty=True) > Range(0, 4))
+        self.assert_(Range(1, 2) > Range(0, 4))
+        self.assert_(not Range(0, 4) > Range(1, 2))
+        self.assert_(Range(1, 2) > Range())
+        self.assert_(not Range() > Range(1, 2))
+        self.assert_(Range(1) > Range(upper=1))
+        self.assert_(not Range() > Range())
+        self.assert_(not Range(empty=True) > Range(empty=True))
+        self.assert_(not Range(1, 2) > Range(1, 2))
+        with py3_raises_typeerror():
+            self.assert_(not 1 > Range(1, 2))
+        with py3_raises_typeerror():
+            self.assert_(Range(1, 2) > 1)
+
+    def test_le_ordering(self):
+        from psycopg2.extras import Range
+        self.assert_(Range(empty=True) <= Range(0, 4))
+        self.assert_(not Range(1, 2) <= Range(0, 4))
+        self.assert_(Range(0, 4) <= Range(1, 2))
+        self.assert_(not Range(1, 2) <= Range())
+        self.assert_(Range() <= Range(1, 2))
+        self.assert_(not Range(1) <= Range(upper=1))
+        self.assert_(Range() <= Range())
+        self.assert_(Range(empty=True) <= Range(empty=True))
+        self.assert_(Range(1, 2) <= Range(1, 2))
+        with py3_raises_typeerror():
+            self.assert_(1 <= Range(1, 2))
+        with py3_raises_typeerror():
+            self.assert_(not Range(1, 2) <= 1)
+
+    def test_ge_ordering(self):
+        from psycopg2.extras import Range
+        self.assert_(not Range(empty=True) >= Range(0, 4))
+        self.assert_(Range(1, 2) >= Range(0, 4))
+        self.assert_(not Range(0, 4) >= Range(1, 2))
+        self.assert_(Range(1, 2) >= Range())
+        self.assert_(not Range() >= Range(1, 2))
+        self.assert_(Range(1) >= Range(upper=1))
+        self.assert_(Range() >= Range())
+        self.assert_(Range(empty=True) >= Range(empty=True))
+        self.assert_(Range(1, 2) >= Range(1, 2))
+        with py3_raises_typeerror():
+            self.assert_(not 1 >= Range(1, 2))
+        with py3_raises_typeerror():
+            self.assert_(Range(1, 2) >= 1)
+
 
 def skip_if_no_range(f):
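With the ordering methods these new tests exercise, psycopg2.extras.Range instances become comparable and sortable against each other, even though the order itself is arbitrary (as the in-code comment notes, only stability and consistency are promised). A small usage sketch, assuming psycopg2 2.5 or later with this change applied:

    from psycopg2.extras import Range

    ranges = [Range(1, 2), Range(0, 4), Range(empty=True), Range()]
    ranges.sort()   # exact order is implementation-defined but stable across runs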
@@ -293,6 +293,15 @@ def skip_if_green(reason):
 
 skip_copy_if_green = skip_if_green("copy in async mode currently not supported")
 
+def skip_if_no_getrefcount(f):
+    @wraps(f)
+    def skip_if_no_getrefcount_(self):
+        if not hasattr(sys, 'getrefcount'):
+            return self.skipTest('skipped, no sys.getrefcount()')
+        else:
+            return f(self)
+    return skip_if_no_getrefcount_
+
 def script_to_py3(script):
     """Convert a script to Python3 syntax if required."""
     if sys.version_info[0] < 3:
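The new skip_if_no_getrefcount decorator guards tests that rely on sys.getrefcount(), a CPython implementation detail that is missing on interpreters such as PyPy. A hypothetical usage sketch (the test class and method names are illustrative, not part of this changeset):

    import sys

    from testutils import ConnectingTestCase, skip_if_no_getrefcount

    class RefcountTests(ConnectingTestCase):
        @skip_if_no_getrefcount
        def test_getrefcount_available(self):
            # only runs where sys.getrefcount() exists; skipped elsewhere
            self.assert_(sys.getrefcount(self.conn) > 0)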
@@ -320,3 +329,13 @@ def script_to_py3(script):
     f2.close()
     os.remove(filename)
 
+
+class py3_raises_typeerror(object):
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, type, exc, tb):
+        if sys.version_info[0] >= 3:
+            assert type is TypeError
+        return True
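The py3_raises_typeerror context manager encodes the difference between Python 2, where comparisons between unrelated types silently succeed, and Python 3, where they raise TypeError; that is why its __exit__ only asserts on Python 3 and returns True to swallow the expected exception. A minimal sketch of the behaviour it accounts for:

    import sys

    try:
        result = 1 < ()     # arbitrary but legal on Python 2, TypeError on Python 3
        raised = False
    except TypeError:
        raised = True

    assert raised == (sys.version_info[0] >= 3)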