#!/usr/bin/env python

# test_cursor.py - unit test for cursor attributes
#
# Copyright (C) 2010-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.

import gc
import sys
import time
import ctypes
import pickle
import psycopg2
import psycopg2.extensions
import unittest
from datetime import date
from decimal import Decimal
from weakref import ref
from .testutils import (ConnectingTestCase, skip_before_postgres,
    skip_if_no_getrefcount, slow, skip_if_no_superuser,
    skip_if_windows)
import psycopg2.extras
from psycopg2.compat import text_type


class CursorTests(ConnectingTestCase):

    def test_close_idempotent(self):
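        # closing a cursor twice must not raise; the cursor stays marked closed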
        cur = self.conn.cursor()
        cur.close()
        cur.close()
        self.assert_(cur.closed)

    def test_empty_query(self):
        cur = self.conn.cursor()
        self.assertRaises(psycopg2.ProgrammingError, cur.execute, "")
        self.assertRaises(psycopg2.ProgrammingError, cur.execute, " ")
        self.assertRaises(psycopg2.ProgrammingError, cur.execute, ";")

    def test_executemany_propagate_exceptions(self):
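        # an exception raised by the parameters generator must reach the caller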
        conn = self.conn
        cur = conn.cursor()
        cur.execute("create temp table test_exc (data int);")

        def buggygen():
            yield 1 // 0

        self.assertRaises(ZeroDivisionError,
            cur.executemany, "insert into test_exc values (%s)", buggygen())
        cur.close()

    def test_mogrify_unicode(self):
        conn = self.conn
        cur = conn.cursor()

        # test consistency between execute and mogrify.

        # unicode query containing only ascii data
        cur.execute(u"SELECT 'foo';")
        self.assertEqual('foo', cur.fetchone()[0])
        self.assertEqual(b"SELECT 'foo';", cur.mogrify(u"SELECT 'foo';"))

        conn.set_client_encoding('UTF8')
        snowman = u"\u2603"

        def b(s):
            if isinstance(s, text_type):
                return s.encode('utf8')
            else:
                return s

        # unicode query with non-ascii data
        cur.execute(u"SELECT '%s';" % snowman)
        self.assertEqual(snowman.encode('utf8'), b(cur.fetchone()[0]))
        self.assertQuotedEqual(("SELECT '%s';" % snowman).encode('utf8'),
            cur.mogrify(u"SELECT '%s';" % snowman))

        # unicode args
        cur.execute("SELECT %s;", (snowman,))
        self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0]))
        self.assertQuotedEqual(("SELECT '%s';" % snowman).encode('utf8'),
            cur.mogrify("SELECT %s;", (snowman,)))

        # unicode query and args
        cur.execute(u"SELECT %s;", (snowman,))
        self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0]))
        self.assertQuotedEqual(("SELECT '%s';" % snowman).encode('utf8'),
            cur.mogrify(u"SELECT %s;", (snowman,)))

    def test_mogrify_decimal_explodes(self):
        conn = self.conn
        cur = conn.cursor()
        self.assertEqual(b'SELECT 10.3;',
            cur.mogrify("SELECT %s;", (Decimal("10.3"),)))

    @skip_if_no_getrefcount
    def test_mogrify_leak_on_multiple_reference(self):
        # issue #81: reference leak when a parameter value is referenced
        # more than once from a dict.
        cur = self.conn.cursor()
        foo = (lambda x: x)('foo') * 10
        nref1 = sys.getrefcount(foo)
        cur.mogrify("select %(foo)s, %(foo)s, %(foo)s", {'foo': foo})
        nref2 = sys.getrefcount(foo)
        self.assertEqual(nref1, nref2)

    def test_modify_closed(self):
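        # mogrify() can still be called on a closed cursor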
        cur = self.conn.cursor()
        cur.close()
        sql = cur.mogrify("select %s", (10,))
        self.assertEqual(sql, b"select 10")

    def test_bad_placeholder(self):
        cur = self.conn.cursor()
        self.assertRaises(psycopg2.ProgrammingError,
            cur.mogrify, "select %(foo", {})
        self.assertRaises(psycopg2.ProgrammingError,
            cur.mogrify, "select %(foo", {'foo': 1})
        self.assertRaises(psycopg2.ProgrammingError,
            cur.mogrify, "select %(foo, %(bar)", {'foo': 1})
        self.assertRaises(psycopg2.ProgrammingError,
            cur.mogrify, "select %(foo, %(bar)", {'foo': 1, 'bar': 2})

    def test_cast(self):
        curs = self.conn.cursor()

        self.assertEqual(42, curs.cast(20, '42'))
        self.assertAlmostEqual(3.14, curs.cast(700, '3.14'))

        self.assertEqual(Decimal('123.45'), curs.cast(1700, '123.45'))

        self.assertEqual(date(2011, 1, 2), curs.cast(1082, '2011-01-02'))
        self.assertEqual("who am i?", curs.cast(705, 'who am i?'))  # unknown

    def test_cast_specificity(self):
        curs = self.conn.cursor()
        self.assertEqual("foo", curs.cast(705, 'foo'))

        D = psycopg2.extensions.new_type((705,), "DOUBLING", lambda v, c: v * 2)
        psycopg2.extensions.register_type(D, self.conn)
        self.assertEqual("foofoo", curs.cast(705, 'foo'))

        T = psycopg2.extensions.new_type((705,), "TREBLING", lambda v, c: v * 3)
        psycopg2.extensions.register_type(T, curs)
        self.assertEqual("foofoofoo", curs.cast(705, 'foo'))

        curs2 = self.conn.cursor()
        self.assertEqual("foofoo", curs2.cast(705, 'foo'))

    def test_weakref(self):
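        # cursors support weak references and are collected once unreferenced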
        curs = self.conn.cursor()
        w = ref(curs)
        del curs
        gc.collect()
        self.assert_(w() is None)

    def test_null_name(self):
        curs = self.conn.cursor(None)
        self.assertEqual(curs.name, None)

    def test_invalid_name(self):
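        # a cursor name needing quoting (spaces, backslash, double quotes)
        # must be escaped correctly when the named cursor is declared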
        curs = self.conn.cursor()
        curs.execute("create temp table invname (data int);")
        for i in (10, 20, 30):
            curs.execute("insert into invname values (%s)", (i,))
        curs.close()

        curs = self.conn.cursor(r'1-2-3 \ "test"')
        curs.execute("select data from invname order by data")
        self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])

    def _create_withhold_table(self):
        curs = self.conn.cursor()
        try:
            curs.execute("drop table withhold")
        except psycopg2.ProgrammingError:
            self.conn.rollback()
        curs.execute("create table withhold (data int)")
        for i in (10, 20, 30):
            curs.execute("insert into withhold values (%s)", (i,))
        curs.close()

    def test_withhold(self):
        self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
            withhold=True)

        self._create_withhold_table()
        curs = self.conn.cursor("W")
        self.assertEqual(curs.withhold, False)
        curs.withhold = True
        self.assertEqual(curs.withhold, True)
        curs.execute("select data from withhold order by data")
        self.conn.commit()
        self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
        curs.close()

        curs = self.conn.cursor("W", withhold=True)
        self.assertEqual(curs.withhold, True)
        curs.execute("select data from withhold order by data")
        self.conn.commit()
        self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])

        curs = self.conn.cursor()
        curs.execute("drop table withhold")
        self.conn.commit()

    def test_withhold_no_begin(self):
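        # fetching from a withhold cursor after commit() must not start
        # a new transaction on the connection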
        self._create_withhold_table()
        curs = self.conn.cursor("w", withhold=True)
        curs.execute("select data from withhold order by data")
        self.assertEqual(curs.fetchone(), (10,))
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
        self.assertEqual(self.conn.info.transaction_status,
            psycopg2.extensions.TRANSACTION_STATUS_INTRANS)

        self.conn.commit()
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.info.transaction_status,
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)

        self.assertEqual(curs.fetchone(), (20,))
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.info.transaction_status,
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)

        curs.close()
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.info.transaction_status,
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)

    def test_withhold_autocommit(self):
        self._create_withhold_table()
        self.conn.commit()
        self.conn.autocommit = True
        curs = self.conn.cursor("w", withhold=True)
        curs.execute("select data from withhold order by data")

        self.assertEqual(curs.fetchone(), (10,))
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.info.transaction_status,
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)

        self.conn.commit()
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.info.transaction_status,
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)

        curs.close()
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.info.transaction_status,
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)

    def test_scrollable(self):
        self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
            scrollable=True)

        curs = self.conn.cursor()
        curs.execute("create table scrollable (data int)")
        curs.executemany("insert into scrollable values (%s)",
            [(i,) for i in range(100)])
        curs.close()

        for t in range(2):
            if not t:
                curs = self.conn.cursor("S")
                self.assertEqual(curs.scrollable, None)
                curs.scrollable = True
            else:
                curs = self.conn.cursor("S", scrollable=True)

            self.assertEqual(curs.scrollable, True)
            curs.itersize = 10

            # complex enough to make postgres cursors declare without
            # scroll/no scroll to fail
            curs.execute("""
                select x.data
                from scrollable x
                join scrollable y on x.data = y.data
                order by y.data""")
            for i, (n,) in enumerate(curs):
                self.assertEqual(i, n)

            curs.scroll(-1)
            for i in range(99, -1, -1):
                curs.scroll(-1)
                self.assertEqual(i, curs.fetchone()[0])
                curs.scroll(-1)

            curs.close()

    def test_not_scrollable(self):
        self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
            scrollable=False)

        curs = self.conn.cursor()
        curs.execute("create table scrollable (data int)")
        curs.executemany("insert into scrollable values (%s)",
            [(i,) for i in range(100)])
        curs.close()

        curs = self.conn.cursor("S")  # default scrollability
        curs.execute("select * from scrollable")
        self.assertEqual(curs.scrollable, None)
        curs.scroll(2)
        try:
            curs.scroll(-1)
        except psycopg2.OperationalError:
            return self.skipTest("can't evaluate non-scrollable cursor")
        curs.close()

        curs = self.conn.cursor("S", scrollable=False)
        self.assertEqual(curs.scrollable, False)
        curs.execute("select * from scrollable")
        curs.scroll(2)
        self.assertRaises(psycopg2.OperationalError, curs.scroll, -1)

    @slow
    @skip_before_postgres(8, 2)
    def test_iter_named_cursor_efficient(self):
        curs = self.conn.cursor('tmp')
        # if these records are fetched in the same roundtrip their
        # timestamp will not be influenced by the pause in Python world.
        curs.execute("""select clock_timestamp() from generate_series(1,2)""")
        i = iter(curs)
        t1 = next(i)[0]
        time.sleep(0.2)
        t2 = next(i)[0]
        self.assert_((t2 - t1).microseconds * 1e-6 < 0.1,
            "named cursor records fetched in 2 roundtrips (delta: %s)"
            % (t2 - t1))

    @skip_before_postgres(8, 0)
    def test_iter_named_cursor_default_itersize(self):
        curs = self.conn.cursor('tmp')
        curs.execute('select generate_series(1,50)')
        rv = [(r[0], curs.rownumber) for r in curs]
        # everything swallowed in one gulp
        self.assertEqual(rv, [(i, i) for i in range(1, 51)])

    @skip_before_postgres(8, 0)
    def test_iter_named_cursor_itersize(self):
        curs = self.conn.cursor('tmp')
        curs.itersize = 30
        curs.execute('select generate_series(1,50)')
        rv = [(r[0], curs.rownumber) for r in curs]
        # everything swallowed in two gulps
        self.assertEqual(rv, [(i, ((i - 1) % 30) + 1) for i in range(1, 51)])

    @skip_before_postgres(8, 0)
    def test_iter_named_cursor_rownumber(self):
        curs = self.conn.cursor('tmp')
        # note: this fails if itersize < dataset: internally we check
        # rownumber == rowcount to detect when to read another page, so we
        # would need an extra attribute to have a monotonic rownumber.
        curs.itersize = 20
        curs.execute('select generate_series(1,10)')
        for i, rec in enumerate(curs):
            self.assertEqual(i + 1, curs.rownumber)

    def test_description_attribs(self):
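        # each description item is the DBAPI 7-item sequence, with the same
        # fields also accessible as named attributes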
        curs = self.conn.cursor()
        curs.execute("""select
            3.14::decimal(10,2) as pi,
            'hello'::text as hi,
            '2010-02-18'::date as now;
            """)
        self.assertEqual(len(curs.description), 3)
        for c in curs.description:
            self.assertEqual(len(c), 7)  # DBAPI happy
            for a in ('name', 'type_code', 'display_size', 'internal_size',
                    'precision', 'scale', 'null_ok'):
                self.assert_(hasattr(c, a), a)

        c = curs.description[0]
        self.assertEqual(c.name, 'pi')
        self.assert_(c.type_code in psycopg2.extensions.DECIMAL.values)
        self.assert_(c.internal_size > 0)
        self.assertEqual(c.precision, 10)
        self.assertEqual(c.scale, 2)

        c = curs.description[1]
        self.assertEqual(c.name, 'hi')
        self.assert_(c.type_code in psycopg2.STRING.values)
        self.assert_(c.internal_size < 0)
        self.assertEqual(c.precision, None)
        self.assertEqual(c.scale, None)

        c = curs.description[2]
        self.assertEqual(c.name, 'now')
        self.assert_(c.type_code in psycopg2.extensions.DATE.values)
        self.assert_(c.internal_size > 0)
        self.assertEqual(c.precision, None)
        self.assertEqual(c.scale, None)

    def test_description_extra_attribs(self):
        curs = self.conn.cursor()
        curs.execute("""
            create table testcol (
                pi decimal(10,2),
                hi text)
            """)
        curs.execute("select oid from pg_class where relname = %s", ('testcol',))
        oid = curs.fetchone()[0]

        curs.execute("insert into testcol values (3.14, 'hello')")
        curs.execute("select hi, pi, 42 from testcol")
        self.assertEqual(curs.description[0].table_oid, oid)
        self.assertEqual(curs.description[0].table_column, 2)

        self.assertEqual(curs.description[1].table_oid, oid)
        self.assertEqual(curs.description[1].table_column, 1)

        self.assertEqual(curs.description[2].table_oid, None)
        self.assertEqual(curs.description[2].table_column, None)

    def test_pickle_description(self):
        curs = self.conn.cursor()
        curs.execute('SELECT 1 AS foo')
        description = curs.description

        pickled = pickle.dumps(description, pickle.HIGHEST_PROTOCOL)
        unpickled = pickle.loads(pickled)

        self.assertEqual(description, unpickled)

    @skip_before_postgres(8, 0)
    def test_named_cursor_stealing(self):
        # you can use a named cursor to iterate on a refcursor created
        # somewhere else
        cur1 = self.conn.cursor()
        cur1.execute("DECLARE test CURSOR WITHOUT HOLD "
            " FOR SELECT generate_series(1,7)")

        cur2 = self.conn.cursor('test')
        # can call fetch without execute
        self.assertEqual((1,), cur2.fetchone())
        self.assertEqual([(2,), (3,), (4,)], cur2.fetchmany(3))
        self.assertEqual([(5,), (6,), (7,)], cur2.fetchall())

    @skip_before_postgres(8, 2)
    def test_named_noop_close(self):
        cur = self.conn.cursor('test')
        cur.close()

    @skip_before_postgres(8, 2)
    def test_stolen_named_cursor_close(self):
        cur1 = self.conn.cursor()
        cur1.execute("DECLARE test CURSOR WITHOUT HOLD "
            " FOR SELECT generate_series(1,7)")
        cur2 = self.conn.cursor('test')
        cur2.close()

        cur1.execute("DECLARE test CURSOR WITHOUT HOLD "
            " FOR SELECT generate_series(1,7)")
        cur2 = self.conn.cursor('test')
        cur2.close()

    @skip_before_postgres(8, 0)
    def test_scroll(self):
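        # scroll() moves relative by default, supports absolute mode, and
        # scrolling out of the result set bounds raises an error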
        cur = self.conn.cursor()
        cur.execute("select generate_series(0,9)")
        cur.scroll(2)
        self.assertEqual(cur.fetchone(), (2,))
        cur.scroll(2)
        self.assertEqual(cur.fetchone(), (5,))
        cur.scroll(2, mode='relative')
        self.assertEqual(cur.fetchone(), (8,))
        cur.scroll(-1)
        self.assertEqual(cur.fetchone(), (8,))
        cur.scroll(-2)
        self.assertEqual(cur.fetchone(), (7,))
        cur.scroll(2, mode='absolute')
        self.assertEqual(cur.fetchone(), (2,))

        # on the boundary
        cur.scroll(0, mode='absolute')
        self.assertEqual(cur.fetchone(), (0,))
        self.assertRaises((IndexError, psycopg2.ProgrammingError),
            cur.scroll, -1, mode='absolute')
        cur.scroll(0, mode='absolute')
        self.assertRaises((IndexError, psycopg2.ProgrammingError),
            cur.scroll, -1)

        cur.scroll(9, mode='absolute')
        self.assertEqual(cur.fetchone(), (9,))
        self.assertRaises((IndexError, psycopg2.ProgrammingError),
            cur.scroll, 10, mode='absolute')
        cur.scroll(9, mode='absolute')
        self.assertRaises((IndexError, psycopg2.ProgrammingError),
            cur.scroll, 1)

    @skip_before_postgres(8, 0)
    def test_scroll_named(self):
        cur = self.conn.cursor('tmp', scrollable=True)
        cur.execute("select generate_series(0,9)")
        cur.scroll(2)
        self.assertEqual(cur.fetchone(), (2,))
        cur.scroll(2)
        self.assertEqual(cur.fetchone(), (5,))
        cur.scroll(2, mode='relative')
        self.assertEqual(cur.fetchone(), (8,))
        cur.scroll(9, mode='absolute')
        self.assertEqual(cur.fetchone(), (9,))

    def test_bad_subclass(self):
        # check that we get an error message instead of a segfault
        # for badly written subclasses.
        # see https://stackoverflow.com/questions/22019341/
        class StupidCursor(psycopg2.extensions.cursor):
            def __init__(self, *args, **kwargs):
                # I am stupid so not calling superclass init
                pass

        cur = StupidCursor()
        self.assertRaises(psycopg2.InterfaceError, cur.execute, 'select 1')
        self.assertRaises(psycopg2.InterfaceError, cur.executemany,
            'select 1', [])

    def test_callproc_badparam(self):
        cur = self.conn.cursor()
        self.assertRaises(TypeError, cur.callproc, 'lower', 42)

    # It would be inappropriate to test callproc's named parameters in the
    # DBAPI2.0 test section because they are a psycopg2 extension.
    @skip_before_postgres(9, 0)
    def test_callproc_dict(self):
        # This parameter name tests for injection and quote escaping
        paramname = '''
            Robert'); drop table "students" --
            '''.strip()
        escaped_paramname = '"%s"' % paramname.replace('"', '""')
        procname = 'pg_temp.randall'

        cur = self.conn.cursor()

        # Set up the temporary function
        cur.execute('''
            CREATE FUNCTION %s(%s INT)
            RETURNS INT AS
            'SELECT $1 * $1'
            LANGUAGE SQL
            ''' % (procname, escaped_paramname))

        # Make sure callproc works right
        cur.callproc(procname, {paramname: 2})
        self.assertEqual(cur.fetchone()[0], 4)

        # Make sure callproc fails right
        failing_cases = [
            ({paramname: 2, 'foo': 'bar'}, psycopg2.ProgrammingError),
            ({paramname: '2'}, psycopg2.ProgrammingError),
            ({paramname: 'two'}, psycopg2.ProgrammingError),
            ({u'bj\xc3rn': 2}, psycopg2.ProgrammingError),
            ({3: 2}, TypeError),
            ({self: 2}, TypeError),
        ]
        for parameter_sequence, exception in failing_cases:
            self.assertRaises(exception, cur.callproc, procname, parameter_sequence)
            self.conn.rollback()

    @skip_if_no_superuser
    @skip_if_windows
    @skip_before_postgres(8, 4)
    def test_external_close_sync(self):
        # If a "victim" connection is closed by a "control" connection
        # behind psycopg2's back, psycopg2 always handles it correctly:
        # raise OperationalError, set conn.closed to 2. This reproduces
        # issue #443, a race between control_conn closing victim_conn and
        # psycopg2 noticing.
        control_conn = self.conn
        connect_func = self.connect

        def wait_func(conn):
            pass
        self._test_external_close(control_conn, connect_func, wait_func)

    @skip_if_no_superuser
    @skip_if_windows
    @skip_before_postgres(8, 4)
    def test_external_close_async(self):
        # Issue #443 is in the async code too. Since the fix is duplicated,
        # so is the test.
        control_conn = self.conn

        def connect_func():
            return self.connect(async_=True)
        wait_func = psycopg2.extras.wait_select
        self._test_external_close(control_conn, connect_func, wait_func)

    def _test_external_close(self, control_conn, connect_func, wait_func):
        # The short sleep before using victim_conn the second time makes it
        # much more likely to lose the race and see the bug. Repeating the
        # test several times makes it even more likely.
        for i in range(10):
            victim_conn = connect_func()
            wait_func(victim_conn)

            with victim_conn.cursor() as cur:
                cur.execute('select pg_backend_pid()')
                wait_func(victim_conn)
                pid1 = cur.fetchall()[0][0]

            with control_conn.cursor() as cur:
                cur.execute('select pg_terminate_backend(%s)', (pid1,))

            time.sleep(0.001)

            def f():
                with victim_conn.cursor() as cur:
                    cur.execute('select 1')
                    wait_func(victim_conn)

            self.assertRaises(psycopg2.OperationalError, f)
            self.assertEqual(victim_conn.closed, 2)

    @skip_before_postgres(8, 2)
    def test_rowcount_on_executemany_returning(self):
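        # rowcount must report the total number of rows affected by
        # executemany(), with or without a RETURNING clause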
        cur = self.conn.cursor()
        cur.execute("create table execmany(id serial primary key, data int)")
        cur.executemany(
            "insert into execmany (data) values (%s)",
            [(i,) for i in range(4)])
        self.assertEqual(cur.rowcount, 4)

        cur.executemany(
            "insert into execmany (data) values (%s) returning data",
            [(i,) for i in range(5)])
        self.assertEqual(cur.rowcount, 5)

    @skip_before_postgres(9)
    def test_pgresult_ptr(self):
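        # pgresult_ptr exposes the raw PGresult pointer (poked here via
        # ctypes) and goes back to None when the cursor is closed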
        curs = self.conn.cursor()
        self.assert_(curs.pgresult_ptr is None)

        curs.execute("select 'x'")
        self.assert_(curs.pgresult_ptr is not None)

        try:
            f = self.libpq.PQcmdStatus
        except AttributeError:
            pass
        else:
            f.argtypes = [ctypes.c_void_p]
            f.restype = ctypes.c_char_p
            status = f(curs.pgresult_ptr)
            self.assertEqual(status, b'SELECT 1')

        curs.close()
        self.assert_(curs.pgresult_ptr is None)


def test_suite():
    return unittest.TestLoader().loadTestsFromName(__name__)


if __name__ == "__main__":
    unittest.main()