From e32e1b834e6bd4d938200fccfe17579f7f775043 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Mon, 1 Jun 2015 11:35:05 +0200 Subject: [PATCH 001/151] Add support for streaming replication protocol Introduce ReplicationConnection and ReplicationCursor classes, that incapsulate initiation of special type of PostgreSQL connection and handling of special replication commands only available in this special connection mode. The handling of stream of replication data from the server is modelled largely after the existing support for "COPY table TO file" command and pg_recvlogical tool supplied with PostgreSQL (though, it can also be used for physical replication.) --- doc/src/extras.rst | 122 +++++++++++++++++ lib/extras.py | 138 +++++++++++++++++++ psycopg/cursor.h | 6 + psycopg/cursor_type.c | 39 ++++++ psycopg/pqpath.c | 312 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 617 insertions(+) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 36ef0132..9bc302e2 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -141,6 +141,128 @@ Logging cursor .. autoclass:: MinTimeLoggingCursor +Replication cursor +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: ReplicationConnection + + This connection factory class can be used to open a special type of + connection that is used for streaming replication. + + Example:: + + from psycopg2.extras import ReplicationConnection, REPLICATION_PHYSICAL, REPLICATION_LOGICAL + conn = psycopg2.connect(dsn, connection_factory=ReplicationConnection) + cur = conn.cursor() + +.. seealso:: + + - PostgreSQL `Replication protocol`__ + + .. __: http://www.postgresql.org/docs/current/static/protocol-replication.html + +.. autoclass:: ReplicationCursor + + .. method:: identify_system() + + Get information about the cluster status in form of a dict with + ``systemid``, ``timeline``, ``xlogpos`` and ``dbname`` as keys. + + Example:: + + >>> print cur.identify_system() + {'timeline': 1, 'systemid': '1234567890123456789', 'dbname': 'test', 'xlogpos': '0/1ABCDEF'} + + .. method:: create_replication_slot(slot_type, slot_name, output_plugin=None) + + Create streaming replication slot. + + :param slot_type: type of replication: either `REPLICATION_PHYSICAL` or + `REPLICATION_LOGICAL` + :param slot_name: name of the replication slot to be created + :param output_plugin: name of the logical decoding output plugin to use + (logical replication only) + + Example:: + + cur.create_replication_slot(REPLICATION_LOGICAL, "testslot", "test_decoding") + + .. method:: drop_replication_slot(slot_name) + + Drop streaming replication slot. + + :param slot_name: name of the replication slot to drop + + Example:: + + cur.drop_replication_slot("testslot") + + .. method:: start_replication(file, slot_type, slot_name=None, start_lsn=None, timeline=0, keepalive_interval=10, options=None) + + Start and consume replication stream. 
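+      Internally this method builds the ``START_REPLICATION`` command from the
+      given arguments and hands it, together with ``file`` and
+      ``keepalive_interval``, to the lower-level
+      ``start_replication_expert()`` cursor method.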
+ + :param file: a file-like object to write replication stream messages to + :param slot_type: type of replication: either `REPLICATION_PHYSICAL` or + `REPLICATION_LOGICAL` + :param slot_name: name of the replication slot to use (required for + logical replication) + :param start_lsn: the point in replication stream (WAL position) to start + from, in the form ``XXX/XXX`` (forward-slash separated + pair of hexadecimals) + :param timeline: WAL history timeline to start streaming from (optional, + can only be used with physical replication) + :param keepalive_interval: interval (in seconds) to send keepalive + messages to the server, in case there was no + communication during that period of time + :param options: an dictionary of options to pass to logical replication + slot + + The ``keepalive_interval`` must be greater than zero. + + This method never returns unless an error message is sent from the + server, or the server closes connection, or there is an exception in the + ``write()`` method of the ``file`` object. + + One can even use ``sys.stdout`` as the destination (this is only good for + testing purposes, however):: + + >>> cur.start_replication(sys.stdout, "testslot") + ... + + This method acts much like the `~cursor.copy_to()` with an important + distinction that ``write()`` method return value is dirving the + server-side replication cursor. In order to report to the server that + the all the messages up to the current one have been stored reliably, one + should return true value (i.e. something that satisfies ``if retval:`` + conidtion) from the ``write`` callback:: + + class ReplicationStreamWriter(object): + def write(self, msg): + if store_message_reliably(msg): + return True + + cur.start_replication(writer, "testslot") + ... + + .. note:: + + One needs to be aware that failure to update the server-side cursor + on any one replication slot properly by constantly consuming and + reporting success to the server can eventually lead to "disk full" + condition on the server, because the server retains all the WAL + segments that might be needed to stream the changes via currently + open replication slots. + + Drop any open replication slots that are no longer being used. The + list of open slots can be obtained by running a query like ``SELECT * + FROM pg_replication_slots``. + +.. data:: REPLICATION_PHYSICAL + +.. data:: REPLICATION_LOGICAL + +.. index:: + pair: Cursor; Replication .. index:: diff --git a/lib/extras.py b/lib/extras.py index c9f1cbcd..4d92e6fa 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -437,6 +437,144 @@ class MinTimeLoggingCursor(LoggingCursor): return LoggingCursor.callproc(self, procname, vars) +class ReplicationConnection(_connection): + """A connection that uses `ReplicationCursor` automatically.""" + + def __init__(self, *args, **kwargs): + """Initializes a replication connection, by adding appropriate replication parameter to the provided dsn arguments.""" + + if len(args): + dsn = args[0] + + # FIXME: could really use parse_dsn here + + if dsn.startswith('postgres://') or dsn.startswith('postgresql://'): + # poor man's url parsing + if dsn.rfind('?') > 0: + if not dsn.endswith('?'): + dsn += '&' + else: + dsn += '?' 
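+            # not a connection URI: treat it as a space-separated
+            # key=value dsn string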
+ else: + dsn += ' ' + dsn += 'replication=database' + args = [dsn] + list(args[1:]) + else: + dbname = kwargs.get('dbname', None) + if dbname is None: + kwargs['dbname'] = 'replication' + + if kwargs.get('replication', None) is None: + kwargs['replication'] = 'database' if dbname else 'true' + + super(ReplicationConnection, self).__init__(*args, **kwargs) + + # prevent auto-issued BEGIN statements + self.autocommit = True + + def cursor(self, *args, **kwargs): + kwargs.setdefault('cursor_factory', ReplicationCursor) + return super(ReplicationConnection, self).cursor(*args, **kwargs) + + +"""Streamging replication types.""" +REPLICATION_PHYSICAL = 0 +REPLICATION_LOGICAL = 1 + +class ReplicationCursor(_cursor): + """A cursor used for replication commands.""" + + def identify_system(self): + """Get information about the cluster status.""" + + self.execute("IDENTIFY_SYSTEM") + return dict(zip(['systemid', 'timeline', 'xlogpos', 'dbname'], + self.fetchall()[0])) + + def quote_ident(self, ident): + # FIXME: use PQescapeIdentifier or psycopg_escape_identifier_easy, somehow + return '"%s"' % ident.replace('"', '""') + + def create_replication_slot(self, slot_type, slot_name, output_plugin=None): + """Create streaming replication slot.""" + + command = "CREATE_REPLICATION_SLOT %s " % self.quote_ident(slot_name) + + if slot_type == REPLICATION_LOGICAL: + if output_plugin is None: + raise RuntimeError("output_plugin is required for logical replication slot") + + command += "LOGICAL %s" % self.quote_ident(output_plugin) + + elif slot_type == REPLICATION_PHYSICAL: + if output_plugin is not None: + raise RuntimeError("output_plugin is not applicable to physical replication") + + command += "PHYSICAL" + + else: + raise RuntimeError("unrecognized replication slot type") + + return self.execute(command) + + def drop_replication_slot(self, slot_name): + """Drop streaming replication slot.""" + + command = "DROP_REPLICATION_SLOT %s" % self.quote_ident(slot_name) + return self.execute(command) + + def start_replication(self, o, slot_type, slot_name=None, start_lsn=None, + timeline=0, keepalive_interval=10, options=None): + """Start and consume replication stream.""" + + if keepalive_interval <= 0: + raise RuntimeError("keepalive_interval must be > 0: %d" % keepalive_interval) + + command = "START_REPLICATION " + + if slot_type == REPLICATION_LOGICAL and slot_name is None: + raise RuntimeError("slot_name is required for logical replication slot") + + if slot_name: + command += "SLOT %s " % self.quote_ident(slot_name) + + if slot_type == REPLICATION_LOGICAL: + command += "LOGICAL " + elif slot_type == REPLICATION_PHYSICAL: + command += "PHYSICAL " + else: + raise RuntimeError("unrecognized replication slot type") + + if start_lsn is None: + start_lsn = '0/0' + + # reparse lsn to catch possible garbage + lsn = start_lsn.split('/') + command += "%X/%X" % (int(lsn[0], 16), int(lsn[1], 16)) + + if timeline != 0: + if slot_type == REPLICATION_LOGICAL: + raise RuntimeError("cannot specify timeline for logical replication") + + if timeline < 0: + raise RuntimeError("timeline must be >= 0: %d" % timeline) + + command += " TIMELINE %d" % timeline + + if options: + if slot_type == REPLICATION_PHYSICAL: + raise RuntimeError("cannot specify plugin options for physical replication") + + command += " (" + for k,v in options.iteritems(): + if not command.endswith('('): + command += ", " + command += "%s %s" % (self.quote_ident(k), _A(str(v)).getquoted()) + command += ")" + + return self.start_replication_expert(o, command, 
keepalive_interval) + + # a dbtype and adapter for Python UUID type class UUID_adapter(object): diff --git a/psycopg/cursor.h b/psycopg/cursor.h index e291d45f..93b697b2 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -72,6 +72,8 @@ struct cursorObject { #define DEFAULT_COPYSIZE 16384 #define DEFAULT_COPYBUFF 8192 + int keepalive_interval; /* interval for keepalive messages in replication mode */ + PyObject *tuple_factory; /* factory for result tuples */ PyObject *tzinfo_factory; /* factory for tzinfo objects */ @@ -88,6 +90,10 @@ struct cursorObject { }; +/* streaming replication modes */ +#define CURSOR_REPLICATION_PHYSICAL 0 +#define CURSOR_REPLICATION_LOGICAL 1 + /* C-callable functions in cursor_int.c and cursor_type.c */ BORROWED HIDDEN PyObject *curs_get_cast(cursorObject *self, PyObject *oid); diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index cd8d5ca3..954e764d 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -1579,6 +1579,43 @@ exit: return res; } +#define psyco_curs_start_replication_expert_doc \ +"start_replication_expert(file, command, keepalive_interval) -- Start and consume replication stream with direct command." + +static PyObject * +psyco_curs_start_replication_expert(cursorObject *self, PyObject *args) +{ + PyObject *file, *res = NULL; + char *command; + int keepalive_interval; + + if (!PyArg_ParseTuple(args, "O&si", + _psyco_curs_has_write_check, &file, + &command, &keepalive_interval)) { + return NULL; + } + + EXC_IF_CURS_CLOSED(self); + EXC_IF_CURS_ASYNC(self, start_replication_expert); + EXC_IF_GREEN(start_replication_expert); + EXC_IF_TPC_PREPARED(self->conn, start_replication_expert); + + Dprintf("psyco_curs_start_replication_expert: command = %s", command); + + self->copysize = 0; + Py_INCREF(file); + self->copyfile = file; + self->keepalive_interval = keepalive_interval; + + if (pq_execute(self, command, 0, 1 /* no_result */, 1 /* no_begin */) >= 0) { + res = Py_None; + Py_INCREF(Py_None); + } + Py_CLEAR(self->copyfile); + + return res; +} + /* extension: closed - return true if cursor is closed */ #define psyco_curs_closed_doc \ @@ -1753,6 +1790,8 @@ static struct PyMethodDef cursorObject_methods[] = { METH_VARARGS|METH_KEYWORDS, psyco_curs_copy_to_doc}, {"copy_expert", (PyCFunction)psyco_curs_copy_expert, METH_VARARGS|METH_KEYWORDS, psyco_curs_copy_expert_doc}, + {"start_replication_expert", (PyCFunction)psyco_curs_start_replication_expert, + METH_VARARGS, psyco_curs_start_replication_expert_doc}, {NULL} }; diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 5e1974be..55025d82 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -40,7 +40,14 @@ #include "psycopg/pgtypes.h" #include "psycopg/error.h" +#include "postgres_fe.h" +#include "access/xlog_internal.h" +#include "common/fe_memutils.h" +#include "libpq-fe.h" + #include +#include +#include extern HIDDEN PyObject *psyco_DescriptionType; @@ -1514,6 +1521,302 @@ exit: return ret; } +/* support routines taken from pg_basebackup/streamutil.c */ +/* + * Frontend version of GetCurrentTimestamp(), since we are not linked with + * backend code. The protocol always uses integer timestamps, regardless of + * server setting. + */ +static int64 +feGetCurrentTimestamp(void) +{ + int64 result; + struct timeval tp; + + gettimeofday(&tp, NULL); + + result = (int64) tp.tv_sec - + ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); + + result = (result * USECS_PER_SEC) + tp.tv_usec; + + return result; +} + +/* + * Converts an int64 to network byte order. 
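+ * (The 64-bit value is sent as two 32-bit halves, each converted with
+ * htonl(), most significant half first.)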
+ */ +static void +fe_sendint64(int64 i, char *buf) +{ + uint32 n32; + + /* High order half first, since we're doing MSB-first */ + n32 = (uint32) (i >> 32); + n32 = htonl(n32); + memcpy(&buf[0], &n32, 4); + + /* Now the low order half */ + n32 = (uint32) i; + n32 = htonl(n32); + memcpy(&buf[4], &n32, 4); +} + +/* + * Converts an int64 from network byte order to native format. + */ +static int64 +fe_recvint64(char *buf) +{ + int64 result; + uint32 h32; + uint32 l32; + + memcpy(&h32, buf, 4); + memcpy(&l32, buf + 4, 4); + h32 = ntohl(h32); + l32 = ntohl(l32); + + result = h32; + result <<= 32; + result |= l32; + + return result; +} + +static int +sendFeedback(PGconn *conn, XLogRecPtr written_lsn, XLogRecPtr fsync_lsn, + int replyRequested) +{ + char replybuf[1 + 8 + 8 + 8 + 8 + 1]; + int len = 0; + + Dprintf("_pq_copy_both_v3: confirming write up to %X/%X, flush to %X/%X\n", + (uint32) (written_lsn >> 32), (uint32) written_lsn, + (uint32) (fsync_lsn >> 32), (uint32) fsync_lsn); + + replybuf[len] = 'r'; + len += 1; + fe_sendint64(written_lsn, &replybuf[len]); /* write */ + len += 8; + fe_sendint64(fsync_lsn, &replybuf[len]); /* flush */ + len += 8; + fe_sendint64(InvalidXLogRecPtr, &replybuf[len]); /* apply */ + len += 8; + fe_sendint64(feGetCurrentTimestamp(), &replybuf[len]); /* sendTime */ + len += 8; + replybuf[len] = replyRequested ? 1 : 0; /* replyRequested */ + len += 1; + + if (PQputCopyData(conn, replybuf, len) <= 0 || PQflush(conn)) { + return 0; + } + + return 1; +} + +/* used for streaming replication only */ +static int +_pq_copy_both_v3(cursorObject *curs) +{ + PyObject *tmp = NULL; + PyObject *write_func = NULL; + PyObject *obj = NULL; + int ret = -1; + int is_text; + + PGconn *conn; + char *buffer = NULL; + fd_set fds; + struct timeval last_comm, curr_time, ping_time, time_diff; + int len, hdr, reply, sel; + + XLogRecPtr written_lsn = InvalidXLogRecPtr; + XLogRecPtr fsync_lsn = InvalidXLogRecPtr; + XLogRecPtr wal_end = InvalidXLogRecPtr; + + if (!curs->copyfile) { + PyErr_SetString(ProgrammingError, + "can't execute START_REPLICATION: use the start_replication() method instead"); + goto exit; + } + + if (curs->keepalive_interval <= 0) { + PyErr_Format(PyExc_RuntimeError, "keepalive_interval must be > 0: %d", + curs->keepalive_interval); + goto exit; + } + + if (!(write_func = PyObject_GetAttrString(curs->copyfile, "write"))) { + Dprintf("_pq_copy_both_v3: can't get o.write"); + goto exit; + } + + /* if the file is text we must pass it unicode. */ + if (-1 == (is_text = psycopg_is_text_file(curs->copyfile))) { + goto exit; + } + + CLEARPGRES(curs->pgres); + + /* timestamp of last communication with the server */ + gettimeofday(&last_comm, NULL); + + conn = curs->conn->pgconn; + + while (1) { + len = PQgetCopyData(conn, &buffer, 1 /* async! 
*/); + if (len < 0) { + break; + } + if (len == 0) { + FD_ZERO(&fds); + FD_SET(PQsocket(conn), &fds); + + /* set up timeout according to keepalive_interval, but no less than 1 second */ + gettimeofday(&curr_time, NULL); + + ping_time = last_comm; + ping_time.tv_sec += curs->keepalive_interval; + + if (timercmp(&ping_time, &curr_time, >)) { + timersub(&ping_time, &curr_time, &time_diff); + + Py_BEGIN_ALLOW_THREADS; + sel = select(PQsocket(conn) + 1, &fds, NULL, NULL, &time_diff); + Py_END_ALLOW_THREADS; + } + else { + sel = 0; /* pretend select() timed out */ + } + + if (sel < 0) { + if (errno != EINTR) { + PyErr_SetFromErrno(PyExc_OSError); + goto exit; + } + if (PyErr_CheckSignals()) { + goto exit; + } + continue; + } + + if (sel > 0) { + if (!PQconsumeInput(conn)) { + Dprintf("_pq_copy_both_v3: PQconsumeInput failed"); + pq_raise(curs->conn, curs, NULL); + goto exit; + } + } + else { /* timeout */ + if (!sendFeedback(conn, written_lsn, fsync_lsn, false)) { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + } + gettimeofday(&last_comm, NULL); + continue; + } + if (len > 0 && buffer) { + gettimeofday(&last_comm, NULL); + + Dprintf("_pq_copy_both_v3: msg=%c, len=%d", buffer[0], len); + if (buffer[0] == 'w') { + /* msgtype(1), dataStart(8), walEnd(8), sendTime(8) */ + hdr = 1 + 8 + 8 + 8; + if (len < hdr + 1) { + PyErr_Format(PyExc_RuntimeError, + "streaming header too small in data message: %d", len); + goto exit; + } + + wal_end = fe_recvint64(buffer + 1 + 8); + + if (is_text) { + obj = PyUnicode_Decode(buffer + hdr, len - hdr, curs->conn->codec, NULL); + } + else { + obj = Bytes_FromStringAndSize(buffer + hdr, len - hdr); + } + if (!obj) { goto exit; } + + tmp = PyObject_CallFunctionObjArgs(write_func, obj, NULL); + Py_DECREF(obj); + + if (tmp == NULL) { + Dprintf("_pq_copy_both_v3: write_func returned NULL"); + goto exit; + } + + written_lsn = Max(wal_end, written_lsn); + + /* if write() returned true-ish, we confirm LSN with the server */ + if (PyObject_IsTrue(tmp)) { + fsync_lsn = written_lsn; + + if (!sendFeedback(conn, written_lsn, fsync_lsn, false)) { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + gettimeofday(&last_comm, NULL); + } + Py_DECREF(tmp); + + } + else if (buffer[0] == 'k') { + /* msgtype(1), walEnd(8), sendTime(8), reply(1) */ + hdr = 1 + 8 + 8; + if (len < hdr + 1) { + PyErr_Format(PyExc_RuntimeError, + "streaming header too small in keepalive message: %d", len); + goto exit; + } + + reply = buffer[hdr]; + if (reply) { + if (!sendFeedback(conn, written_lsn, fsync_lsn, false)) { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + gettimeofday(&last_comm, NULL); + } + } + else { + PyErr_Format(PyExc_RuntimeError, + "unrecognized streaming message type: \"%c\"", buffer[0]); + goto exit; + } + + /* buffer is allocated on every PQgetCopyData() call */ + PQfreemem(buffer); + buffer = NULL; + } + } + + if (len == -2) { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + if (len == -1) { + curs->pgres = PQgetResult(curs->conn->pgconn); + + if (curs->pgres && PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR) + pq_raise(curs->conn, curs, NULL); + + CLEARPGRES(curs->pgres); + } + + ret = 1; + +exit: + if (buffer) { + PQfreemem(buffer); + } + + Py_XDECREF(write_func); + return ret; +} + int pq_fetch(cursorObject *curs, int no_result) { @@ -1573,6 +1876,15 @@ pq_fetch(cursorObject *curs, int no_result) CLEARPGRES(curs->pgres); break; + case PGRES_COPY_BOTH: + Dprintf("pq_fetch: data from a streaming replication slot (no tuples)"); + curs->rowcount = -1; + ex = 
_pq_copy_both_v3(curs); + /* error caught by out glorious notice handler */ + if (PyErr_Occurred()) ex = -1; + CLEARPGRES(curs->pgres); + break; + case PGRES_TUPLES_OK: if (!no_result) { Dprintf("pq_fetch: got tuples"); From 80da76d43fb23e5ba915eac499927a12d4526496 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 2 Jun 2015 11:42:56 +0200 Subject: [PATCH 002/151] Get rid of postgres internal includes; check for Win32 for htonl() --- psycopg/pqpath.c | 46 ++++++++++++++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 16 deletions(-) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 55025d82..9e4424a8 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -40,15 +40,14 @@ #include "psycopg/pgtypes.h" #include "psycopg/error.h" -#include "postgres_fe.h" -#include "access/xlog_internal.h" -#include "common/fe_memutils.h" #include "libpq-fe.h" -#include -#include +/* htonl, ntohl */ +#ifdef _WIN32 +#include +#else #include - +#endif extern HIDDEN PyObject *psyco_DescriptionType; @@ -1522,20 +1521,33 @@ exit: } /* support routines taken from pg_basebackup/streamutil.c */ +/* type and constant definitions from internal postgres includes */ +typedef unsigned int uint32; +typedef unsigned PG_INT64_TYPE XLogRecPtr; + +#define InvalidXLogRecPtr ((XLogRecPtr) 0) + +/* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */ +#define UNIX_EPOCH_JDATE 2440588 /* == date2j(1970, 1, 1) */ +#define POSTGRES_EPOCH_JDATE 2451545 /* == date2j(2000, 1, 1) */ + +#define SECS_PER_DAY 86400 +#define USECS_PER_SEC 1000000LL + /* * Frontend version of GetCurrentTimestamp(), since we are not linked with * backend code. The protocol always uses integer timestamps, regardless of * server setting. */ -static int64 +static pg_int64 feGetCurrentTimestamp(void) { - int64 result; + pg_int64 result; struct timeval tp; gettimeofday(&tp, NULL); - result = (int64) tp.tv_sec - + result = (pg_int64) tp.tv_sec - ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); result = (result * USECS_PER_SEC) + tp.tv_usec; @@ -1547,7 +1559,7 @@ feGetCurrentTimestamp(void) * Converts an int64 to network byte order. */ static void -fe_sendint64(int64 i, char *buf) +fe_sendint64(pg_int64 i, char *buf) { uint32 n32; @@ -1565,10 +1577,10 @@ fe_sendint64(int64 i, char *buf) /* * Converts an int64 from network byte order to native format. 
*/ -static int64 +static pg_int64 fe_recvint64(char *buf) { - int64 result; + pg_int64 result; uint32 h32; uint32 l32; @@ -1709,7 +1721,7 @@ _pq_copy_both_v3(cursorObject *curs) } } else { /* timeout */ - if (!sendFeedback(conn, written_lsn, fsync_lsn, false)) { + if (!sendFeedback(conn, written_lsn, fsync_lsn, 0)) { pq_raise(curs->conn, curs, NULL); goto exit; } @@ -1748,13 +1760,15 @@ _pq_copy_both_v3(cursorObject *curs) goto exit; } - written_lsn = Max(wal_end, written_lsn); + /* update the LSN position we've written up to */ + if (written_lsn < wal_end) + written_lsn = wal_end; /* if write() returned true-ish, we confirm LSN with the server */ if (PyObject_IsTrue(tmp)) { fsync_lsn = written_lsn; - if (!sendFeedback(conn, written_lsn, fsync_lsn, false)) { + if (!sendFeedback(conn, written_lsn, fsync_lsn, 0)) { pq_raise(curs->conn, curs, NULL); goto exit; } @@ -1774,7 +1788,7 @@ _pq_copy_both_v3(cursorObject *curs) reply = buffer[hdr]; if (reply) { - if (!sendFeedback(conn, written_lsn, fsync_lsn, false)) { + if (!sendFeedback(conn, written_lsn, fsync_lsn, 0)) { pq_raise(curs->conn, curs, NULL); goto exit; } From 44b705f88f45211a778c07f909b2aa1d91da05e6 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 2 Jun 2015 16:52:48 +0200 Subject: [PATCH 003/151] Improve identify_system: don't hardcode column names --- lib/extras.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/extras.py b/lib/extras.py index 4d92e6fa..92dd8192 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -488,7 +488,7 @@ class ReplicationCursor(_cursor): """Get information about the cluster status.""" self.execute("IDENTIFY_SYSTEM") - return dict(zip(['systemid', 'timeline', 'xlogpos', 'dbname'], + return dict(zip([_.name for _ in self.description], self.fetchall()[0])) def quote_ident(self, ident): From f14521f8cb567c98814d392ba7ec196b4a7df77c Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 3 Jun 2015 14:10:20 +0200 Subject: [PATCH 004/151] Add libpq_support.c and win32_support.c Move libpq-specific code for streaming replication support into a separate file. Also provide gettimeofday() on Win32, implementation copied from Postgres core. --- psycopg/libpq_support.c | 111 ++++++++++++++++++++++++++++++++++++++++ psycopg/libpq_support.h | 40 +++++++++++++++ psycopg/pqpath.c | 83 ++---------------------------- psycopg/win32_support.c | 61 ++++++++++++++++++++++ psycopg/win32_support.h | 36 +++++++++++++ setup.py | 2 + 6 files changed, 254 insertions(+), 79 deletions(-) create mode 100644 psycopg/libpq_support.c create mode 100644 psycopg/libpq_support.h create mode 100644 psycopg/win32_support.c create mode 100644 psycopg/win32_support.h diff --git a/psycopg/libpq_support.c b/psycopg/libpq_support.c new file mode 100644 index 00000000..95a3ebc6 --- /dev/null +++ b/psycopg/libpq_support.c @@ -0,0 +1,111 @@ +/* libpq_support.c - functions not provided by libpq, but which are + * required for advanced communication with the server, such as + * streaming replication + * + * Copyright (C) 2003-2015 Federico Di Gregorio + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/libpq_support.h" + +/* htonl(), ntohl() */ +#ifdef _WIN32 +#include +/* gettimeofday() */ +#include "psycopg/win32_support.h" +#else +#include +#endif + +/* support routines taken from pg_basebackup/streamutil.c */ + +/* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */ +#define UNIX_EPOCH_JDATE 2440588 /* == date2j(1970, 1, 1) */ +#define POSTGRES_EPOCH_JDATE 2451545 /* == date2j(2000, 1, 1) */ + +#define SECS_PER_DAY 86400 +#define USECS_PER_SEC 1000000LL + +/* + * Frontend version of GetCurrentTimestamp(), since we are not linked with + * backend code. The protocol always uses integer timestamps, regardless of + * server setting. + */ +pg_int64 +feGetCurrentTimestamp(void) +{ + pg_int64 result; + struct timeval tp; + + gettimeofday(&tp, NULL); + + result = (pg_int64) tp.tv_sec - + ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); + + result = (result * USECS_PER_SEC) + tp.tv_usec; + + return result; +} + +/* + * Converts an int64 to network byte order. + */ +void +fe_sendint64(pg_int64 i, char *buf) +{ + uint32 n32; + + /* High order half first, since we're doing MSB-first */ + n32 = (uint32) (i >> 32); + n32 = htonl(n32); + memcpy(&buf[0], &n32, 4); + + /* Now the low order half */ + n32 = (uint32) i; + n32 = htonl(n32); + memcpy(&buf[4], &n32, 4); +} + +/* + * Converts an int64 from network byte order to native format. + */ +pg_int64 +fe_recvint64(char *buf) +{ + pg_int64 result; + uint32 h32; + uint32 l32; + + memcpy(&h32, buf, 4); + memcpy(&l32, buf + 4, 4); + h32 = ntohl(h32); + l32 = ntohl(l32); + + result = h32; + result <<= 32; + result |= l32; + + return result; +} diff --git a/psycopg/libpq_support.h b/psycopg/libpq_support.h new file mode 100644 index 00000000..007f5e18 --- /dev/null +++ b/psycopg/libpq_support.h @@ -0,0 +1,40 @@ +/* libpq_support.h - definitions for libpq_support.c + * + * Copyright (C) 2003-2015 Federico Di Gregorio + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public + * License for more details. + */ +#ifndef PSYCOPG_LIBPQ_SUPPORT_H +#define PSYCOPG_LIBPQ_SUPPORT_H 1 + +#include "psycopg/config.h" + +/* type and constant definitions from internal postgres includes */ +typedef unsigned int uint32; +typedef unsigned PG_INT64_TYPE XLogRecPtr; + +#define InvalidXLogRecPtr ((XLogRecPtr) 0) + +HIDDEN pg_int64 feGetCurrentTimestamp(void); +HIDDEN void fe_sendint64(pg_int64 i, char *buf); +HIDDEN pg_int64 fe_recvint64(char *buf); + +#endif /* !defined(PSYCOPG_LIBPQ_SUPPORT_H) */ diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 9e4424a8..4fb4771e 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -40,13 +40,14 @@ #include "psycopg/pgtypes.h" #include "psycopg/error.h" +#include "psycopg/libpq_support.h" #include "libpq-fe.h" -/* htonl, ntohl */ #ifdef _WIN32 +/* select() */ #include -#else -#include +/* gettimeofday() */ +#include "win32_support.h" #endif extern HIDDEN PyObject *psyco_DescriptionType; @@ -1520,82 +1521,6 @@ exit: return ret; } -/* support routines taken from pg_basebackup/streamutil.c */ -/* type and constant definitions from internal postgres includes */ -typedef unsigned int uint32; -typedef unsigned PG_INT64_TYPE XLogRecPtr; - -#define InvalidXLogRecPtr ((XLogRecPtr) 0) - -/* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */ -#define UNIX_EPOCH_JDATE 2440588 /* == date2j(1970, 1, 1) */ -#define POSTGRES_EPOCH_JDATE 2451545 /* == date2j(2000, 1, 1) */ - -#define SECS_PER_DAY 86400 -#define USECS_PER_SEC 1000000LL - -/* - * Frontend version of GetCurrentTimestamp(), since we are not linked with - * backend code. The protocol always uses integer timestamps, regardless of - * server setting. - */ -static pg_int64 -feGetCurrentTimestamp(void) -{ - pg_int64 result; - struct timeval tp; - - gettimeofday(&tp, NULL); - - result = (pg_int64) tp.tv_sec - - ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); - - result = (result * USECS_PER_SEC) + tp.tv_usec; - - return result; -} - -/* - * Converts an int64 to network byte order. - */ -static void -fe_sendint64(pg_int64 i, char *buf) -{ - uint32 n32; - - /* High order half first, since we're doing MSB-first */ - n32 = (uint32) (i >> 32); - n32 = htonl(n32); - memcpy(&buf[0], &n32, 4); - - /* Now the low order half */ - n32 = (uint32) i; - n32 = htonl(n32); - memcpy(&buf[4], &n32, 4); -} - -/* - * Converts an int64 from network byte order to native format. - */ -static pg_int64 -fe_recvint64(char *buf) -{ - pg_int64 result; - uint32 h32; - uint32 l32; - - memcpy(&h32, buf, 4); - memcpy(&l32, buf + 4, 4); - h32 = ntohl(h32); - l32 = ntohl(l32); - - result = h32; - result <<= 32; - result |= l32; - - return result; -} - static int sendFeedback(PGconn *conn, XLogRecPtr written_lsn, XLogRecPtr fsync_lsn, int replyRequested) diff --git a/psycopg/win32_support.c b/psycopg/win32_support.c new file mode 100644 index 00000000..8a760b9f --- /dev/null +++ b/psycopg/win32_support.c @@ -0,0 +1,61 @@ +/* win32_support.c - emulate some functions missing on Win32 + * + * Copyright (C) 2003-2015 Federico Di Gregorio + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/win32_support.h" + +#ifdef _WIN32 +/* millisecond-precision port of gettimeofday for Win32, taken from + src/port/gettimeofday.c in PostgreSQL core */ + +/* FILETIME of Jan 1 1970 00:00:00. */ +static const unsigned __int64 epoch = 116444736000000000ULL; + +/* + * timezone information is stored outside the kernel so tzp isn't used anymore. + * + * Note: this function is not for Win32 high precision timing purpose. See + * elapsed_time(). + */ +int +gettimeofday(struct timeval * tp, struct timezone * tzp) +{ + FILETIME file_time; + SYSTEMTIME system_time; + ULARGE_INTEGER ularge; + + GetSystemTime(&system_time); + SystemTimeToFileTime(&system_time, &file_time); + ularge.LowPart = file_time.dwLowDateTime; + ularge.HighPart = file_time.dwHighDateTime; + + tp->tv_sec = (long) ((ularge.QuadPart - epoch) / 10000000L); + tp->tv_usec = (long) (system_time.wMilliseconds * 1000); + + return 0; +} +#endif /* _WIN32 */ diff --git a/psycopg/win32_support.h b/psycopg/win32_support.h new file mode 100644 index 00000000..c6577317 --- /dev/null +++ b/psycopg/win32_support.h @@ -0,0 +1,36 @@ +/* win32_support.h - definitions for win32_support.c + * + * Copyright (C) 2003-2015 Federico Di Gregorio + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ +#ifndef PSYCOPG_WIN32_SUPPORT_H +#define PSYCOPG_WIN32_SUPPORT_H 1 + +#include "psycopg/config.h" + +#include + +#ifdef _WIN32 +HIDDEN int gettimeofday(struct timeval * tp, struct timezone * tzp); +#endif + +#endif /* !defined(PSYCOPG_WIN32_SUPPORT_H) */ diff --git a/setup.py b/setup.py index fc4f1711..1f87520e 100644 --- a/setup.py +++ b/setup.py @@ -462,6 +462,7 @@ data_files = [] sources = [ 'psycopgmodule.c', 'green.c', 'pqpath.c', 'utils.c', 'bytes_format.c', + 'libpq_support.c', 'win32_support.c', 'connection_int.c', 'connection_type.c', 'cursor_int.c', 'cursor_type.c', @@ -481,6 +482,7 @@ depends = [ 'config.h', 'pgtypes.h', 'psycopg.h', 'python.h', 'connection.h', 'cursor.h', 'diagnostics.h', 'error.h', 'green.h', 'lobject.h', 'notify.h', 'pqpath.h', 'xid.h', + 'libpq_support.h', 'win32_support.h', 'adapter_asis.h', 'adapter_binary.h', 'adapter_datetime.h', 'adapter_list.h', 'adapter_pboolean.h', 'adapter_pdecimal.h', From 50df864f8c63144bad281a1de1d6d38d4a06d4aa Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 4 Jun 2015 11:00:08 +0200 Subject: [PATCH 005/151] Add timersub for Win32. Fix gettimeofday on MinGW. --- psycopg/pqpath.c | 7 +++---- psycopg/win32_support.c | 17 ++++++++++++++++- psycopg/win32_support.h | 4 ++++ 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 4fb4771e..e87befae 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1616,15 +1616,14 @@ _pq_copy_both_v3(cursorObject *curs) ping_time = last_comm; ping_time.tv_sec += curs->keepalive_interval; - if (timercmp(&ping_time, &curr_time, >)) { - timersub(&ping_time, &curr_time, &time_diff); - + timersub(&ping_time, &curr_time, &time_diff); + if (time_diff.tv_sec > 0) { Py_BEGIN_ALLOW_THREADS; sel = select(PQsocket(conn) + 1, &fds, NULL, NULL, &time_diff); Py_END_ALLOW_THREADS; } else { - sel = 0; /* pretend select() timed out */ + sel = 0; /* we're past target time, pretend select() timed out */ } if (sel < 0) { diff --git a/psycopg/win32_support.c b/psycopg/win32_support.c index 8a760b9f..d508b220 100644 --- a/psycopg/win32_support.c +++ b/psycopg/win32_support.c @@ -29,6 +29,8 @@ #include "psycopg/win32_support.h" #ifdef _WIN32 + +#ifndef __MINGW32__ /* millisecond-precision port of gettimeofday for Win32, taken from src/port/gettimeofday.c in PostgreSQL core */ @@ -58,4 +60,17 @@ gettimeofday(struct timeval * tp, struct timezone * tzp) return 0; } -#endif /* _WIN32 */ +#endif /* !defined(__MINGW32__) */ + +/* timersub is missing on mingw */ +void +timersub(struct timeval *a, struct timeval *b, struct timeval *c) +{ + c->tv_sec = a->tv_sec - b->tv_sec; + c->tv_usec = a->tv_usec - b->tv_usec; + if (tv_usec < 0) { + c->tv_usec += 1000000; + c->tv_sec -= 1; + } +} +#endif /* defined(_WIN32) */ diff --git a/psycopg/win32_support.h b/psycopg/win32_support.h index c6577317..be963df5 100644 --- a/psycopg/win32_support.h +++ b/psycopg/win32_support.h @@ -30,7 +30,11 @@ #include #ifdef _WIN32 +#ifndef __MINGW32__ HIDDEN int gettimeofday(struct timeval * tp, struct timezone * tzp); #endif +HIDDEN void timersub(struct timeval *a, struct timeval *b, struct timeval *c); +#endif + #endif /* !defined(PSYCOPG_WIN32_SUPPORT_H) */ From f7b84ce843d3df9b95e5485fb881c13709e5c781 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 4 Jun 2015 11:01:09 +0200 Subject: [PATCH 006/151] Add {libpq,win32}_support.* to the .cproj file --- psycopg2.cproj | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/psycopg2.cproj b/psycopg2.cproj index 
7755b961..18b9727f 100644 --- a/psycopg2.cproj +++ b/psycopg2.cproj @@ -85,6 +85,7 @@ + @@ -93,6 +94,7 @@ + @@ -217,6 +219,7 @@ + @@ -229,6 +232,7 @@ + @@ -251,4 +255,4 @@ - \ No newline at end of file + From 453830f80c111280e090eb35db4494db33ff5e16 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 5 Jun 2015 17:44:09 +0200 Subject: [PATCH 007/151] Add ReplicationMessage object --- lib/extras.py | 4 + psycopg/cursor.h | 3 + psycopg/cursor_type.c | 58 +++++++++++++ psycopg/libpq_support.h | 4 + psycopg/pqpath.c | 52 +++++++++--- psycopg/psycopg.h | 1 + psycopg/psycopgmodule.c | 4 + psycopg/replication_message.h | 52 ++++++++++++ psycopg/replication_message_type.c | 127 +++++++++++++++++++++++++++++ setup.py | 2 + 10 files changed, 295 insertions(+), 12 deletions(-) create mode 100644 psycopg/replication_message.h create mode 100644 psycopg/replication_message_type.c diff --git a/lib/extras.py b/lib/extras.py index 92dd8192..8118e134 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -574,6 +574,10 @@ class ReplicationCursor(_cursor): return self.start_replication_expert(o, command, keepalive_interval) + # thin wrapper + def sync_server(self, msg): + return self.replication_sync_server(msg) + # a dbtype and adapter for Python UUID type diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 93b697b2..78ee21c4 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -72,7 +72,10 @@ struct cursorObject { #define DEFAULT_COPYSIZE 16384 #define DEFAULT_COPYBUFF 8192 + int in_replication; /* we're in streaming replication loop */ + int stop_replication; /* client requested to stop replication */ int keepalive_interval; /* interval for keepalive messages in replication mode */ + replicationMessageObject *repl_sync_msg; /* set when the client asks us to sync the server */ PyObject *tuple_factory; /* factory for result tuples */ PyObject *tzinfo_factory; /* factory for tzinfo objects */ diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index 954e764d..1ea922bb 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -28,6 +28,7 @@ #include "psycopg/cursor.h" #include "psycopg/connection.h" +#include "psycopg/replication_message.h" #include "psycopg/green.h" #include "psycopg/pqpath.h" #include "psycopg/typecast.h" @@ -1605,17 +1606,68 @@ psyco_curs_start_replication_expert(cursorObject *self, PyObject *args) self->copysize = 0; Py_INCREF(file); self->copyfile = file; + self->in_replication = 1; self->keepalive_interval = keepalive_interval; + self->stop_replication = 0; + self->repl_sync_msg = NULL; if (pq_execute(self, command, 0, 1 /* no_result */, 1 /* no_begin */) >= 0) { res = Py_None; Py_INCREF(Py_None); } + + Py_CLEAR(self->repl_sync_msg); Py_CLEAR(self->copyfile); + self->in_replication = 0; return res; } +#define psyco_curs_stop_replication_doc \ +"start_replication() -- Set flag to break out of endless loop in start_replication()." + +static PyObject * +psyco_curs_stop_replication(cursorObject *self) +{ + EXC_IF_CURS_CLOSED(self); + + if (!self->in_replication) { + PyErr_SetString(ProgrammingError, + "stop_replication() called when not in streaming replication loop"); + } else { + self->stop_replication = 1; + } + + Py_RETURN_NONE; +} + +#define psyco_curs_replication_sync_server_doc \ +"replication_sync_server(msg) -- Set flag to sync the server up to this replication message." 
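+/* Note: this only records the sync request on the cursor; the actual
+ * feedback message is sent to the server from the _pq_copy_both_v3()
+ * loop in pqpath.c once it regains control. */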
+ +static PyObject * +psyco_curs_replication_sync_server(cursorObject *self, PyObject *args) +{ + replicationMessageObject *msg; + + EXC_IF_CURS_CLOSED(self); + + if (!PyArg_ParseTuple(args, "O!", &replicationMessageType, &msg)) { + return NULL; + } + + if (!self->in_replication) { + PyErr_SetString(ProgrammingError, + "replication_sync_server() called when not in streaming replication loop"); + } else { + Py_CLEAR(self->repl_sync_msg); + + self->repl_sync_msg = msg; + Py_XINCREF(self->repl_sync_msg); + } + + Py_RETURN_NONE; +} + /* extension: closed - return true if cursor is closed */ #define psyco_curs_closed_doc \ @@ -1792,6 +1844,10 @@ static struct PyMethodDef cursorObject_methods[] = { METH_VARARGS|METH_KEYWORDS, psyco_curs_copy_expert_doc}, {"start_replication_expert", (PyCFunction)psyco_curs_start_replication_expert, METH_VARARGS, psyco_curs_start_replication_expert_doc}, + {"stop_replication", (PyCFunction)psyco_curs_stop_replication, + METH_NOARGS, psyco_curs_stop_replication_doc}, + {"replication_sync_server", (PyCFunction)psyco_curs_replication_sync_server, + METH_VARARGS, psyco_curs_replication_sync_server_doc}, {NULL} }; @@ -1908,6 +1964,7 @@ cursor_clear(cursorObject *self) Py_CLEAR(self->casts); Py_CLEAR(self->caster); Py_CLEAR(self->copyfile); + Py_CLEAR(self->repl_sync_msg); Py_CLEAR(self->tuple_factory); Py_CLEAR(self->tzinfo_factory); Py_CLEAR(self->query); @@ -1997,6 +2054,7 @@ cursor_traverse(cursorObject *self, visitproc visit, void *arg) Py_VISIT(self->casts); Py_VISIT(self->caster); Py_VISIT(self->copyfile); + Py_VISIT(self->repl_sync_msg); Py_VISIT(self->tuple_factory); Py_VISIT(self->tzinfo_factory); Py_VISIT(self->query); diff --git a/psycopg/libpq_support.h b/psycopg/libpq_support.h index 007f5e18..e597d24c 100644 --- a/psycopg/libpq_support.h +++ b/psycopg/libpq_support.h @@ -33,6 +33,10 @@ typedef unsigned PG_INT64_TYPE XLogRecPtr; #define InvalidXLogRecPtr ((XLogRecPtr) 0) +/* have to use lowercase %x, as PyString_FromFormat can't do %X */ +#define XLOGFMTSTR "%x/%x" +#define XLOGFMTARGS(x) ((uint32)((x) >> 32)), ((uint32)((x) & 0xFFFFFFFF)) + HIDDEN pg_int64 feGetCurrentTimestamp(void); HIDDEN void fe_sendint64(pg_int64 i, char *buf); HIDDEN pg_int64 fe_recvint64(char *buf); diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index e87befae..4ae62971 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -35,6 +35,7 @@ #include "psycopg/pqpath.h" #include "psycopg/connection.h" #include "psycopg/cursor.h" +#include "psycopg/replication_message.h" #include "psycopg/green.h" #include "psycopg/typecast.h" #include "psycopg/pgtypes.h" @@ -1528,9 +1529,8 @@ sendFeedback(PGconn *conn, XLogRecPtr written_lsn, XLogRecPtr fsync_lsn, char replybuf[1 + 8 + 8 + 8 + 8 + 1]; int len = 0; - Dprintf("_pq_copy_both_v3: confirming write up to %X/%X, flush to %X/%X\n", - (uint32) (written_lsn >> 32), (uint32) written_lsn, - (uint32) (fsync_lsn >> 32), (uint32) fsync_lsn); + Dprintf("_pq_copy_both_v3: confirming write up to "XLOGFMTSTR", flush to "XLOGFMTSTR, + XLOGFMTARGS(written_lsn), XLOGFMTARGS(fsync_lsn)); replybuf[len] = 'r'; len += 1; @@ -1559,6 +1559,7 @@ _pq_copy_both_v3(cursorObject *curs) PyObject *tmp = NULL; PyObject *write_func = NULL; PyObject *obj = NULL; + replicationMessageObject *msg = NULL; int ret = -1; int is_text; @@ -1568,9 +1569,9 @@ _pq_copy_both_v3(cursorObject *curs) struct timeval last_comm, curr_time, ping_time, time_diff; int len, hdr, reply, sel; - XLogRecPtr written_lsn = InvalidXLogRecPtr; - XLogRecPtr fsync_lsn = InvalidXLogRecPtr; - 
XLogRecPtr wal_end = InvalidXLogRecPtr; + XLogRecPtr written_lsn = InvalidXLogRecPtr, + fsync_lsn = InvalidXLogRecPtr, + data_start, wal_end; if (!curs->copyfile) { PyErr_SetString(ProgrammingError, @@ -1666,7 +1667,12 @@ _pq_copy_both_v3(cursorObject *curs) goto exit; } - wal_end = fe_recvint64(buffer + 1 + 8); + data_start = fe_recvint64(buffer + 1); + wal_end = fe_recvint64(buffer + 1 + 8); + /*send_time = fe_recvint64(buffer + 1 + 8 + 8);*/ + + Dprintf("_pq_copy_both_v3: data_start="XLOGFMTSTR", wal_end="XLOGFMTSTR, + XLOGFMTARGS(data_start), XLOGFMTARGS(wal_end)); if (is_text) { obj = PyUnicode_Decode(buffer + hdr, len - hdr, curs->conn->codec, NULL); @@ -1676,21 +1682,36 @@ _pq_copy_both_v3(cursorObject *curs) } if (!obj) { goto exit; } - tmp = PyObject_CallFunctionObjArgs(write_func, obj, NULL); + msg = (replicationMessageObject *) + PyObject_CallFunctionObjArgs((PyObject *)&replicationMessageType, + obj, NULL); Py_DECREF(obj); + if (!msg) { goto exit; } + + msg->data_start = data_start; + msg->wal_end = wal_end; + + tmp = PyObject_CallFunctionObjArgs(write_func, msg, NULL); if (tmp == NULL) { Dprintf("_pq_copy_both_v3: write_func returned NULL"); goto exit; } + Py_DECREF(tmp); /* update the LSN position we've written up to */ if (written_lsn < wal_end) written_lsn = wal_end; - /* if write() returned true-ish, we confirm LSN with the server */ - if (PyObject_IsTrue(tmp)) { - fsync_lsn = written_lsn; + /* if requested by sync_server(msg), we confirm LSN with the server */ + if (curs->repl_sync_msg) { + Dprintf("_pq_copy_both_v3: server sync requested at "XLOGFMTSTR, + XLOGFMTARGS(curs->repl_sync_msg->wal_end)); + + if (fsync_lsn < curs->repl_sync_msg->wal_end) + fsync_lsn = curs->repl_sync_msg->wal_end; + + Py_CLEAR(curs->repl_sync_msg); if (!sendFeedback(conn, written_lsn, fsync_lsn, 0)) { pq_raise(curs->conn, curs, NULL); @@ -1698,8 +1719,14 @@ _pq_copy_both_v3(cursorObject *curs) } gettimeofday(&last_comm, NULL); } - Py_DECREF(tmp); + if (curs->stop_replication) { + Dprintf("_pq_copy_both_v3: stop_replication flag set by write_func"); + break; + } + + Py_DECREF(msg); + msg = NULL; } else if (buffer[0] == 'k') { /* msgtype(1), walEnd(8), sendTime(8), reply(1) */ @@ -1751,6 +1778,7 @@ exit: PQfreemem(buffer); } + Py_XDECREF(msg); Py_XDECREF(write_func); return ret; } diff --git a/psycopg/psycopg.h b/psycopg/psycopg.h index eb406fd2..adda12d9 100644 --- a/psycopg/psycopg.h +++ b/psycopg/psycopg.h @@ -117,6 +117,7 @@ HIDDEN PyObject *psyco_GetDecimalType(void); /* forward declarations */ typedef struct cursorObject cursorObject; typedef struct connectionObject connectionObject; +typedef struct replicationMessageObject replicationMessageObject; /* some utility functions */ RAISES HIDDEN PyObject *psyco_set_error(PyObject *exc, cursorObject *curs, const char *msg); diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index 61e2de57..67393c37 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -28,6 +28,7 @@ #include "psycopg/connection.h" #include "psycopg/cursor.h" +#include "psycopg/replication_message.h" #include "psycopg/green.h" #include "psycopg/lobject.h" #include "psycopg/notify.h" @@ -785,6 +786,9 @@ INIT_MODULE(_psycopg)(void) Py_TYPE(&cursorType) = &PyType_Type; if (PyType_Ready(&cursorType) == -1) goto exit; + Py_TYPE(&replicationMessageType) = &PyType_Type; + if (PyType_Ready(&replicationMessageType) == -1) goto exit; + Py_TYPE(&typecastType) = &PyType_Type; if (PyType_Ready(&typecastType) == -1) goto exit; diff --git 
a/psycopg/replication_message.h b/psycopg/replication_message.h new file mode 100644 index 00000000..bf2b5f16 --- /dev/null +++ b/psycopg/replication_message.h @@ -0,0 +1,52 @@ +/* replication_message.h - definition for the psycopg ReplicationMessage type + * + * Copyright (C) 2003-2015 Federico Di Gregorio + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_REPLICATION_MESSAGE_H +#define PSYCOPG_REPLICATION_MESSAGE_H 1 + +#include "libpq_support.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject replicationMessageType; + +/* the typedef is forward-declared in psycopg.h */ +struct replicationMessageObject { + PyObject_HEAD + + PyObject *payload; + + XLogRecPtr data_start; + XLogRecPtr wal_end; + /* send_time */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_REPLICATION_MESSAGE_H) */ diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c new file mode 100644 index 00000000..6968955e --- /dev/null +++ b/psycopg/replication_message_type.c @@ -0,0 +1,127 @@ +/* replication_message_type.c - python interface to ReplcationMessage objects + * + * Copyright (C) 2003-2015 Federico Di Gregorio + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/replication_message.h" + + +static PyObject * +replmsg_repr(replicationMessageObject *self) +{ + return PyString_FromFormat( + "", + self, XLOGFMTARGS(self->data_start), XLOGFMTARGS(self->wal_end)); +} + +static int +replmsg_init(PyObject *obj, PyObject *args, PyObject *kwargs) +{ + replicationMessageObject *self = (replicationMessageObject*) obj; + + if (!PyArg_ParseTuple(args, "O", &self->payload)) + return -1; + Py_XINCREF(self->payload); + + self->data_start = 0; + self->wal_end = 0; + + return 0; +} + +static int +replmsg_clear(PyObject *self) +{ + Py_CLEAR(((replicationMessageObject*) self)->payload); + return 0; +} + +static void +replmsg_dealloc(PyObject* obj) +{ + replmsg_clear(obj); +} + + +#define OFFSETOF(x) offsetof(replicationMessageObject, x) + +/* object member list */ + +static struct PyMemberDef replicationMessageObject_members[] = { + {"payload", T_OBJECT, OFFSETOF(payload), READONLY, + "TODO"}, + {"data_start", T_ULONGLONG, OFFSETOF(data_start), READONLY, + "TODO"}, + {"wal_end", T_ULONGLONG, OFFSETOF(wal_end), READONLY, + "TODO"}, + {NULL} +}; + +/* object type */ + +#define replicationMessageType_doc \ +"A database replication message." + +PyTypeObject replicationMessageType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.ReplicationMessage", + sizeof(replicationMessageObject), 0, + replmsg_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)replmsg_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + /*tp_flags*/ + replicationMessageType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + replmsg_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + replicationMessageObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + replmsg_init, /*tp_init*/ + 0, /*tp_alloc*/ + PyType_GenericNew, /*tp_new*/ +}; diff --git a/setup.py b/setup.py index 1f87520e..7c1a479f 100644 --- a/setup.py +++ b/setup.py @@ -466,6 +466,7 @@ sources = [ 'connection_int.c', 'connection_type.c', 'cursor_int.c', 'cursor_type.c', + 'replication_message_type.c', 'diagnostics_type.c', 'error_type.c', 'lobject_int.c', 'lobject_type.c', 'notify_type.c', 'xid_type.c', @@ -481,6 +482,7 @@ depends = [ # headers 'config.h', 'pgtypes.h', 'psycopg.h', 'python.h', 'connection.h', 'cursor.h', 'diagnostics.h', 'error.h', 'green.h', 'lobject.h', + 'replication_message.h', 'notify.h', 'pqpath.h', 'xid.h', 'libpq_support.h', 'win32_support.h', From 1ac385d1fb4328ba2220943741e4049fe472495b Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 10 Jun 2015 13:39:35 +0200 Subject: [PATCH 008/151] Fix logical decoding plugin options adaptation on python3 --- lib/extras.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/extras.py b/lib/extras.py index 8118e134..7de48d78 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -569,7 +569,7 @@ class ReplicationCursor(_cursor): for k,v in options.iteritems(): if not command.endswith('('): command += ", " - command += "%s %s" % (self.quote_ident(k), _A(str(v)).getquoted()) + command += "%s %s" % (self.quote_ident(k), _A(str(v))) command += ")" return 
self.start_replication_expert(o, command, keepalive_interval) From 9fc5bf44368eb381955c8bd164ccac145363e950 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 10 Jun 2015 18:21:06 +0200 Subject: [PATCH 009/151] Add handling of send_time field in replmsg --- psycopg/libpq_support.c | 7 ----- psycopg/libpq_support.h | 7 +++++ psycopg/pqpath.c | 8 +++-- psycopg/psycopgmodule.c | 1 + psycopg/replication_message.h | 4 ++- psycopg/replication_message_type.c | 49 ++++++++++++++++++++++++++++-- 6 files changed, 62 insertions(+), 14 deletions(-) diff --git a/psycopg/libpq_support.c b/psycopg/libpq_support.c index 95a3ebc6..160c8491 100644 --- a/psycopg/libpq_support.c +++ b/psycopg/libpq_support.c @@ -41,13 +41,6 @@ /* support routines taken from pg_basebackup/streamutil.c */ -/* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */ -#define UNIX_EPOCH_JDATE 2440588 /* == date2j(1970, 1, 1) */ -#define POSTGRES_EPOCH_JDATE 2451545 /* == date2j(2000, 1, 1) */ - -#define SECS_PER_DAY 86400 -#define USECS_PER_SEC 1000000LL - /* * Frontend version of GetCurrentTimestamp(), since we are not linked with * backend code. The protocol always uses integer timestamps, regardless of diff --git a/psycopg/libpq_support.h b/psycopg/libpq_support.h index e597d24c..ab35fef5 100644 --- a/psycopg/libpq_support.h +++ b/psycopg/libpq_support.h @@ -37,6 +37,13 @@ typedef unsigned PG_INT64_TYPE XLogRecPtr; #define XLOGFMTSTR "%x/%x" #define XLOGFMTARGS(x) ((uint32)((x) >> 32)), ((uint32)((x) & 0xFFFFFFFF)) +/* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */ +#define UNIX_EPOCH_JDATE 2440588 /* == date2j(1970, 1, 1) */ +#define POSTGRES_EPOCH_JDATE 2451545 /* == date2j(2000, 1, 1) */ + +#define SECS_PER_DAY 86400 +#define USECS_PER_SEC 1000000LL + HIDDEN pg_int64 feGetCurrentTimestamp(void); HIDDEN void fe_sendint64(pg_int64 i, char *buf); HIDDEN pg_int64 fe_recvint64(char *buf); diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 4ae62971..7a3ec19e 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1572,6 +1572,7 @@ _pq_copy_both_v3(cursorObject *curs) XLogRecPtr written_lsn = InvalidXLogRecPtr, fsync_lsn = InvalidXLogRecPtr, data_start, wal_end; + pg_int64 send_time; if (!curs->copyfile) { PyErr_SetString(ProgrammingError, @@ -1669,10 +1670,10 @@ _pq_copy_both_v3(cursorObject *curs) data_start = fe_recvint64(buffer + 1); wal_end = fe_recvint64(buffer + 1 + 8); - /*send_time = fe_recvint64(buffer + 1 + 8 + 8);*/ + send_time = fe_recvint64(buffer + 1 + 8 + 8); - Dprintf("_pq_copy_both_v3: data_start="XLOGFMTSTR", wal_end="XLOGFMTSTR, - XLOGFMTARGS(data_start), XLOGFMTARGS(wal_end)); + Dprintf("_pq_copy_both_v3: data_start="XLOGFMTSTR", wal_end="XLOGFMTSTR", send_time=%lld", + XLOGFMTARGS(data_start), XLOGFMTARGS(wal_end), send_time); if (is_text) { obj = PyUnicode_Decode(buffer + hdr, len - hdr, curs->conn->codec, NULL); @@ -1690,6 +1691,7 @@ _pq_copy_both_v3(cursorObject *curs) msg->data_start = data_start; msg->wal_end = wal_end; + msg->send_time = send_time; tmp = PyObject_CallFunctionObjArgs(write_func, msg, NULL); diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index 67393c37..27af2112 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -869,6 +869,7 @@ INIT_MODULE(_psycopg)(void) /* Initialize the PyDateTimeAPI everywhere is used */ PyDateTime_IMPORT; if (psyco_adapter_datetime_init()) { goto exit; } + if (psyco_replmsg_datetime_init()) { goto exit; } Py_TYPE(&pydatetimeType) = &PyType_Type; if (PyType_Ready(&pydatetimeType) == -1) 
goto exit; diff --git a/psycopg/replication_message.h b/psycopg/replication_message.h index bf2b5f16..b03d1c4f 100644 --- a/psycopg/replication_message.h +++ b/psycopg/replication_message.h @@ -42,9 +42,11 @@ struct replicationMessageObject { XLogRecPtr data_start; XLogRecPtr wal_end; - /* send_time */ + pg_int64 send_time; }; +RAISES_NEG int psyco_replmsg_datetime_init(void); + #ifdef __cplusplus } #endif diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index 6968955e..5d15ca61 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -27,14 +27,31 @@ #include "psycopg/psycopg.h" #include "psycopg/replication_message.h" +#include "psycopg/libpq_support.h" + +#include "datetime.h" + +RAISES_NEG int +psyco_replmsg_datetime_init(void) +{ + Dprintf("psyco_replmsg_datetime_init: datetime init"); + + PyDateTime_IMPORT; + + if (!PyDateTimeAPI) { + PyErr_SetString(PyExc_ImportError, "datetime initialization failed"); + return -1; + } + return 0; +} static PyObject * replmsg_repr(replicationMessageObject *self) { return PyString_FromFormat( - "", - self, XLOGFMTARGS(self->data_start), XLOGFMTARGS(self->wal_end)); + "", + self, XLOGFMTARGS(self->data_start), XLOGFMTARGS(self->wal_end), self->send_time); } static int @@ -65,6 +82,26 @@ replmsg_dealloc(PyObject* obj) replmsg_clear(obj); } +#define psyco_replmsg_send_time_doc \ +"send_time - Timestamp of the replication message departure from the server." + +static PyObject * +psyco_replmsg_get_send_time(replicationMessageObject *self) +{ + PyObject *tval, *res = NULL; + double t; + + t = (double)self->send_time / USECS_PER_SEC + + ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); + + tval = Py_BuildValue("(d)", t); + if (tval) { + res = PyDateTime_FromTimestamp(tval); + Py_DECREF(tval); + } + + return res; +} #define OFFSETOF(x) offsetof(replicationMessageObject, x) @@ -80,6 +117,12 @@ static struct PyMemberDef replicationMessageObject_members[] = { {NULL} }; +static struct PyGetSetDef replicationMessageObject_getsets[] = { + { "send_time", (getter)psyco_replmsg_get_send_time, NULL, + psyco_replmsg_send_time_doc, NULL }, + {NULL} +}; + /* object type */ #define replicationMessageType_doc \ @@ -115,7 +158,7 @@ PyTypeObject replicationMessageType = { 0, /*tp_iternext*/ 0, /*tp_methods*/ replicationMessageObject_members, /*tp_members*/ - 0, /*tp_getset*/ + replicationMessageObject_getsets, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ From 35a3262fe345b12fbc1cc7f89c2e0d35631811f7 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 11 Jun 2015 12:20:52 +0200 Subject: [PATCH 010/151] Expose ReplicationMessage type in extras --- lib/extensions.py | 2 +- lib/extras.py | 5 +++-- psycopg/psycopgmodule.c | 1 + 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/extensions.py b/lib/extensions.py index 216d8ad2..faa8b1de 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -56,7 +56,7 @@ try: except ImportError: pass -from psycopg2._psycopg import adapt, adapters, encodings, connection, cursor, lobject, Xid +from psycopg2._psycopg import adapt, adapters, encodings, connection, cursor, replicationMessage, lobject, Xid from psycopg2._psycopg import string_types, binary_types, new_type, new_array_type, register_type from psycopg2._psycopg import ISQLQuote, Notify, Diagnostics, Column diff --git a/lib/extras.py b/lib/extras.py index 7de48d78..2f32bf12 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -39,6 +39,7 @@ import psycopg2 from 
psycopg2 import extensions as _ext from psycopg2.extensions import cursor as _cursor from psycopg2.extensions import connection as _connection +from psycopg2.extensions import replicationMessage as ReplicationMessage from psycopg2.extensions import adapt as _A from psycopg2.extensions import b @@ -515,13 +516,13 @@ class ReplicationCursor(_cursor): else: raise RuntimeError("unrecognized replication slot type") - return self.execute(command) + self.execute(command) def drop_replication_slot(self, slot_name): """Drop streaming replication slot.""" command = "DROP_REPLICATION_SLOT %s" % self.quote_ident(slot_name) - return self.execute(command) + self.execute(command) def start_replication(self, o, slot_type, slot_name=None, start_lsn=None, timeline=0, keepalive_interval=10, options=None): diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index 27af2112..d44a4b68 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -911,6 +911,7 @@ INIT_MODULE(_psycopg)(void) /* put new types in module dictionary */ PyModule_AddObject(module, "connection", (PyObject*)&connectionType); PyModule_AddObject(module, "cursor", (PyObject*)&cursorType); + PyModule_AddObject(module, "replicationMessage", (PyObject*)&replicationMessageType); PyModule_AddObject(module, "ISQLQuote", (PyObject*)&isqlquoteType); PyModule_AddObject(module, "Notify", (PyObject*)¬ifyType); PyModule_AddObject(module, "Xid", (PyObject*)&xidType); From 9ed90b1216828351ccbd9e9e28951bf7933fb1b3 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 11 Jun 2015 14:52:01 +0200 Subject: [PATCH 011/151] Refer cursor from ReplicationMessage object. At the same time, for the sync use LSN instead of msg reference in cursor. --- psycopg/cursor.h | 3 ++- psycopg/cursor_type.c | 19 ++----------------- psycopg/pqpath.c | 12 ++++++------ psycopg/replication_message.h | 2 ++ psycopg/replication_message_type.c | 15 +++++++++------ 5 files changed, 21 insertions(+), 30 deletions(-) diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 78ee21c4..1a630553 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -27,6 +27,7 @@ #define PSYCOPG_CURSOR_H 1 #include "psycopg/connection.h" +#include "libpq_support.h" #ifdef __cplusplus extern "C" { @@ -75,7 +76,7 @@ struct cursorObject { int in_replication; /* we're in streaming replication loop */ int stop_replication; /* client requested to stop replication */ int keepalive_interval; /* interval for keepalive messages in replication mode */ - replicationMessageObject *repl_sync_msg; /* set when the client asks us to sync the server */ + XLogRecPtr repl_sync_lsn; /* set when the client asks us to sync the server */ PyObject *tuple_factory; /* factory for result tuples */ PyObject *tzinfo_factory; /* factory for tzinfo objects */ diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index 1ea922bb..19f82c60 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -1609,14 +1609,13 @@ psyco_curs_start_replication_expert(cursorObject *self, PyObject *args) self->in_replication = 1; self->keepalive_interval = keepalive_interval; self->stop_replication = 0; - self->repl_sync_msg = NULL; + self->repl_sync_lsn = InvalidXLogRecPtr; if (pq_execute(self, command, 0, 1 /* no_result */, 1 /* no_begin */) >= 0) { res = Py_None; Py_INCREF(Py_None); } - Py_CLEAR(self->repl_sync_msg); Py_CLEAR(self->copyfile); self->in_replication = 0; @@ -1647,24 +1646,12 @@ psyco_curs_stop_replication(cursorObject *self) static PyObject * psyco_curs_replication_sync_server(cursorObject *self, PyObject 
*args) { - replicationMessageObject *msg; - EXC_IF_CURS_CLOSED(self); - if (!PyArg_ParseTuple(args, "O!", &replicationMessageType, &msg)) { + if (!PyArg_ParseTuple(args, "K", &self->repl_sync_lsn)) { return NULL; } - if (!self->in_replication) { - PyErr_SetString(ProgrammingError, - "replication_sync_server() called when not in streaming replication loop"); - } else { - Py_CLEAR(self->repl_sync_msg); - - self->repl_sync_msg = msg; - Py_XINCREF(self->repl_sync_msg); - } - Py_RETURN_NONE; } @@ -1964,7 +1951,6 @@ cursor_clear(cursorObject *self) Py_CLEAR(self->casts); Py_CLEAR(self->caster); Py_CLEAR(self->copyfile); - Py_CLEAR(self->repl_sync_msg); Py_CLEAR(self->tuple_factory); Py_CLEAR(self->tzinfo_factory); Py_CLEAR(self->query); @@ -2054,7 +2040,6 @@ cursor_traverse(cursorObject *self, visitproc visit, void *arg) Py_VISIT(self->casts); Py_VISIT(self->caster); Py_VISIT(self->copyfile); - Py_VISIT(self->repl_sync_msg); Py_VISIT(self->tuple_factory); Py_VISIT(self->tzinfo_factory); Py_VISIT(self->query); diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 7a3ec19e..7ce06a86 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1685,7 +1685,7 @@ _pq_copy_both_v3(cursorObject *curs) msg = (replicationMessageObject *) PyObject_CallFunctionObjArgs((PyObject *)&replicationMessageType, - obj, NULL); + curs, obj, NULL); Py_DECREF(obj); if (!msg) { goto exit; } @@ -1706,14 +1706,14 @@ _pq_copy_both_v3(cursorObject *curs) written_lsn = wal_end; /* if requested by sync_server(msg), we confirm LSN with the server */ - if (curs->repl_sync_msg) { + if (curs->repl_sync_lsn != InvalidXLogRecPtr) { Dprintf("_pq_copy_both_v3: server sync requested at "XLOGFMTSTR, - XLOGFMTARGS(curs->repl_sync_msg->wal_end)); + XLOGFMTARGS(curs->repl_sync_lsn)); - if (fsync_lsn < curs->repl_sync_msg->wal_end) - fsync_lsn = curs->repl_sync_msg->wal_end; + if (fsync_lsn < curs->repl_sync_lsn) + fsync_lsn = curs->repl_sync_lsn; - Py_CLEAR(curs->repl_sync_msg); + curs->repl_sync_lsn = InvalidXLogRecPtr; if (!sendFeedback(conn, written_lsn, fsync_lsn, 0)) { pq_raise(curs->conn, curs, NULL); diff --git a/psycopg/replication_message.h b/psycopg/replication_message.h index b03d1c4f..a7567a1d 100644 --- a/psycopg/replication_message.h +++ b/psycopg/replication_message.h @@ -26,6 +26,7 @@ #ifndef PSYCOPG_REPLICATION_MESSAGE_H #define PSYCOPG_REPLICATION_MESSAGE_H 1 +#include "cursor.h" #include "libpq_support.h" #ifdef __cplusplus @@ -38,6 +39,7 @@ extern HIDDEN PyTypeObject replicationMessageType; struct replicationMessageObject { PyObject_HEAD + cursorObject *cursor; PyObject *payload; XLogRecPtr data_start; diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index 5d15ca61..27a9c916 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -27,7 +27,6 @@ #include "psycopg/psycopg.h" #include "psycopg/replication_message.h" -#include "psycopg/libpq_support.h" #include "datetime.h" @@ -59,8 +58,9 @@ replmsg_init(PyObject *obj, PyObject *args, PyObject *kwargs) { replicationMessageObject *self = (replicationMessageObject*) obj; - if (!PyArg_ParseTuple(args, "O", &self->payload)) + if (!PyArg_ParseTuple(args, "O!O", &cursorType, &self->cursor, &self->payload)) return -1; + Py_XINCREF(self->cursor); Py_XINCREF(self->payload); self->data_start = 0; @@ -70,16 +70,17 @@ replmsg_init(PyObject *obj, PyObject *args, PyObject *kwargs) } static int -replmsg_clear(PyObject *self) +replmsg_clear(replicationMessageObject *self) { - Py_CLEAR(((replicationMessageObject*) 
self)->payload); + Py_CLEAR(self->cursor); + Py_CLEAR(self->payload); return 0; } static void replmsg_dealloc(PyObject* obj) { - replmsg_clear(obj); + replmsg_clear((replicationMessageObject*) obj); } #define psyco_replmsg_send_time_doc \ @@ -108,6 +109,8 @@ psyco_replmsg_get_send_time(replicationMessageObject *self) /* object member list */ static struct PyMemberDef replicationMessageObject_members[] = { + {"cursor", T_OBJECT, OFFSETOF(cursor), READONLY, + "TODO"}, {"payload", T_OBJECT, OFFSETOF(payload), READONLY, "TODO"}, {"data_start", T_ULONGLONG, OFFSETOF(data_start), READONLY, @@ -151,7 +154,7 @@ PyTypeObject replicationMessageType = { /*tp_flags*/ replicationMessageType_doc, /*tp_doc*/ 0, /*tp_traverse*/ - replmsg_clear, /*tp_clear*/ + (inquiry)replmsg_clear, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ From 61e52ce8793472ff1348ab93ccdeb682a1e7b3df Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 10 Jun 2015 09:06:08 +0200 Subject: [PATCH 012/151] Rework replication protocol This change exposes lower level functions for operating the (logical) replication protocol, while keeping the high-level start_replication function that does all the job for you in case of a synchronous connection. A number of other changes and fixes are put into this commit. --- lib/extras.py | 36 +++-- psycopg/cursor.h | 11 +- psycopg/cursor_type.c | 183 +++++++++++++++++---- psycopg/pqpath.c | 360 +++++++++++++++++++++--------------------- psycopg/pqpath.h | 3 + psycopg2.cproj | 2 + 6 files changed, 357 insertions(+), 238 deletions(-) diff --git a/lib/extras.py b/lib/extras.py index 2f32bf12..85debc68 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -471,7 +471,8 @@ class ReplicationConnection(_connection): super(ReplicationConnection, self).__init__(*args, **kwargs) # prevent auto-issued BEGIN statements - self.autocommit = True + if not self.async: + self.autocommit = True def cursor(self, *args, **kwargs): kwargs.setdefault('cursor_factory', ReplicationCursor) @@ -503,18 +504,18 @@ class ReplicationCursor(_cursor): if slot_type == REPLICATION_LOGICAL: if output_plugin is None: - raise RuntimeError("output_plugin is required for logical replication slot") + raise psycopg2.ProgrammingError("output_plugin is required for logical replication slot") command += "LOGICAL %s" % self.quote_ident(output_plugin) elif slot_type == REPLICATION_PHYSICAL: if output_plugin is not None: - raise RuntimeError("output_plugin is not applicable to physical replication") + raise psycopg2.ProgrammingError("output_plugin is not applicable to physical replication") command += "PHYSICAL" else: - raise RuntimeError("unrecognized replication slot type") + raise psycopg2.ProgrammingError("unrecognized replication slot type") self.execute(command) @@ -524,17 +525,14 @@ class ReplicationCursor(_cursor): command = "DROP_REPLICATION_SLOT %s" % self.quote_ident(slot_name) self.execute(command) - def start_replication(self, o, slot_type, slot_name=None, start_lsn=None, + def start_replication(self, slot_type, slot_name=None, writer=None, start_lsn=None, timeline=0, keepalive_interval=10, options=None): """Start and consume replication stream.""" - if keepalive_interval <= 0: - raise RuntimeError("keepalive_interval must be > 0: %d" % keepalive_interval) - command = "START_REPLICATION " if slot_type == REPLICATION_LOGICAL and slot_name is None: - raise RuntimeError("slot_name is required for logical replication slot") + raise psycopg2.ProgrammingError("slot_name is required for logical replication 
slot") if slot_name: command += "SLOT %s " % self.quote_ident(slot_name) @@ -544,7 +542,7 @@ class ReplicationCursor(_cursor): elif slot_type == REPLICATION_PHYSICAL: command += "PHYSICAL " else: - raise RuntimeError("unrecognized replication slot type") + raise psycopg2.ProgrammingError("unrecognized replication slot type") if start_lsn is None: start_lsn = '0/0' @@ -555,16 +553,16 @@ class ReplicationCursor(_cursor): if timeline != 0: if slot_type == REPLICATION_LOGICAL: - raise RuntimeError("cannot specify timeline for logical replication") + raise psycopg2.ProgrammingError("cannot specify timeline for logical replication") if timeline < 0: - raise RuntimeError("timeline must be >= 0: %d" % timeline) + raise psycopg2.ProgrammingError("timeline must be >= 0: %d" % timeline) command += " TIMELINE %d" % timeline if options: if slot_type == REPLICATION_PHYSICAL: - raise RuntimeError("cannot specify plugin options for physical replication") + raise psycopg2.ProgrammingError("cannot specify plugin options for physical replication") command += " (" for k,v in options.iteritems(): @@ -573,11 +571,15 @@ class ReplicationCursor(_cursor): command += "%s %s" % (self.quote_ident(k), _A(str(v))) command += ")" - return self.start_replication_expert(o, command, keepalive_interval) + return self.start_replication_expert(command, writer=writer, + keepalive_interval=keepalive_interval) - # thin wrapper - def sync_server(self, msg): - return self.replication_sync_server(msg) + def send_feedback_message(self, written_lsn=0, sync_lsn=0, apply_lsn=0, reply_requested=False): + return self.send_replication_feedback(written_lsn, sync_lsn, apply_lsn, reply_requested) + + # allows replication cursors to be used in select.select() directly + def fileno(self): + return self.connection.fileno() # a dbtype and adapter for Python UUID type diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 1a630553..380abbf4 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -73,10 +73,13 @@ struct cursorObject { #define DEFAULT_COPYSIZE 16384 #define DEFAULT_COPYBUFF 8192 - int in_replication; /* we're in streaming replication loop */ - int stop_replication; /* client requested to stop replication */ - int keepalive_interval; /* interval for keepalive messages in replication mode */ - XLogRecPtr repl_sync_lsn; /* set when the client asks us to sync the server */ + int repl_stop; /* if client requested to stop replication */ + struct timeval repl_keepalive_interval; /* interval for keepalive messages in replication mode */ + XLogRecPtr repl_write_lsn; /* LSN stats for replication feedback messages */ + XLogRecPtr repl_flush_lsn; + XLogRecPtr repl_apply_lsn; + int repl_feedback_pending; /* flag set when we couldn't send the feedback to the server */ + struct timeval repl_last_io; /* timestamp of the last exchange with the server */ PyObject *tuple_factory; /* factory for result tuples */ PyObject *tzinfo_factory; /* factory for tzinfo objects */ diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index 19f82c60..9de5b085 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -36,9 +36,11 @@ #include "psycopg/microprotocols_proto.h" #include - #include +/* python */ +#include "datetime.h" + /** DBAPI methods **/ @@ -1581,78 +1583,182 @@ exit: } #define psyco_curs_start_replication_expert_doc \ -"start_replication_expert(file, command, keepalive_interval) -- Start and consume replication stream with direct command." 
+"start_replication_expert(command, writer=None, keepalive_interval=10) -- Start and consume replication stream with direct command." static PyObject * -psyco_curs_start_replication_expert(cursorObject *self, PyObject *args) +psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject *kwargs) { - PyObject *file, *res = NULL; + PyObject *writer = NULL, *res = NULL; char *command; - int keepalive_interval; + double keepalive_interval = 10; + static char *kwlist[] = {"command", "writer", "keepalive_interval", NULL}; - if (!PyArg_ParseTuple(args, "O&si", - _psyco_curs_has_write_check, &file, - &command, &keepalive_interval)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|Od", kwlist, + &command, &writer, &keepalive_interval)) { return NULL; } EXC_IF_CURS_CLOSED(self); - EXC_IF_CURS_ASYNC(self, start_replication_expert); EXC_IF_GREEN(start_replication_expert); EXC_IF_TPC_PREPARED(self->conn, start_replication_expert); Dprintf("psyco_curs_start_replication_expert: command = %s", command); - self->copysize = 0; - Py_INCREF(file); - self->copyfile = file; - self->in_replication = 1; - self->keepalive_interval = keepalive_interval; - self->stop_replication = 0; - self->repl_sync_lsn = InvalidXLogRecPtr; + if (keepalive_interval < 1.0) { + psyco_set_error(ProgrammingError, self, "keepalive_interval must be >= 1sec"); + return NULL; + } - if (pq_execute(self, command, 0, 1 /* no_result */, 1 /* no_begin */) >= 0) { + self->copysize = 0; + Py_XINCREF(writer); + self->copyfile = writer; + + self->repl_stop = 0; + self->repl_keepalive_interval.tv_sec = (int)keepalive_interval; + self->repl_keepalive_interval.tv_usec = + (keepalive_interval - (int)keepalive_interval)*1.0e6; + + self->repl_write_lsn = InvalidXLogRecPtr; + self->repl_flush_lsn = InvalidXLogRecPtr; + self->repl_apply_lsn = InvalidXLogRecPtr; + self->repl_feedback_pending = 0; + + gettimeofday(&self->repl_last_io, NULL); + + if (pq_execute(self, command, self->conn->async, + 1 /* no_result */, 1 /* no_begin */) >= 0) { res = Py_None; - Py_INCREF(Py_None); + Py_INCREF(res); } Py_CLEAR(self->copyfile); - self->in_replication = 0; return res; } #define psyco_curs_stop_replication_doc \ -"start_replication() -- Set flag to break out of endless loop in start_replication()." +"stop_replication() -- Set flag to break out of endless loop in start_replication() on sync connection." static PyObject * psyco_curs_stop_replication(cursorObject *self) { EXC_IF_CURS_CLOSED(self); - if (!self->in_replication) { - PyErr_SetString(ProgrammingError, - "stop_replication() called when not in streaming replication loop"); - } else { - self->stop_replication = 1; - } + self->repl_stop = 1; Py_RETURN_NONE; } -#define psyco_curs_replication_sync_server_doc \ -"replication_sync_server(msg) -- Set flag to sync the server up to this replication message." +#define psyco_curs_read_replication_message_doc \ +"read_replication_message(decode=True) -- Try reading a replication message from the server (non-blocking)." 
static PyObject * -psyco_curs_replication_sync_server(cursorObject *self, PyObject *args) +psyco_curs_read_replication_message(cursorObject *self, PyObject *args, PyObject *kwargs) { - EXC_IF_CURS_CLOSED(self); + int decode = 1; + static char *kwlist[] = {"decode", NULL}; - if (!PyArg_ParseTuple(args, "K", &self->repl_sync_lsn)) { + EXC_IF_CURS_CLOSED(self); + EXC_IF_GREEN(read_replication_message); + EXC_IF_TPC_PREPARED(self->conn, read_replication_message); + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, + &decode)) { return NULL; } - Py_RETURN_NONE; + return pq_read_replication_message(self, decode); +} + +static PyObject * +curs_flush_replication_feedback(cursorObject *self, int reply) +{ + if (!self->repl_feedback_pending) + Py_RETURN_FALSE; + + if (pq_send_replication_feedback(self, reply)) { + self->repl_feedback_pending = 0; + Py_RETURN_TRUE; + } else { + self->repl_feedback_pending = 1; + Py_RETURN_FALSE; + } +} + +#define psyco_curs_send_replication_feedback_doc \ +"send_replication_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) -- Try sending a replication feedback message to the server and optionally request a reply." + +static PyObject * +psyco_curs_send_replication_feedback(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + XLogRecPtr write_lsn = InvalidXLogRecPtr, + flush_lsn = InvalidXLogRecPtr, + apply_lsn = InvalidXLogRecPtr; + int reply = 0; + static char* kwlist[] = {"write_lsn", "flush_lsn", "apply_lsn", "reply", NULL}; + + EXC_IF_CURS_CLOSED(self); + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|KKKi", kwlist, + &write_lsn, &flush_lsn, &apply_lsn, &reply)) { + return NULL; + } + + if (write_lsn > self->repl_write_lsn) + self->repl_write_lsn = write_lsn; + + if (flush_lsn > self->repl_flush_lsn) + self->repl_flush_lsn = flush_lsn; + + if (apply_lsn > self->repl_apply_lsn) + self->repl_apply_lsn = apply_lsn; + + self->repl_feedback_pending = 1; + + return curs_flush_replication_feedback(self, reply); +} + +#define psyco_curs_flush_replication_feedback_doc \ +"flush_replication_feedback(reply=False) -- Try flushing the latest pending replication feedback message to the server and optionally request a reply." 
+ +static PyObject * +psyco_curs_flush_replication_feedback(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + int reply = 0; + static char *kwlist[] = {"reply", NULL}; + + EXC_IF_CURS_CLOSED(self); + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, + &reply)) { + return NULL; + } + + return curs_flush_replication_feedback(self, reply); +} + +#define psyco_curs_replication_io_timestamp_doc \ +"replication_io_timestamp -- the timestamp of latest IO with the server" + +static PyObject * +psyco_curs_get_replication_io_timestamp(cursorObject *self) +{ + PyObject *tval, *res = NULL; + double seconds; + + EXC_IF_CURS_CLOSED(self); + + // TODO: move to a one-call init function + PyDateTime_IMPORT; + + seconds = self->repl_last_io.tv_sec + self->repl_last_io.tv_usec / 1.0e6; + + tval = Py_BuildValue("(d)", seconds); + if (tval) { + res = PyDateTime_FromTimestamp(tval); + Py_DECREF(tval); + } + return res; } /* extension: closed - return true if cursor is closed */ @@ -1830,11 +1936,15 @@ static struct PyMethodDef cursorObject_methods[] = { {"copy_expert", (PyCFunction)psyco_curs_copy_expert, METH_VARARGS|METH_KEYWORDS, psyco_curs_copy_expert_doc}, {"start_replication_expert", (PyCFunction)psyco_curs_start_replication_expert, - METH_VARARGS, psyco_curs_start_replication_expert_doc}, + METH_VARARGS|METH_KEYWORDS, psyco_curs_start_replication_expert_doc}, {"stop_replication", (PyCFunction)psyco_curs_stop_replication, METH_NOARGS, psyco_curs_stop_replication_doc}, - {"replication_sync_server", (PyCFunction)psyco_curs_replication_sync_server, - METH_VARARGS, psyco_curs_replication_sync_server_doc}, + {"read_replication_message", (PyCFunction)psyco_curs_read_replication_message, + METH_VARARGS|METH_KEYWORDS, psyco_curs_read_replication_message_doc}, + {"send_replication_feedback", (PyCFunction)psyco_curs_send_replication_feedback, + METH_VARARGS|METH_KEYWORDS, psyco_curs_send_replication_feedback_doc}, + {"flush_replication_feedback", (PyCFunction)psyco_curs_flush_replication_feedback, + METH_VARARGS|METH_KEYWORDS, psyco_curs_flush_replication_feedback_doc}, {NULL} }; @@ -1885,6 +1995,9 @@ static struct PyGetSetDef cursorObject_getsets[] = { (getter)psyco_curs_scrollable_get, (setter)psyco_curs_scrollable_set, psyco_curs_scrollable_doc, NULL }, + { "replication_io_timestamp", + (getter)psyco_curs_get_replication_io_timestamp, NULL, + psyco_curs_replication_io_timestamp_doc, NULL }, {NULL} }; diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 7ce06a86..03d928cf 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1062,6 +1062,9 @@ pq_get_last_result(connectionObject *conn) PQclear(result); } result = res; + if (PQresultStatus(result) == PGRES_COPY_BOTH) { + break; + } } return result; @@ -1522,32 +1525,151 @@ exit: return ret; } -static int -sendFeedback(PGconn *conn, XLogRecPtr written_lsn, XLogRecPtr fsync_lsn, - int replyRequested) +/* ignores keepalive messages */ +PyObject * +pq_read_replication_message(cursorObject *curs, int decode) +{ + char *buffer = NULL; + int len, hdr, reply; + XLogRecPtr data_start, wal_end; + pg_int64 send_time; + PyObject *str = NULL, *msg = NULL; + + Dprintf("pq_read_replication_message(decode=%d)", decode); + +retry: + if (!PQconsumeInput(curs->conn->pgconn)) { + goto none; + } + + Py_BEGIN_ALLOW_THREADS; + len = PQgetCopyData(curs->conn->pgconn, &buffer, 1 /* async */); + Py_END_ALLOW_THREADS; + if (len == 0) { + goto none; + } + + if (len == -2) { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + if (len == -1) { + curs->pgres = 
PQgetResult(curs->conn->pgconn); + + if (curs->pgres && PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR) { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + + CLEARPGRES(curs->pgres); + goto none; + } + + /* ok, we did really read something: update the io timestamp */ + gettimeofday(&curs->repl_last_io, NULL); + + Dprintf("pq_read_replication_message: msg=%c, len=%d", buffer[0], len); + if (buffer[0] == 'w') { + /* msgtype(1), dataStart(8), walEnd(8), sendTime(8) */ + hdr = 1 + 8 + 8 + 8; + if (len < hdr + 1) { + psyco_set_error(OperationalError, curs, "data message header too small"); + goto exit; + } + + data_start = fe_recvint64(buffer + 1); + wal_end = fe_recvint64(buffer + 1 + 8); + send_time = fe_recvint64(buffer + 1 + 8 + 8); + + Dprintf("pq_read_replication_message: data_start="XLOGFMTSTR", wal_end="XLOGFMTSTR, + XLOGFMTARGS(data_start), XLOGFMTARGS(wal_end)); + + Dprintf("pq_read_replication_message: >>%.*s<<", len - hdr, buffer + hdr); + + if (decode) { + str = PyUnicode_Decode(buffer + hdr, len - hdr, curs->conn->codec, NULL); + } else { + str = Bytes_FromStringAndSize(buffer + hdr, len - hdr); + } + if (!str) { goto exit; } + + msg = PyObject_CallFunctionObjArgs((PyObject *)&replicationMessageType, + curs, str, NULL); + Py_DECREF(str); + if (!msg) { goto exit; } + + ((replicationMessageObject *)msg)->data_start = data_start; + ((replicationMessageObject *)msg)->wal_end = wal_end; + ((replicationMessageObject *)msg)->send_time = send_time; + } + else if (buffer[0] == 'k') { + /* msgtype(1), walEnd(8), sendTime(8), reply(1) */ + hdr = 1 + 8 + 8; + if (len < hdr + 1) { + psyco_set_error(OperationalError, curs, "keepalive message header too small"); + goto exit; + } + + reply = buffer[hdr]; + if (reply) { + if (!pq_send_replication_feedback(curs, 0)) { + if (curs->conn->async) { + curs->repl_feedback_pending = 1; + } else { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + } + else { + gettimeofday(&curs->repl_last_io, NULL); + } + } + + PQfreemem(buffer); + buffer = NULL; + goto retry; + } + else { + psyco_set_error(OperationalError, curs, "unrecognized replication message type"); + goto exit; + } + +exit: + if (buffer) { + PQfreemem(buffer); + } + + return msg; + +none: + msg = Py_None; + Py_INCREF(msg); + goto exit; +} + +int +pq_send_replication_feedback(cursorObject* curs, int reply_requested) { char replybuf[1 + 8 + 8 + 8 + 8 + 1]; int len = 0; - Dprintf("_pq_copy_both_v3: confirming write up to "XLOGFMTSTR", flush to "XLOGFMTSTR, - XLOGFMTARGS(written_lsn), XLOGFMTARGS(fsync_lsn)); + Dprintf("pq_send_replication_feedback: write="XLOGFMTSTR", flush="XLOGFMTSTR", apply="XLOGFMTSTR, + XLOGFMTARGS(curs->repl_write_lsn), + XLOGFMTARGS(curs->repl_flush_lsn), + XLOGFMTARGS(curs->repl_apply_lsn)); - replybuf[len] = 'r'; - len += 1; - fe_sendint64(written_lsn, &replybuf[len]); /* write */ - len += 8; - fe_sendint64(fsync_lsn, &replybuf[len]); /* flush */ - len += 8; - fe_sendint64(InvalidXLogRecPtr, &replybuf[len]); /* apply */ - len += 8; - fe_sendint64(feGetCurrentTimestamp(), &replybuf[len]); /* sendTime */ - len += 8; - replybuf[len] = replyRequested ? 1 : 0; /* replyRequested */ - len += 1; + replybuf[len] = 'r'; len += 1; + fe_sendint64(curs->repl_write_lsn, &replybuf[len]); len += 8; + fe_sendint64(curs->repl_flush_lsn, &replybuf[len]); len += 8; + fe_sendint64(curs->repl_apply_lsn, &replybuf[len]); len += 8; + fe_sendint64(feGetCurrentTimestamp(), &replybuf[len]); len += 8; + replybuf[len] = reply_requested ? 
1 : 0; len += 1; - if (PQputCopyData(conn, replybuf, len) <= 0 || PQflush(conn)) { + if (PQputCopyData(curs->conn->pgconn, replybuf, len) <= 0 || + PQflush(curs->conn->pgconn) != 0) { return 0; } + gettimeofday(&curs->repl_last_io, NULL); return 1; } @@ -1556,33 +1678,19 @@ sendFeedback(PGconn *conn, XLogRecPtr written_lsn, XLogRecPtr fsync_lsn, static int _pq_copy_both_v3(cursorObject *curs) { - PyObject *tmp = NULL; + PyObject *msg, *tmp = NULL; PyObject *write_func = NULL; - PyObject *obj = NULL; - replicationMessageObject *msg = NULL; int ret = -1; int is_text; - PGconn *conn; - char *buffer = NULL; + PGconn *pgconn; fd_set fds; - struct timeval last_comm, curr_time, ping_time, time_diff; - int len, hdr, reply, sel; - - XLogRecPtr written_lsn = InvalidXLogRecPtr, - fsync_lsn = InvalidXLogRecPtr, - data_start, wal_end; - pg_int64 send_time; + struct timeval curr_time, ping_time, time_diff; + int sel; if (!curs->copyfile) { - PyErr_SetString(ProgrammingError, - "can't execute START_REPLICATION: use the start_replication() method instead"); - goto exit; - } - - if (curs->keepalive_interval <= 0) { - PyErr_Format(PyExc_RuntimeError, "keepalive_interval must be > 0: %d", - curs->keepalive_interval); + psyco_set_error(ProgrammingError, curs, + "can't execute START_REPLICATION directly: use the start_replication() method instead"); goto exit; } @@ -1597,31 +1705,29 @@ _pq_copy_both_v3(cursorObject *curs) } CLEARPGRES(curs->pgres); - - /* timestamp of last communication with the server */ - gettimeofday(&last_comm, NULL); - - conn = curs->conn->pgconn; + pgconn = curs->conn->pgconn; while (1) { - len = PQgetCopyData(conn, &buffer, 1 /* async! */); - if (len < 0) { - break; + msg = pq_read_replication_message(curs, is_text); + if (!msg) { + goto exit; } - if (len == 0) { - FD_ZERO(&fds); - FD_SET(PQsocket(conn), &fds); + else if (msg == Py_None) { + Py_DECREF(msg); + + FD_ZERO(&fds); + FD_SET(PQsocket(pgconn), &fds); - /* set up timeout according to keepalive_interval, but no less than 1 second */ gettimeofday(&curr_time, NULL); - ping_time = last_comm; - ping_time.tv_sec += curs->keepalive_interval; + ping_time = curs->repl_last_io; + ping_time.tv_sec += curs->repl_keepalive_interval.tv_sec; + ping_time.tv_usec += curs->repl_keepalive_interval.tv_usec; timersub(&ping_time, &curr_time, &time_diff); if (time_diff.tv_sec > 0) { Py_BEGIN_ALLOW_THREADS; - sel = select(PQsocket(conn) + 1, &fds, NULL, NULL, &time_diff); + sel = select(PQsocket(pgconn) + 1, &fds, NULL, NULL, &time_diff); Py_END_ALLOW_THREADS; } else { @@ -1639,148 +1745,34 @@ _pq_copy_both_v3(cursorObject *curs) continue; } - if (sel > 0) { - if (!PQconsumeInput(conn)) { - Dprintf("_pq_copy_both_v3: PQconsumeInput failed"); + if (sel == 0) { + if (!pq_send_replication_feedback(curs, 0)) { pq_raise(curs->conn, curs, NULL); goto exit; } } - else { /* timeout */ - if (!sendFeedback(conn, written_lsn, fsync_lsn, 0)) { - pq_raise(curs->conn, curs, NULL); - goto exit; - } - } - gettimeofday(&last_comm, NULL); continue; } - if (len > 0 && buffer) { - gettimeofday(&last_comm, NULL); + else { + tmp = PyObject_CallFunctionObjArgs(write_func, msg, NULL); + Py_DECREF(msg); - Dprintf("_pq_copy_both_v3: msg=%c, len=%d", buffer[0], len); - if (buffer[0] == 'w') { - /* msgtype(1), dataStart(8), walEnd(8), sendTime(8) */ - hdr = 1 + 8 + 8 + 8; - if (len < hdr + 1) { - PyErr_Format(PyExc_RuntimeError, - "streaming header too small in data message: %d", len); - goto exit; - } - - data_start = fe_recvint64(buffer + 1); - wal_end = fe_recvint64(buffer + 1 
+ 8); - send_time = fe_recvint64(buffer + 1 + 8 + 8); - - Dprintf("_pq_copy_both_v3: data_start="XLOGFMTSTR", wal_end="XLOGFMTSTR", send_time=%lld", - XLOGFMTARGS(data_start), XLOGFMTARGS(wal_end), send_time); - - if (is_text) { - obj = PyUnicode_Decode(buffer + hdr, len - hdr, curs->conn->codec, NULL); - } - else { - obj = Bytes_FromStringAndSize(buffer + hdr, len - hdr); - } - if (!obj) { goto exit; } - - msg = (replicationMessageObject *) - PyObject_CallFunctionObjArgs((PyObject *)&replicationMessageType, - curs, obj, NULL); - Py_DECREF(obj); - if (!msg) { goto exit; } - - msg->data_start = data_start; - msg->wal_end = wal_end; - msg->send_time = send_time; - - tmp = PyObject_CallFunctionObjArgs(write_func, msg, NULL); - - if (tmp == NULL) { - Dprintf("_pq_copy_both_v3: write_func returned NULL"); - goto exit; - } - Py_DECREF(tmp); - - /* update the LSN position we've written up to */ - if (written_lsn < wal_end) - written_lsn = wal_end; - - /* if requested by sync_server(msg), we confirm LSN with the server */ - if (curs->repl_sync_lsn != InvalidXLogRecPtr) { - Dprintf("_pq_copy_both_v3: server sync requested at "XLOGFMTSTR, - XLOGFMTARGS(curs->repl_sync_lsn)); - - if (fsync_lsn < curs->repl_sync_lsn) - fsync_lsn = curs->repl_sync_lsn; - - curs->repl_sync_lsn = InvalidXLogRecPtr; - - if (!sendFeedback(conn, written_lsn, fsync_lsn, 0)) { - pq_raise(curs->conn, curs, NULL); - goto exit; - } - gettimeofday(&last_comm, NULL); - } - - if (curs->stop_replication) { - Dprintf("_pq_copy_both_v3: stop_replication flag set by write_func"); - break; - } - - Py_DECREF(msg); - msg = NULL; - } - else if (buffer[0] == 'k') { - /* msgtype(1), walEnd(8), sendTime(8), reply(1) */ - hdr = 1 + 8 + 8; - if (len < hdr + 1) { - PyErr_Format(PyExc_RuntimeError, - "streaming header too small in keepalive message: %d", len); - goto exit; - } - - reply = buffer[hdr]; - if (reply) { - if (!sendFeedback(conn, written_lsn, fsync_lsn, 0)) { - pq_raise(curs->conn, curs, NULL); - goto exit; - } - gettimeofday(&last_comm, NULL); - } - } - else { - PyErr_Format(PyExc_RuntimeError, - "unrecognized streaming message type: \"%c\"", buffer[0]); + if (tmp == NULL) { + Dprintf("_pq_copy_both_v3: write_func returned NULL"); goto exit; } + Py_DECREF(tmp); - /* buffer is allocated on every PQgetCopyData() call */ - PQfreemem(buffer); - buffer = NULL; + if (curs->repl_stop) { + Dprintf("_pq_copy_both_v3: repl_stop flag set by write_func"); + break; + } } } - if (len == -2) { - pq_raise(curs->conn, curs, NULL); - goto exit; - } - if (len == -1) { - curs->pgres = PQgetResult(curs->conn->pgconn); - - if (curs->pgres && PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR) - pq_raise(curs->conn, curs, NULL); - - CLEARPGRES(curs->pgres); - } - ret = 1; exit: - if (buffer) { - PQfreemem(buffer); - } - - Py_XDECREF(msg); Py_XDECREF(write_func); return ret; } @@ -1847,9 +1839,13 @@ pq_fetch(cursorObject *curs, int no_result) case PGRES_COPY_BOTH: Dprintf("pq_fetch: data from a streaming replication slot (no tuples)"); curs->rowcount = -1; - ex = _pq_copy_both_v3(curs); - /* error caught by out glorious notice handler */ - if (PyErr_Occurred()) ex = -1; + if (curs->conn->async) { + ex = 0; + } else { + ex = _pq_copy_both_v3(curs); + /* error caught by out glorious notice handler */ + if (PyErr_Occurred()) ex = -1; + } CLEARPGRES(curs->pgres); break; diff --git a/psycopg/pqpath.h b/psycopg/pqpath.h index bd3293f8..9a348bc2 100644 --- a/psycopg/pqpath.h +++ b/psycopg/pqpath.h @@ -72,4 +72,7 @@ HIDDEN int 
pq_execute_command_locked(connectionObject *conn, RAISES HIDDEN void pq_complete_error(connectionObject *conn, PGresult **pgres, char **error); +HIDDEN PyObject *pq_read_replication_message(cursorObject *curs, int decode); +HIDDEN int pq_send_replication_feedback(cursorObject *curs, int reply_requested); + #endif /* !defined(PSYCOPG_PQPATH_H) */ diff --git a/psycopg2.cproj b/psycopg2.cproj index 18b9727f..386287c1 100644 --- a/psycopg2.cproj +++ b/psycopg2.cproj @@ -92,6 +92,7 @@ + @@ -224,6 +225,7 @@ + From 318706f28c07444c1a73a3022eab2018ec73817c Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 30 Jun 2015 16:17:31 +0200 Subject: [PATCH 013/151] Update docs for Replication protocol --- doc/src/extras.rst | 199 +++++++++++++++++++++++++++++++++++++-------- 1 file changed, 165 insertions(+), 34 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 9bc302e2..7cca8400 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -165,8 +165,8 @@ Replication cursor .. method:: identify_system() - Get information about the cluster status in form of a dict with - ``systemid``, ``timeline``, ``xlogpos`` and ``dbname`` as keys. + This method executes ``IDENTIFY_SYSTEM`` command of the streaming + replication protocol and returns a result as a dictionary. Example:: @@ -197,65 +197,196 @@ Replication cursor cur.drop_replication_slot("testslot") - .. method:: start_replication(file, slot_type, slot_name=None, start_lsn=None, timeline=0, keepalive_interval=10, options=None) + .. method:: start_replication(slot_type, slot_name=None, writer=None, start_lsn=None, timeline=0, keepalive_interval=10, options=None) Start and consume replication stream. - :param file: a file-like object to write replication stream messages to :param slot_type: type of replication: either `REPLICATION_PHYSICAL` or `REPLICATION_LOGICAL` :param slot_name: name of the replication slot to use (required for logical replication) + :param writer: a file-like object to write replication messages to :param start_lsn: the point in replication stream (WAL position) to start from, in the form ``XXX/XXX`` (forward-slash separated pair of hexadecimals) :param timeline: WAL history timeline to start streaming from (optional, can only be used with physical replication) :param keepalive_interval: interval (in seconds) to send keepalive - messages to the server, in case there was no - communication during that period of time + messages to the server :param options: an dictionary of options to pass to logical replication slot - The ``keepalive_interval`` must be greater than zero. + With non-asynchronous connection, this method enters an endless loop, + reading messages from the server and passing them to ``write()`` method + of the *writer* object. This is similar to operation of the + `~cursor.copy_to()` method. It also sends keepalive messages to the + server, in case there were no new data from it for the duration of + *keepalive_interval* seconds (this parameter must be greater than 1 + second, but it can have a fractional part). - This method never returns unless an error message is sent from the - server, or the server closes connection, or there is an exception in the - ``write()`` method of the ``file`` object. + With asynchronous connection, this method returns immediately and the + calling code can start reading the replication messages in a loop. 
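+
+      For example, on a non-asynchronous connection the whole set-up might
+      look similar to this (a minimal sketch: ``dsn`` stands for a suitable
+      connection string, and ``ReplicationStreamWriter`` is a *writer* class
+      like the one sketched below)::
+
+          import psycopg2
+          from psycopg2.extras import ReplicationConnection, REPLICATION_LOGICAL
+
+          conn = psycopg2.connect(dsn, connection_factory=ReplicationConnection)
+          cur = conn.cursor()
+
+          cur.create_replication_slot(REPLICATION_LOGICAL, "testslot", "test_decoding")
+
+          # enters the consume loop, calling writer.write(msg) for every message
+          cur.start_replication(REPLICATION_LOGICAL, "testslot",
+                                writer=ReplicationStreamWriter())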
- One can even use ``sys.stdout`` as the destination (this is only good for - testing purposes, however):: + A sketch implementation of the *writer* object might look similar to + the following:: - >>> cur.start_replication(sys.stdout, "testslot") - ... + from io import TextIOBase - This method acts much like the `~cursor.copy_to()` with an important - distinction that ``write()`` method return value is dirving the - server-side replication cursor. In order to report to the server that - the all the messages up to the current one have been stored reliably, one - should return true value (i.e. something that satisfies ``if retval:`` - conidtion) from the ``write`` callback:: + class ReplicationStreamWriter(TextIOBase): - class ReplicationStreamWriter(object): def write(self, msg): - if store_message_reliably(msg): - return True + self.store_data_reliably(msg) - cur.start_replication(writer, "testslot") - ... + if self.should_report_to_the_server(msg): + msg.cursor.send_replication_feedback(flush_lsn=msg.wal_end) + + def store_data_reliably(self, msg): + ... + + def shoud_report_to_the_server(self, msg): + ... + + First, like with the `~cursor.copy_to()` method, the code that is + calling the provided write method checks if the *writer* object is + inherited from `~io.TextIOBase`. If that is the case, the message + payload to be passed is converted to unicode using the connection's + encoding information. Otherwise, the message is passed as is. + + The *msg* object being passed is an instance of `~ReplicationMessage` + class. + + After storing the data passed in the message object, the writer object + should consider sending a confirmation message to the server. This is + done by calling `~send_replication_feedback()` method on the + corresponding replication cursor. A reference to the cursor producing + a given message is provided in the `~ReplicationMessage` as an + attribute. .. note:: - One needs to be aware that failure to update the server-side cursor - on any one replication slot properly by constantly consuming and - reporting success to the server can eventually lead to "disk full" - condition on the server, because the server retains all the WAL - segments that might be needed to stream the changes via currently - open replication slots. + One needs to be aware that failure to properly notify the server on + any one replication slot by constantly consuming and reporting + success to the server at appropriate times can eventually lead to + "disk full" condition on the server, because the server retains all + the WAL segments that might be needed to stream the changes via + currently open replication slots. + + .. method:: stop_replication() + + In non-asynchronous connection, when called from the ``write()`` + method tells the code in `~start_replication` to break out of the + endless loop and return. + + .. 
method:: send_replication_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) + + :param write_lsn: a LSN position up to which the client has written the data locally + :param flush_lsn: a LSN position up to which the client has stored the + data reliably (the server is allowed to discard all + and every data that predates this LSN) + :param apply_lsn: a LSN position up to which the warm standby server + has applied the changes (physical replication + master-slave protocol only) + :param reply: request the server to send back a keepalive message immediately + + Use this method to report to the server that all messages up to a + certain LSN position have been stored and may be discarded. + + This method can also be called with default parameters to send a + keepalive message to the server. + + In case the message cannot be sent at the moment, remembers the + positions for a later successful call or call to + `~flush_replication_feedback()`. + + .. method:: flush_replication_feedback(reply=False) + + :param reply: request the server to send back a keepalive message immediately + + This method tries to flush the latest replication feedback message + that `~send_replication_feedback()` was trying to send, if any. + + Low-level methods for asynchronous connection operation. + + While with the non-asynchronous connection, a single call to + `~start_replication()` handles all the complexity, at times it might be + beneficial to use low-level interface for better control, in particular to + `~select.select()` on multiple sockets. The following methods are + provided for asynchronous operation: + + .. method:: read_replication_message(decode=True) + + :param decode: a flag indicating that unicode conversion should be + performed on the data received from the server + + This method should be used in a loop with asynchronous connections + after calling `~start_replication()`. + + It tries to read the next message from the server, without blocking + and returns an instance of `~ReplicationMessage` or *None*, in case + there are no more data messages from the server at the moment. After + receiving a *None* value from this method, one should use a + `~select.select()` or `~select.poll()` on the corresponding connection + to block the process until there is more data from the server. + + The server can send keepalive messages to the client periodically. + Such messages are silently consumed by this method and are never + reported to the caller. + + .. method:: fileno() + + Calls the corresponding connection's `~connection.fileno()` method + and returns the result. + + This is a convenience method which allows replication cursor to be + used directly in `~select.select()` or `~select.poll()` calls. + + .. attribute:: replication_io_timestamp + + A `~datetime` object representing the timestamp at the moment of last + communication with the server (a data or keepalive message in either + direction). + + An actual example of asynchronous operation might look like this:: + + keepalive_interval = 10.0 + while True: + if (datetime.now() - cur.replication_io_timestamp).total_seconds() >= keepalive_interval: + cur.send_replication_feedback() + + while True: + msg = cur.read_replication_message() + if not msg: + break + writer.write(msg) + + timeout = keepalive_interval - (datetime.now() - cur.replication_io_timestamp).total_seconds() + if timeout > 0: + select.select([cur], [], [], timeout) + +.. autoclass:: ReplicationMessage + + .. attribute:: payload + + The actual data received from the server. 
An instance of either + ``str`` or ``unicode``. + + .. attribute:: data_start + + LSN position of the start of the message. + + .. attribute:: wal_end + + LSN position of the end of the message. + + .. attribute:: send_time + + A `~datetime` object representing the server timestamp at the moment + when the message was sent. + + .. attribute:: cursor + + A reference to the corresponding `~ReplicationCursor` object. - Drop any open replication slots that are no longer being used. The - list of open slots can be obtained by running a query like ``SELECT * - FROM pg_replication_slots``. .. data:: REPLICATION_PHYSICAL From 0d731aa12e6d9a59e61cebe9c0a7d71025f000f8 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 30 Jun 2015 16:34:17 +0200 Subject: [PATCH 014/151] Comment on special handling of PGRES_COPY_BOTH --- psycopg/pqpath.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 03d928cf..04789d35 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1062,6 +1062,10 @@ pq_get_last_result(connectionObject *conn) PQclear(result); } result = res; + + /* After entering copy both mode, libpq will make a phony + * PGresult for us every time we query for it, so we need to + * break out of this endless loop. */ if (PQresultStatus(result) == PGRES_COPY_BOTH) { break; } From 9386653d721229eae3f9e691a93d711575d2e5c6 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 1 Jul 2015 14:08:32 +0200 Subject: [PATCH 015/151] Update docs on ReplicationCursor --- doc/src/extras.rst | 117 +++++++++++++++++++++++---------------------- 1 file changed, 60 insertions(+), 57 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 7cca8400..19c81523 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -165,12 +165,12 @@ Replication cursor .. method:: identify_system() - This method executes ``IDENTIFY_SYSTEM`` command of the streaming - replication protocol and returns a result as a dictionary. + Execute ``IDENTIFY_SYSTEM`` command of the streaming replication + protocol and return the result as a dictionary. Example:: - >>> print cur.identify_system() + >>> cur.identify_system() {'timeline': 1, 'systemid': '1234567890123456789', 'dbname': 'test', 'xlogpos': '0/1ABCDEF'} .. method:: create_replication_slot(slot_type, slot_name, output_plugin=None) @@ -199,82 +199,81 @@ Replication cursor .. method:: start_replication(slot_type, slot_name=None, writer=None, start_lsn=None, timeline=0, keepalive_interval=10, options=None) - Start and consume replication stream. + Start a replication stream. On non-asynchronous connection, also + consume the stream messages. 
:param slot_type: type of replication: either `REPLICATION_PHYSICAL` or `REPLICATION_LOGICAL` :param slot_name: name of the replication slot to use (required for logical replication) :param writer: a file-like object to write replication messages to - :param start_lsn: the point in replication stream (WAL position) to start - from, in the form ``XXX/XXX`` (forward-slash separated - pair of hexadecimals) + :param start_lsn: the LSN position to start from, in the form + ``XXX/XXX`` (forward-slash separated pair of + hexadecimals) :param timeline: WAL history timeline to start streaming from (optional, can only be used with physical replication) :param keepalive_interval: interval (in seconds) to send keepalive messages to the server - :param options: an dictionary of options to pass to logical replication + :param options: a dictionary of options to pass to logical replication slot - With non-asynchronous connection, this method enters an endless loop, - reading messages from the server and passing them to ``write()`` method - of the *writer* object. This is similar to operation of the + When used on non-asynchronous connection this method enters an endless + loop, reading messages from the server and passing them to ``write()`` + method of the *writer* object. This is similar to operation of the `~cursor.copy_to()` method. It also sends keepalive messages to the server, in case there were no new data from it for the duration of - *keepalive_interval* seconds (this parameter must be greater than 1 - second, but it can have a fractional part). + *keepalive_interval* seconds (this parameter's value must be equal to + at least than 1 second, but it can have a fractional part). With asynchronous connection, this method returns immediately and the calling code can start reading the replication messages in a loop. - A sketch implementation of the *writer* object might look similar to - the following:: + A sketch implementation of the *writer* object for logical replication + might look similar to the following:: from io import TextIOBase - class ReplicationStreamWriter(TextIOBase): + class LogicalStreamWriter(TextIOBase): def write(self, msg): - self.store_data_reliably(msg) + self.store_message_data(msg.payload) - if self.should_report_to_the_server(msg): + if self.should_report_to_the_server_now(msg): msg.cursor.send_replication_feedback(flush_lsn=msg.wal_end) - def store_data_reliably(self, msg): - ... - - def shoud_report_to_the_server(self, msg): - ... - - First, like with the `~cursor.copy_to()` method, the code that is - calling the provided write method checks if the *writer* object is + First, like with the `~cursor.copy_to()` method, the code that calls + the provided ``write()`` method checks if the *writer* object is inherited from `~io.TextIOBase`. If that is the case, the message payload to be passed is converted to unicode using the connection's - encoding information. Otherwise, the message is passed as is. + `~connection.encoding` information. Otherwise, the message is passed + as is. The *msg* object being passed is an instance of `~ReplicationMessage` class. - After storing the data passed in the message object, the writer object - should consider sending a confirmation message to the server. This is - done by calling `~send_replication_feedback()` method on the - corresponding replication cursor. A reference to the cursor producing - a given message is provided in the `~ReplicationMessage` as an - attribute. 
+ After storing certain amount of messages' data reliably, the client + should send a confirmation message to the server. This should be done + by calling `~send_replication_feedback()` method on the corresponding + replication cursor. A reference to the cursor is provided in the + `~ReplicationMessage` as an attribute. - .. note:: + .. warning:: - One needs to be aware that failure to properly notify the server on - any one replication slot by constantly consuming and reporting - success to the server at appropriate times can eventually lead to - "disk full" condition on the server, because the server retains all - the WAL segments that might be needed to stream the changes via - currently open replication slots. + Failure to properly notify the server by constantly consuming and + reporting success at appropriate times can eventually lead to "disk + full" condition on the server, because the server retains all the + WAL segments that might be needed to stream the changes via all of + the currently open replication slots. + + On the other hand, it is not recommended to send a confirmation + after every processed message, since that will put an unnecessary + load on network and the server. A possible strategy is to confirm + after every COMMIT message. .. method:: stop_replication() In non-asynchronous connection, when called from the ``write()`` - method tells the code in `~start_replication` to break out of the + method, tell the code in `~start_replication` to break out of the endless loop and return. .. method:: send_replication_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) @@ -291,12 +290,12 @@ Replication cursor Use this method to report to the server that all messages up to a certain LSN position have been stored and may be discarded. - This method can also be called with default parameters to send a - keepalive message to the server. + This method can also be called with all default parameters' values to + send a keepalive message to the server. - In case the message cannot be sent at the moment, remembers the - positions for a later successful call or call to - `~flush_replication_feedback()`. + In case of asynchronous connection, if the feedback message cannot be + sent at the moment, remembers the passed LSN positions for a later + hopefully successful call or call to `~flush_replication_feedback()`. .. method:: flush_replication_feedback(reply=False) @@ -307,10 +306,10 @@ Replication cursor Low-level methods for asynchronous connection operation. - While with the non-asynchronous connection, a single call to - `~start_replication()` handles all the complexity, at times it might be - beneficial to use low-level interface for better control, in particular to - `~select.select()` on multiple sockets. The following methods are + With the non-asynchronous connection, a single call to + `~start_replication()` handles all the complexity, but at times it might + be beneficial to use low-level interface for better control, in particular + to `~select.select()` on multiple sockets. The following methods are provided for asynchronous operation: .. method:: read_replication_message(decode=True) @@ -319,14 +318,18 @@ Replication cursor performed on the data received from the server This method should be used in a loop with asynchronous connections - after calling `~start_replication()`. + after calling `~start_replication()` once. 
It tries to read the next message from the server, without blocking and returns an instance of `~ReplicationMessage` or *None*, in case - there are no more data messages from the server at the moment. After - receiving a *None* value from this method, one should use a - `~select.select()` or `~select.poll()` on the corresponding connection - to block the process until there is more data from the server. + there are no more data messages from the server at the moment. + + It is expected that the calling code will call this method repeatedly + in order to consume all of the messages that might have been buffered, + until *None* is returned. After receiving a *None* value from this + method, one might use `~select.select()` or `~select.poll()` on the + corresponding connection to block the process until there is more data + from the server. The server can send keepalive messages to the client periodically. Such messages are silently consumed by this method and are never @@ -334,8 +337,8 @@ Replication cursor .. method:: fileno() - Calls the corresponding connection's `~connection.fileno()` method - and returns the result. + Call the corresponding connection's `~connection.fileno()` method and + return the result. This is a convenience method which allows replication cursor to be used directly in `~select.select()` or `~select.poll()` calls. From dab41c699a3e20a3577ad52529d879741185df13 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 2 Jul 2015 14:34:09 +0200 Subject: [PATCH 016/151] Fix PQconsumeInput usage. Only call when no data is available in the internal buffer. --- psycopg/pqpath.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 04789d35..ed8b37f3 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1542,14 +1542,19 @@ pq_read_replication_message(cursorObject *curs, int decode) Dprintf("pq_read_replication_message(decode=%d)", decode); retry: - if (!PQconsumeInput(curs->conn->pgconn)) { - goto none; - } - Py_BEGIN_ALLOW_THREADS; len = PQgetCopyData(curs->conn->pgconn, &buffer, 1 /* async */); Py_END_ALLOW_THREADS; + if (len == 0) { + /* We should only try reading more data into the internal buffer when + * there is nothing available at the moment. Otherwise, with a really + * highly loaded server we might be reading a number of messages for + * every single one we process, thus overgrowing the internal buffer + * until the system runs out of memory. */ + if (PQconsumeInput(curs->conn->pgconn)) { + goto retry; + } goto none; } From 9c1f2acf3e3608ba0d13b0b3c3d01b68f2a29d90 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 2 Jul 2015 14:39:51 +0200 Subject: [PATCH 017/151] Check return value of PQsocket When connection is closed by the server, we might get -1 there. 
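
On the Python side such a connection loss surfaces as an exception (a
psycopg2.Error subclass, typically OperationalError) raised from the
blocking start_replication() call, or from read_replication_message()
on an asynchronous connection.  A sketch of how a caller might handle
it ("testslot", writer and handle_disconnect() are placeholders):

    try:
        cur.start_replication(REPLICATION_LOGICAL, "testslot", writer=writer)
    except psycopg2.Error:
        # connection lost or protocol error: reconnect and restart the
        # stream by calling start_replication() again
        handle_disconnect()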
--- psycopg/pqpath.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index ed8b37f3..e550d796 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1689,13 +1689,10 @@ _pq_copy_both_v3(cursorObject *curs) { PyObject *msg, *tmp = NULL; PyObject *write_func = NULL; - int ret = -1; - int is_text; - + int is_text, fd, sel, ret = -1; PGconn *pgconn; fd_set fds; struct timeval curr_time, ping_time, time_diff; - int sel; if (!curs->copyfile) { psyco_set_error(ProgrammingError, curs, @@ -1724,8 +1721,14 @@ _pq_copy_both_v3(cursorObject *curs) else if (msg == Py_None) { Py_DECREF(msg); + fd = PQsocket(pgconn); + if (fd < 0) { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + FD_ZERO(&fds); - FD_SET(PQsocket(pgconn), &fds); + FD_SET(fd, &fds); gettimeofday(&curr_time, NULL); @@ -1736,7 +1739,7 @@ _pq_copy_both_v3(cursorObject *curs) timersub(&ping_time, &curr_time, &time_diff); if (time_diff.tv_sec > 0) { Py_BEGIN_ALLOW_THREADS; - sel = select(PQsocket(pgconn) + 1, &fds, NULL, NULL, &time_diff); + sel = select(fd + 1, &fds, NULL, NULL, &time_diff); Py_END_ALLOW_THREADS; } else { From 06f18237f7932aab066cae2c09b6e335af5225f2 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 3 Jul 2015 11:40:00 +0200 Subject: [PATCH 018/151] Fix missing free in replmsg_dealloc --- psycopg/replication_message_type.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index 27a9c916..e52b32ee 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -80,7 +80,11 @@ replmsg_clear(replicationMessageObject *self) static void replmsg_dealloc(PyObject* obj) { + PyObject_GC_UnTrack(obj); + replmsg_clear((replicationMessageObject*) obj); + + Py_TYPE(obj)->tp_free(obj); } #define psyco_replmsg_send_time_doc \ From eac16d048ac597e3602e7ebddb3ea191e0537cff Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 3 Jul 2015 15:44:45 +0200 Subject: [PATCH 019/151] Fix missing GC flag in ReplicationMessage type --- psycopg/replication_message_type.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index e52b32ee..edfe6c16 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -69,6 +69,14 @@ replmsg_init(PyObject *obj, PyObject *args, PyObject *kwargs) return 0; } +static int +replmsg_traverse(replicationMessageObject *self, visitproc visit, void *arg) +{ + Py_VISIT((PyObject* )self->cursor); + Py_VISIT(self->payload); + return 0; +} + static int replmsg_clear(replicationMessageObject *self) { @@ -154,10 +162,10 @@ PyTypeObject replicationMessageType = { 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - /*tp_flags*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ replicationMessageType_doc, /*tp_doc*/ - 0, /*tp_traverse*/ + (traverseproc)replmsg_traverse, /*tp_traverse*/ (inquiry)replmsg_clear, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ From 26fe1f230fb073033d3279eb054bccfe4aecee99 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 7 Jul 2015 19:04:32 +0200 Subject: [PATCH 020/151] Fix use of PQconsumeInput() in pq_read_replication_message() The libpq's PQconsumeInput() returns 0 in case of an error only, but we need to know if it was able to actually read something. 
Work around this by setting an internal flag before retry. --- psycopg/pqpath.c | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index e550d796..edfdcd3a 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1534,7 +1534,7 @@ PyObject * pq_read_replication_message(cursorObject *curs, int decode) { char *buffer = NULL; - int len, hdr, reply; + int len, consumed = 0, hdr, reply; XLogRecPtr data_start, wal_end; pg_int64 send_time; PyObject *str = NULL, *msg = NULL; @@ -1542,20 +1542,29 @@ pq_read_replication_message(cursorObject *curs, int decode) Dprintf("pq_read_replication_message(decode=%d)", decode); retry: - Py_BEGIN_ALLOW_THREADS; len = PQgetCopyData(curs->conn->pgconn, &buffer, 1 /* async */); - Py_END_ALLOW_THREADS; if (len == 0) { - /* We should only try reading more data into the internal buffer when - * there is nothing available at the moment. Otherwise, with a really - * highly loaded server we might be reading a number of messages for - * every single one we process, thus overgrowing the internal buffer - * until the system runs out of memory. */ - if (PQconsumeInput(curs->conn->pgconn)) { - goto retry; + /* If we've tried reading some data, but there was none, bail out. */ + if (consumed) { + goto none; } - goto none; + /* We should only try reading more data when there is nothing + available at the moment. Otherwise, with a really highly loaded + server we might be reading a number of messages for every single + one we process, thus overgrowing the internal buffer until the + client system runs out of memory. */ + if (!PQconsumeInput(curs->conn->pgconn)) { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + /* But PQconsumeInput() doesn't tell us if it has actually read + anything into the internal buffer and there is no (supported) way + to ask libpq about this directly. The way we check is setting the + flag and re-trying PQgetCopyData(): if that returns 0 again, + there's no more data available in the buffer, so we return None. */ + consumed = 1; + goto retry; } if (len == -2) { @@ -1574,6 +1583,11 @@ retry: goto none; } + /* It also makes sense to set this flag here to make us return early in + case of retry due to keepalive message. Any pending data on the socket + will trigger read condition in select() in the calling code anyway. */ + consumed = 1; + /* ok, we did really read something: update the io timestamp */ gettimeofday(&curs->repl_last_io, NULL); From f872a2aabbf69bc7f16a4c25f226d634f9d019c9 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 30 Sep 2015 14:34:45 +0200 Subject: [PATCH 021/151] Remove typedef for uint32, include internal/c.h --- psycopg/libpq_support.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psycopg/libpq_support.h b/psycopg/libpq_support.h index ab35fef5..c7139463 100644 --- a/psycopg/libpq_support.h +++ b/psycopg/libpq_support.h @@ -26,9 +26,9 @@ #define PSYCOPG_LIBPQ_SUPPORT_H 1 #include "psycopg/config.h" +#include "internal/c.h" -/* type and constant definitions from internal postgres includes */ -typedef unsigned int uint32; +/* type and constant definitions from internal postgres includes not available otherwise */ typedef unsigned PG_INT64_TYPE XLogRecPtr; #define InvalidXLogRecPtr ((XLogRecPtr) 0) From 937a7a90246916bff0e956947b1bab6058c72d08 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 1 Oct 2015 11:08:56 +0200 Subject: [PATCH 022/151] Cleanup start replication wrt. slot type a bit. 
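
In particular, start_replication() now takes the starting position either as
an integer LSN or as a string of hexadecimals in the XXX/XXX form, and
normalizes it before appending it to the START_REPLICATION command.  A small
self-contained sketch of that normalization, mirroring the %X/%08X formatting
used in the diff below (the helper name is illustrative, not part of the
patch):

    def format_lsn(start_lsn):
        # Accept either an integer LSN or a "XXX/XXX" string of hex digits,
        # as the reworked start_replication() does, and render it in the
        # zero-padded XXX/XXXXXXXX form sent to the server.
        if isinstance(start_lsn, str):
            hi, lo = start_lsn.split('/')
            return "%X/%08X" % (int(hi, 16), int(lo, 16))
        return "%X/%08X" % ((start_lsn >> 32) & 0xFFFFFFFF,
                            start_lsn & 0xFFFFFFFF)

    format_lsn("1/ABCDEF")   # -> '1/00ABCDEF'
    format_lsn(0)            # -> '0/00000000'
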
--- doc/src/extras.rst | 11 +++++----- lib/extras.py | 53 +++++++++++++++++++++++----------------------- psycopg/cursor.h | 4 ---- 3 files changed, 33 insertions(+), 35 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 19c81523..1da983a4 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -197,7 +197,7 @@ Replication cursor cur.drop_replication_slot("testslot") - .. method:: start_replication(slot_type, slot_name=None, writer=None, start_lsn=None, timeline=0, keepalive_interval=10, options=None) + .. method:: start_replication(slot_type, slot_name=None, writer=None, start_lsn=0, timeline=0, keepalive_interval=10, options=None) Start a replication stream. On non-asynchronous connection, also consume the stream messages. @@ -207,15 +207,16 @@ Replication cursor :param slot_name: name of the replication slot to use (required for logical replication) :param writer: a file-like object to write replication messages to - :param start_lsn: the LSN position to start from, in the form - ``XXX/XXX`` (forward-slash separated pair of - hexadecimals) + :param start_lsn: the optional LSN position to start replicating from, + can be an integer or a string of hexadecimal digits + in the form ``XXX/XXX`` :param timeline: WAL history timeline to start streaming from (optional, can only be used with physical replication) :param keepalive_interval: interval (in seconds) to send keepalive messages to the server :param options: a dictionary of options to pass to logical replication - slot + slot (not allowed with physical replication, use + *None*) When used on non-asynchronous connection this method enters an endless loop, reading messages from the server and passing them to ``write()`` diff --git a/lib/extras.py b/lib/extras.py index 85debc68..36138c63 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -480,8 +480,8 @@ class ReplicationConnection(_connection): """Streamging replication types.""" -REPLICATION_PHYSICAL = 0 -REPLICATION_LOGICAL = 1 +REPLICATION_LOGICAL = "LOGICAL" +REPLICATION_PHYSICAL = "PHYSICAL" class ReplicationCursor(_cursor): """A cursor used for replication commands.""" @@ -504,18 +504,18 @@ class ReplicationCursor(_cursor): if slot_type == REPLICATION_LOGICAL: if output_plugin is None: - raise psycopg2.ProgrammingError("output_plugin is required for logical replication slot") + raise psycopg2.ProgrammingError("output plugin name is required for logical replication slot") - command += "LOGICAL %s" % self.quote_ident(output_plugin) + command += "%s %s" % (slot_type, self.quote_ident(output_plugin)) elif slot_type == REPLICATION_PHYSICAL: if output_plugin is not None: - raise psycopg2.ProgrammingError("output_plugin is not applicable to physical replication") + raise psycopg2.ProgrammingError("cannot specify output plugin name for physical replication slot") - command += "PHYSICAL" + command += slot_type else: - raise psycopg2.ProgrammingError("unrecognized replication slot type") + raise psycopg2.ProgrammingError("unrecognized replication slot type: %s" % slot_type) self.execute(command) @@ -525,44 +525,45 @@ class ReplicationCursor(_cursor): command = "DROP_REPLICATION_SLOT %s" % self.quote_ident(slot_name) self.execute(command) - def start_replication(self, slot_type, slot_name=None, writer=None, start_lsn=None, + def start_replication(self, slot_type, slot_name=None, writer=None, start_lsn=0, timeline=0, keepalive_interval=10, options=None): """Start and consume replication stream.""" command = "START_REPLICATION " - if slot_type == REPLICATION_LOGICAL and 
slot_name is None: - raise psycopg2.ProgrammingError("slot_name is required for logical replication slot") - - if slot_name: - command += "SLOT %s " % self.quote_ident(slot_name) - if slot_type == REPLICATION_LOGICAL: - command += "LOGICAL " + if slot_name: + command += "SLOT %s " % self.quote_ident(slot_name) + else: + raise psycopg2.ProgrammingError("slot name is required for logical replication") + + command += "%s " % slot_type + elif slot_type == REPLICATION_PHYSICAL: - command += "PHYSICAL " + if slot_name: + command += "SLOT %s " % self.quote_ident(slot_name) + + # don't add "PHYSICAL", before 9.4 it was just START_REPLICATION XXX/XXX else: - raise psycopg2.ProgrammingError("unrecognized replication slot type") + raise psycopg2.ProgrammingError("unrecognized replication slot type: %s" % slot_type) - if start_lsn is None: - start_lsn = '0/0' + if type(start_lsn) is str: + lsn = start_lsn.split('/') + lsn = "%X/%08X" % (int(lsn[0], 16), int(lsn[1], 16)) + else: + lsn = "%X/%08X" % ((start_lsn >> 32) & 0xFFFFFFFF, start_lsn & 0xFFFFFFFF) - # reparse lsn to catch possible garbage - lsn = start_lsn.split('/') - command += "%X/%X" % (int(lsn[0], 16), int(lsn[1], 16)) + command += lsn if timeline != 0: if slot_type == REPLICATION_LOGICAL: raise psycopg2.ProgrammingError("cannot specify timeline for logical replication") - if timeline < 0: - raise psycopg2.ProgrammingError("timeline must be >= 0: %d" % timeline) - command += " TIMELINE %d" % timeline if options: if slot_type == REPLICATION_PHYSICAL: - raise psycopg2.ProgrammingError("cannot specify plugin options for physical replication") + raise psycopg2.ProgrammingError("cannot specify output plugin options for physical replication") command += " (" for k,v in options.iteritems(): diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 380abbf4..dd07243f 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -97,10 +97,6 @@ struct cursorObject { }; -/* streaming replication modes */ -#define CURSOR_REPLICATION_PHYSICAL 0 -#define CURSOR_REPLICATION_LOGICAL 1 - /* C-callable functions in cursor_int.c and cursor_type.c */ BORROWED HIDDEN PyObject *curs_get_cast(cursorObject *self, PyObject *oid); From 95ee218c6d1e3ee5d7339c1980f7c4c410c8d827 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 1 Oct 2015 15:34:51 +0200 Subject: [PATCH 023/151] Update replication connection/cursor interface and docs. --- doc/src/extras.rst | 101 ++++++++++++++++++++++++++++--------- lib/extras.py | 121 +++++++++++++++++++++++++++------------------ 2 files changed, 151 insertions(+), 71 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 1da983a4..de94e6d0 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -144,20 +144,36 @@ Logging cursor Replication cursor ^^^^^^^^^^^^^^^^^^ -.. autoclass:: ReplicationConnection +.. autoclass:: LogicalReplicationConnection This connection factory class can be used to open a special type of - connection that is used for streaming replication. + connection that is used for logical replication. Example:: - from psycopg2.extras import ReplicationConnection, REPLICATION_PHYSICAL, REPLICATION_LOGICAL - conn = psycopg2.connect(dsn, connection_factory=ReplicationConnection) - cur = conn.cursor() + from psycopg2.extras import LogicalReplicationConnection + log_conn = psycopg2.connect(dsn, connection_factory=LogicalReplicationConnection) + log_cur = log_conn.cursor() + + +.. 
autoclass:: PhysicalReplicationConnection + + This connection factory class can be used to open a special type of + connection that is used for physical replication. + + Example:: + + from psycopg2.extras import PhysicalReplicationConnection + phys_conn = psycopg2.connect(dsn, connection_factory=PhysicalReplicationConnection) + phys_cur = phys_conn.cursor() + + + Both `LogicalReplicationConnection` and `PhysicalReplicationConnection` use + `ReplicationCursor` for actual communication on the connection. .. seealso:: - - PostgreSQL `Replication protocol`__ + - PostgreSQL `Streaming Replication Protocol`__ .. __: http://www.postgresql.org/docs/current/static/protocol-replication.html @@ -173,19 +189,38 @@ Replication cursor >>> cur.identify_system() {'timeline': 1, 'systemid': '1234567890123456789', 'dbname': 'test', 'xlogpos': '0/1ABCDEF'} - .. method:: create_replication_slot(slot_type, slot_name, output_plugin=None) + .. method:: create_replication_slot(slot_name, output_plugin=None) Create streaming replication slot. - :param slot_type: type of replication: either `REPLICATION_PHYSICAL` or - `REPLICATION_LOGICAL` :param slot_name: name of the replication slot to be created - :param output_plugin: name of the logical decoding output plugin to use - (logical replication only) + :param slot_type: type of replication: should be either + `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL` + :param output_plugin: name of the logical decoding output plugin to be + used by the slot; required for logical + replication connections, disallowed for physical Example:: - cur.create_replication_slot(REPLICATION_LOGICAL, "testslot", "test_decoding") + log_cur.create_replication_slot("logical1", "test_decoding") + phys_cur.create_replication_slot("physical1") + + # either logical or physical replication connection + cur.create_replication_slot("slot1", slot_type=REPLICATION_LOGICAL) + + When creating a slot on a logical replication connection, a logical + replication slot is created by default. Logical replication requires + name of the logical decoding output plugin to be specified. + + When creating a slot on a physical replication connection, a physical + replication slot is created by default. No output plugin parameter is + required or allowed when creating a physical replication slot. + + In either case, the type of slot being created can be specified + explicitly using *slot_type* parameter. + + Replication slots are a feature of PostgreSQL server starting with + version 9.4. .. method:: drop_replication_slot(slot_name) @@ -195,18 +230,24 @@ Replication cursor Example:: - cur.drop_replication_slot("testslot") + # either logical or physical replication connection + cur.drop_replication_slot("slot1") - .. method:: start_replication(slot_type, slot_name=None, writer=None, start_lsn=0, timeline=0, keepalive_interval=10, options=None) + This + + Replication slots are a feature of PostgreSQL server starting with + version 9.4. - Start a replication stream. On non-asynchronous connection, also - consume the stream messages. + .. method:: start_replication(slot_name=None, writer=None, slot_type=None, start_lsn=0, timeline=0, keepalive_interval=10, options=None) - :param slot_type: type of replication: either `REPLICATION_PHYSICAL` or - `REPLICATION_LOGICAL` - :param slot_name: name of the replication slot to use (required for - logical replication) + Start replication on the connection. 
+ + :param slot_name: name of the replication slot to use; required for + logical replication, physical replication can work + with or without a slot :param writer: a file-like object to write replication messages to + :param slot_type: type of replication: should be either + `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL` :param start_lsn: the optional LSN position to start replicating from, can be an integer or a string of hexadecimal digits in the form ``XXX/XXX`` @@ -215,9 +256,23 @@ Replication cursor :param keepalive_interval: interval (in seconds) to send keepalive messages to the server :param options: a dictionary of options to pass to logical replication - slot (not allowed with physical replication, use + slot (not allowed with physical replication, set to *None*) + If not specified using *slot_type* parameter, the type of replication + to be started is defined by the type of replication connection. + Logical replication is only allowed on logical replication connection, + but physical replication can be used with both types of connection. + + On the other hand, physical replication doesn't require a named + replication slot to be used, only logical one does. In any case, + logical replication and replication slots are a feature of PostgreSQL + server starting with version 9.4. Physical replication can be used + starting with 9.0. + + If a *slot_name* is specified, the slot must exist on the server and + its type must match the replication type used. + When used on non-asynchronous connection this method enters an endless loop, reading messages from the server and passing them to ``write()`` method of the *writer* object. This is similar to operation of the @@ -391,10 +446,8 @@ Replication cursor A reference to the corresponding `~ReplicationCursor` object. - -.. data:: REPLICATION_PHYSICAL - .. data:: REPLICATION_LOGICAL +.. data:: REPLICATION_PHYSICAL .. index:: pair: Cursor; Replication diff --git a/lib/extras.py b/lib/extras.py index 36138c63..4587afea 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -438,53 +438,78 @@ class MinTimeLoggingCursor(LoggingCursor): return LoggingCursor.callproc(self, procname, vars) -class ReplicationConnection(_connection): - """A connection that uses `ReplicationCursor` automatically.""" +"""Replication connection types.""" +REPLICATION_LOGICAL = "LOGICAL" +REPLICATION_PHYSICAL = "PHYSICAL" + + +class ReplicationConnectionBase(_connection): + """ + Base class for Logical and Physical replication connection + classes. Uses `ReplicationCursor` automatically. + """ def __init__(self, *args, **kwargs): - """Initializes a replication connection, by adding appropriate replication parameter to the provided dsn arguments.""" + """ + Initializes a replication connection by adding appropriate + parameters to the provided DSN and tweaking the connection + attributes. + """ - if len(args): - dsn = args[0] + # replication_type is set in subclasses + if self.replication_type == REPLICATION_LOGICAL: + replication = 'database' - # FIXME: could really use parse_dsn here + elif self.replication_type == REPLICATION_PHYSICAL: + replication = 'true' - if dsn.startswith('postgres://') or dsn.startswith('postgresql://'): - # poor man's url parsing - if dsn.rfind('?') > 0: - if not dsn.endswith('?'): - dsn += '&' - else: - dsn += '?' 
- else: - dsn += ' ' - dsn += 'replication=database' - args = [dsn] + list(args[1:]) else: - dbname = kwargs.get('dbname', None) - if dbname is None: - kwargs['dbname'] = 'replication' + raise psycopg2.ProgrammingError("unrecognized replication type: %s" % self.replication_type) - if kwargs.get('replication', None) is None: - kwargs['replication'] = 'database' if dbname else 'true' + # FIXME: could really use parse_dsn here + dsn = args[0] + if dsn.startswith('postgres://') or dsn.startswith('postgresql://'): + # poor man's url parsing + if dsn.rfind('?') > 0: + if not dsn.endswith('?'): + dsn += '&' + else: + dsn += '?' + else: + dsn += ' ' + dsn += 'replication=%s' % replication + args = [dsn] + list(args[1:]) - super(ReplicationConnection, self).__init__(*args, **kwargs) + super(ReplicationConnectionBase, self).__init__(*args, **kwargs) # prevent auto-issued BEGIN statements if not self.async: self.autocommit = True - def cursor(self, *args, **kwargs): - kwargs.setdefault('cursor_factory', ReplicationCursor) - return super(ReplicationConnection, self).cursor(*args, **kwargs) + if self.cursor_factory is None: + self.cursor_factory = ReplicationCursor + + def quote_ident(self, ident): + # FIXME: use PQescapeIdentifier or psycopg_escape_identifier_easy, somehow + return '"%s"' % ident.replace('"', '""') -"""Streamging replication types.""" -REPLICATION_LOGICAL = "LOGICAL" -REPLICATION_PHYSICAL = "PHYSICAL" +class LogicalReplicationConnection(ReplicationConnectionBase): + + def __init__(self, *args, **kwargs): + self.replication_type = REPLICATION_LOGICAL + super(LogicalReplicationConnection, self).__init__(*args, **kwargs) + + +class PhysicalReplicationConnection(ReplicationConnectionBase): + + def __init__(self, *args, **kwargs): + self.replication_type = REPLICATION_PHYSICAL + super(PhysicalReplicationConnection, self).__init__(*args, **kwargs) + class ReplicationCursor(_cursor): - """A cursor used for replication commands.""" + """A cursor used for communication on the replication protocol.""" def identify_system(self): """Get information about the cluster status.""" @@ -493,47 +518,49 @@ class ReplicationCursor(_cursor): return dict(zip([_.name for _ in self.description], self.fetchall()[0])) - def quote_ident(self, ident): - # FIXME: use PQescapeIdentifier or psycopg_escape_identifier_easy, somehow - return '"%s"' % ident.replace('"', '""') - - def create_replication_slot(self, slot_type, slot_name, output_plugin=None): + def create_replication_slot(self, slot_name, slot_type=None, output_plugin=None): """Create streaming replication slot.""" - command = "CREATE_REPLICATION_SLOT %s " % self.quote_ident(slot_name) + command = "CREATE_REPLICATION_SLOT %s " % self.connection.quote_ident(slot_name) + + if slot_type is None: + slot_type = self.connection.replication_type if slot_type == REPLICATION_LOGICAL: if output_plugin is None: - raise psycopg2.ProgrammingError("output plugin name is required for logical replication slot") + raise psycopg2.ProgrammingError("output plugin name is required to create logical replication slot") - command += "%s %s" % (slot_type, self.quote_ident(output_plugin)) + command += "%s %s" % (slot_type, self.connection.quote_ident(output_plugin)) elif slot_type == REPLICATION_PHYSICAL: if output_plugin is not None: - raise psycopg2.ProgrammingError("cannot specify output plugin name for physical replication slot") + raise psycopg2.ProgrammingError("cannot specify output plugin name when creating physical replication slot") command += slot_type else: - raise 
psycopg2.ProgrammingError("unrecognized replication slot type: %s" % slot_type) + raise psycopg2.ProgrammingError("unrecognized replication type: %s" % slot_type) self.execute(command) def drop_replication_slot(self, slot_name): """Drop streaming replication slot.""" - command = "DROP_REPLICATION_SLOT %s" % self.quote_ident(slot_name) + command = "DROP_REPLICATION_SLOT %s" % self.connection.quote_ident(slot_name) self.execute(command) - def start_replication(self, slot_type, slot_name=None, writer=None, start_lsn=0, + def start_replication(self, slot_name=None, writer=None, slot_type=None, start_lsn=0, timeline=0, keepalive_interval=10, options=None): """Start and consume replication stream.""" command = "START_REPLICATION " + if slot_type is None: + slot_type = self.connection.replication_type + if slot_type == REPLICATION_LOGICAL: if slot_name: - command += "SLOT %s " % self.quote_ident(slot_name) + command += "SLOT %s " % self.connection.quote_ident(slot_name) else: raise psycopg2.ProgrammingError("slot name is required for logical replication") @@ -541,11 +568,11 @@ class ReplicationCursor(_cursor): elif slot_type == REPLICATION_PHYSICAL: if slot_name: - command += "SLOT %s " % self.quote_ident(slot_name) - + command += "SLOT %s " % self.connection.quote_ident(slot_name) # don't add "PHYSICAL", before 9.4 it was just START_REPLICATION XXX/XXX + else: - raise psycopg2.ProgrammingError("unrecognized replication slot type: %s" % slot_type) + raise psycopg2.ProgrammingError("unrecognized replication type: %s" % slot_type) if type(start_lsn) is str: lsn = start_lsn.split('/') @@ -569,7 +596,7 @@ class ReplicationCursor(_cursor): for k,v in options.iteritems(): if not command.endswith('('): command += ", " - command += "%s %s" % (self.quote_ident(k), _A(str(v))) + command += "%s %s" % (self.connection.quote_ident(k), _A(str(v))) command += ")" return self.start_replication_expert(command, writer=writer, From cac83da5dbb77e142040be66b7d0e85e3e10f9c3 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 1 Oct 2015 16:04:19 +0200 Subject: [PATCH 024/151] Use parse_dsn in ReplicationConnectionBase --- lib/extras.py | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/lib/extras.py b/lib/extras.py index 4587afea..998c792f 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -449,7 +449,7 @@ class ReplicationConnectionBase(_connection): classes. Uses `ReplicationCursor` automatically. """ - def __init__(self, *args, **kwargs): + def __init__(self, dsn, **kwargs): """ Initializes a replication connection by adding appropriate parameters to the provided DSN and tweaking the connection @@ -466,21 +466,16 @@ class ReplicationConnectionBase(_connection): else: raise psycopg2.ProgrammingError("unrecognized replication type: %s" % self.replication_type) - # FIXME: could really use parse_dsn here - dsn = args[0] - if dsn.startswith('postgres://') or dsn.startswith('postgresql://'): - # poor man's url parsing - if dsn.rfind('?') > 0: - if not dsn.endswith('?'): - dsn += '&' - else: - dsn += '?' 
- else: - dsn += ' ' - dsn += 'replication=%s' % replication - args = [dsn] + list(args[1:]) + items = _ext.parse_dsn(dsn) - super(ReplicationConnectionBase, self).__init__(*args, **kwargs) + # we add an appropriate replication keyword parameter, unless + # user has specified one explicitly in the DSN + items.setdefault('replication', replication) + + dsn = " ".join(["%s=%s" % (k, psycopg2._param_escape(str(v))) + for (k, v) in items.iteritems()]) + + super(ReplicationConnectionBase, self).__init__(dsn, **kwargs) # prevent auto-issued BEGIN statements if not self.async: From 0233620c26c5df32b1ad8d5b0363a5fd75be3e91 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 1 Oct 2015 19:28:00 +0200 Subject: [PATCH 025/151] Rework replication connection/cursor classes --- doc/src/extras.rst | 421 +++++++++++++++++------------ lib/extras.py | 9 +- psycopg/cursor.h | 4 +- psycopg/cursor_type.c | 71 +++-- psycopg/pqpath.c | 93 ++++--- psycopg/pqpath.h | 2 + psycopg/replication_message.h | 1 + psycopg/replication_message_type.c | 9 +- 8 files changed, 368 insertions(+), 242 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index de94e6d0..82a2be18 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -144,32 +144,40 @@ Logging cursor Replication cursor ^^^^^^^^^^^^^^^^^^ +.. autoclass:: ReplicationConnectionBase + + +The following replication types are defined: + +.. data:: REPLICATION_LOGICAL +.. data:: REPLICATION_PHYSICAL + + .. autoclass:: LogicalReplicationConnection - This connection factory class can be used to open a special type of - connection that is used for logical replication. + This connection factory class can be used to open a special type of + connection that is used for logical replication. - Example:: + Example:: - from psycopg2.extras import LogicalReplicationConnection - log_conn = psycopg2.connect(dsn, connection_factory=LogicalReplicationConnection) - log_cur = log_conn.cursor() + from psycopg2.extras import LogicalReplicationConnection + log_conn = psycopg2.connect(dsn, connection_factory=LogicalReplicationConnection) + log_cur = log_conn.cursor() .. autoclass:: PhysicalReplicationConnection - This connection factory class can be used to open a special type of - connection that is used for physical replication. + This connection factory class can be used to open a special type of + connection that is used for physical replication. - Example:: + Example:: - from psycopg2.extras import PhysicalReplicationConnection - phys_conn = psycopg2.connect(dsn, connection_factory=PhysicalReplicationConnection) - phys_cur = phys_conn.cursor() + from psycopg2.extras import PhysicalReplicationConnection + phys_conn = psycopg2.connect(dsn, connection_factory=PhysicalReplicationConnection) + phys_cur = phys_conn.cursor() - - Both `LogicalReplicationConnection` and `PhysicalReplicationConnection` use - `ReplicationCursor` for actual communication on the connection. + Both `LogicalReplicationConnection` and `PhysicalReplicationConnection` use + `ReplicationCursor` for actual communication on the connection. .. seealso:: @@ -177,160 +185,237 @@ Replication cursor .. __: http://www.postgresql.org/docs/current/static/protocol-replication.html + +The individual messages in the replication stream are presented by +`ReplicationMessage` objects: + +.. autoclass:: ReplicationMessage + + .. attribute:: payload + + The actual data received from the server. An instance of either + ``str`` or ``unicode``, depending on the method that was used to + produce this message. + + .. 
attribute:: data_size + + The raw size of the message payload (before possible unicode + conversion). + + .. attribute:: data_start + + LSN position of the start of the message. + + .. attribute:: wal_end + + LSN position of the current end of WAL on the server. + + .. attribute:: send_time + + A `~datetime` object representing the server timestamp at the moment + when the message was sent. + + .. attribute:: cursor + + A reference to the corresponding `ReplicationCursor` object. + + .. autoclass:: ReplicationCursor .. method:: identify_system() - Execute ``IDENTIFY_SYSTEM`` command of the streaming replication - protocol and return the result as a dictionary. + Execute ``IDENTIFY_SYSTEM`` command of the streaming replication + protocol and return the result as a dictionary. - Example:: + Example:: - >>> cur.identify_system() - {'timeline': 1, 'systemid': '1234567890123456789', 'dbname': 'test', 'xlogpos': '0/1ABCDEF'} + >>> cur.identify_system() + {'timeline': 1, 'systemid': '1234567890123456789', 'dbname': 'test', 'xlogpos': '0/1ABCDEF'} .. method:: create_replication_slot(slot_name, output_plugin=None) - Create streaming replication slot. + Create streaming replication slot. - :param slot_name: name of the replication slot to be created - :param slot_type: type of replication: should be either - `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL` - :param output_plugin: name of the logical decoding output plugin to be - used by the slot; required for logical - replication connections, disallowed for physical + :param slot_name: name of the replication slot to be created + :param slot_type: type of replication: should be either + `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL` + :param output_plugin: name of the logical decoding output plugin to be + used by the slot; required for logical + replication connections, disallowed for physical - Example:: + Example:: - log_cur.create_replication_slot("logical1", "test_decoding") - phys_cur.create_replication_slot("physical1") + log_cur.create_replication_slot("logical1", "test_decoding") + phys_cur.create_replication_slot("physical1") - # either logical or physical replication connection - cur.create_replication_slot("slot1", slot_type=REPLICATION_LOGICAL) + # either logical or physical replication connection + cur.create_replication_slot("slot1", slot_type=REPLICATION_LOGICAL) - When creating a slot on a logical replication connection, a logical - replication slot is created by default. Logical replication requires - name of the logical decoding output plugin to be specified. + When creating a slot on a logical replication connection, a logical + replication slot is created by default. Logical replication requires + name of the logical decoding output plugin to be specified. - When creating a slot on a physical replication connection, a physical - replication slot is created by default. No output plugin parameter is - required or allowed when creating a physical replication slot. + When creating a slot on a physical replication connection, a physical + replication slot is created by default. No output plugin parameter is + required or allowed when creating a physical replication slot. - In either case, the type of slot being created can be specified - explicitly using *slot_type* parameter. + In either case, the type of slot being created can be specified + explicitly using *slot_type* parameter. - Replication slots are a feature of PostgreSQL server starting with - version 9.4. 
+ Replication slots are a feature of PostgreSQL server starting with + version 9.4. .. method:: drop_replication_slot(slot_name) - Drop streaming replication slot. + Drop streaming replication slot. - :param slot_name: name of the replication slot to drop + :param slot_name: name of the replication slot to drop - Example:: + Example:: - # either logical or physical replication connection - cur.drop_replication_slot("slot1") + # either logical or physical replication connection + cur.drop_replication_slot("slot1") - This - - Replication slots are a feature of PostgreSQL server starting with - version 9.4. + Replication slots are a feature of PostgreSQL server starting with + version 9.4. - .. method:: start_replication(slot_name=None, writer=None, slot_type=None, start_lsn=0, timeline=0, keepalive_interval=10, options=None) + .. method:: start_replication(slot_name=None, slot_type=None, start_lsn=0, timeline=0, options=None) - Start replication on the connection. + Start replication on the connection. - :param slot_name: name of the replication slot to use; required for - logical replication, physical replication can work - with or without a slot - :param writer: a file-like object to write replication messages to - :param slot_type: type of replication: should be either - `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL` - :param start_lsn: the optional LSN position to start replicating from, - can be an integer or a string of hexadecimal digits - in the form ``XXX/XXX`` - :param timeline: WAL history timeline to start streaming from (optional, - can only be used with physical replication) - :param keepalive_interval: interval (in seconds) to send keepalive - messages to the server - :param options: a dictionary of options to pass to logical replication - slot (not allowed with physical replication, set to - *None*) + :param slot_name: name of the replication slot to use; required for + logical replication, physical replication can work + with or without a slot + :param slot_type: type of replication: should be either + `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL` + :param start_lsn: the optional LSN position to start replicating from, + can be an integer or a string of hexadecimal digits + in the form ``XXX/XXX`` + :param timeline: WAL history timeline to start streaming from (optional, + can only be used with physical replication) + :param options: a dictionary of options to pass to logical replication + slot (not allowed with physical replication) - If not specified using *slot_type* parameter, the type of replication - to be started is defined by the type of replication connection. - Logical replication is only allowed on logical replication connection, - but physical replication can be used with both types of connection. + If a *slot_name* is specified, the slot must exist on the server and + its type must match the replication type used. - On the other hand, physical replication doesn't require a named - replication slot to be used, only logical one does. In any case, - logical replication and replication slots are a feature of PostgreSQL - server starting with version 9.4. Physical replication can be used - starting with 9.0. + If not specified using *slot_type* parameter, the type of replication + is defined by the type of replication connection. Logical replication + is only allowed on logical replication connection, but physical + replication can be used with both types of connection. 
- If a *slot_name* is specified, the slot must exist on the server and - its type must match the replication type used. + On the other hand, physical replication doesn't require a named + replication slot to be used, only logical one does. In any case, + logical replication and replication slots are a feature of PostgreSQL + server starting with version 9.4. Physical replication can be used + starting with 9.0. - When used on non-asynchronous connection this method enters an endless - loop, reading messages from the server and passing them to ``write()`` - method of the *writer* object. This is similar to operation of the - `~cursor.copy_to()` method. It also sends keepalive messages to the - server, in case there were no new data from it for the duration of - *keepalive_interval* seconds (this parameter's value must be equal to - at least than 1 second, but it can have a fractional part). + If *start_lsn* is specified, the requested stream will start from that + LSN. The default is `!None`, which passes the LSN ``0/0``, causing + replay to begin at the last point at which the server got replay + confirmation from the client for, or the oldest available point for a + new slot. - With asynchronous connection, this method returns immediately and the - calling code can start reading the replication messages in a loop. + The server might produce an error if a WAL file for the given LSN has + already been recycled, or it may silently start streaming from a later + position: the client can verify the actual position using information + provided the `ReplicationMessage` attributes. The exact server + behavior depends on the type of replication and use of slots. - A sketch implementation of the *writer* object for logical replication - might look similar to the following:: + A *timeline* parameter can only be specified with physical replication + and only starting with server version 9.3. - from io import TextIOBase + A dictionary of *options* may be passed to the logical decoding plugin + on a logical replication slot. The set of supported options depends + on the output plugin that was used to create the slot. Must be + `!None` for physical replication. - class LogicalStreamWriter(TextIOBase): + This function constructs a ``START_REPLICATION`` command and calls + `start_replication_expert()` internally. - def write(self, msg): - self.store_message_data(msg.payload) + After starting the replication, to actually consume the incoming + server messages, use `consume_replication_stream()` or implement a + loop around `read_replication_message()` in case of asynchronous + connection. - if self.should_report_to_the_server_now(msg): - msg.cursor.send_replication_feedback(flush_lsn=msg.wal_end) + .. method:: start_replication_expert(command) - First, like with the `~cursor.copy_to()` method, the code that calls - the provided ``write()`` method checks if the *writer* object is - inherited from `~io.TextIOBase`. If that is the case, the message - payload to be passed is converted to unicode using the connection's - `~connection.encoding` information. Otherwise, the message is passed - as is. + Start replication on the connection using provided ``START_REPLICATION`` + command. - The *msg* object being passed is an instance of `~ReplicationMessage` - class. + .. method:: consume_replication_stream(consumer, decode=False, keepalive_interval=10) - After storing certain amount of messages' data reliably, the client - should send a confirmation message to the server. 
This should be done - by calling `~send_replication_feedback()` method on the corresponding - replication cursor. A reference to the cursor is provided in the - `~ReplicationMessage` as an attribute. + :param consumer: an object providing ``consume()`` method + :param decode: a flag indicating that unicode conversion should be + performed on the messages received from the server + :param keepalive_interval: interval (in seconds) to send keepalive + messages to the server - .. warning:: + This method can only be used with synchronous connection. For + asynchronous connections see `read_replication_message()`. - Failure to properly notify the server by constantly consuming and - reporting success at appropriate times can eventually lead to "disk - full" condition on the server, because the server retains all the - WAL segments that might be needed to stream the changes via all of - the currently open replication slots. + Before calling this method to consume the stream, use + `start_replication()` first. - On the other hand, it is not recommended to send a confirmation - after every processed message, since that will put an unnecessary - load on network and the server. A possible strategy is to confirm - after every COMMIT message. + When called, this method enters an endless loop, reading messages from + the server and passing them to ``consume()`` method of the *consumer* + object. In order to make this method break out of the loop and + return, the ``consume()`` method can call `stop_replication()` on the + cursor or it can throw an exception. + + If *decode* is set to `!True`, the messages read from the server are + converted according to the connection `~connection.encoding`. This + parameter should not be set with physical replication. + + This method also sends keepalive messages to the server, in case there + were no new data from the server for the duration of + *keepalive_interval* (in seconds). The value of this parameter must + be equal to at least 1 second, but it can have a fractional part. + + The following example is a sketch implementation of *consumer* object + for logical replication:: + + class LogicalStreamConsumer(object): + + def consume(self, msg): + self.store_message_data(msg.payload) + + if self.should_report_to_the_server_now(msg): + msg.cursor.send_replication_feedback(flush_lsn=msg.data_start) + + consumer = LogicalStreamConsumer() + cur.consume_replication_stream(consumer, decode=True) + + The *msg* objects passed to the ``consume()`` method are instances of + `ReplicationMessage` class. + + After storing certain amount of messages' data reliably, the client + should send a confirmation message to the server. This should be done + by calling `send_replication_feedback()` method on the corresponding + replication cursor. A reference to the cursor is provided in the + `ReplicationMessage` as an attribute. + + .. warning:: + + When using replication with slots, failure to properly notify the + server by constantly consuming and reporting success at + appropriate times can eventually lead to "disk full" condition on + the server, because the server retains all the WAL segments that + might be needed to stream the changes via all of the currently + open replication slots. + + On the other hand, it is not recommended to send a confirmation + after every processed message, since that will put an unnecessary + load on network and the server. A possible strategy is to confirm + after every COMMIT message. .. 
method:: stop_replication() - In non-asynchronous connection, when called from the ``write()`` - method, tell the code in `~start_replication` to break out of the - endless loop and return. + This method can be called on synchronous connections from the + ``consume()`` method of a ``consumer`` object in order to break out of + the endless loop in `consume_replication_stream()`. If called on + asynchronous connection or outside of the consume loop, this method + raises an error. .. method:: send_replication_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) @@ -344,29 +429,37 @@ Replication cursor :param reply: request the server to send back a keepalive message immediately Use this method to report to the server that all messages up to a - certain LSN position have been stored and may be discarded. + certain LSN position have been stored on the client and may be + discarded on the server. This method can also be called with all default parameters' values to - send a keepalive message to the server. + just send a keepalive message to the server. - In case of asynchronous connection, if the feedback message cannot be - sent at the moment, remembers the passed LSN positions for a later - hopefully successful call or call to `~flush_replication_feedback()`. + If the feedback message could not be sent, updates the passed LSN + positions in the cursor for a later call to + `flush_replication_feedback()` and returns `!False`, otherwise returns + `!True`. .. method:: flush_replication_feedback(reply=False) :param reply: request the server to send back a keepalive message immediately This method tries to flush the latest replication feedback message - that `~send_replication_feedback()` was trying to send, if any. + that `send_replication_feedback()` was trying to send but couldn't. + + If *reply* is `!True` sends a keepalive message in either case. + + Returns `!True` if the feedback message was sent successfully, + `!False` otherwise. Low-level methods for asynchronous connection operation. - With the non-asynchronous connection, a single call to - `~start_replication()` handles all the complexity, but at times it might - be beneficial to use low-level interface for better control, in particular - to `~select.select()` on multiple sockets. The following methods are - provided for asynchronous operation: + With the synchronous connection, a call to `consume_replication_stream()` + handles all the complexity of handling the incoming messages and sending + keepalive replies, but at times it might be beneficial to use low-level + interface for better control, in particular to `~select.select()` on + multiple sockets. The following methods are provided for asynchronous + operation: .. method:: read_replication_message(decode=True) @@ -374,18 +467,18 @@ Replication cursor performed on the data received from the server This method should be used in a loop with asynchronous connections - after calling `~start_replication()` once. + after calling `start_replication()` once. It tries to read the next message from the server, without blocking - and returns an instance of `~ReplicationMessage` or *None*, in case + and returns an instance of `ReplicationMessage` or `!None`, in case there are no more data messages from the server at the moment. It is expected that the calling code will call this method repeatedly in order to consume all of the messages that might have been buffered, - until *None* is returned. 
After receiving a *None* value from this - method, one might use `~select.select()` or `~select.poll()` on the - corresponding connection to block the process until there is more data - from the server. + until `!None` is returned. After receiving a `!None` value from this + method, the caller should use `~select.select()` or `~select.poll()` + on the corresponding connection to block the process until there is + more data from the server. The server can send keepalive messages to the client periodically. Such messages are silently consumed by this method and are never @@ -409,45 +502,19 @@ Replication cursor keepalive_interval = 10.0 while True: - if (datetime.now() - cur.replication_io_timestamp).total_seconds() >= keepalive_interval: - cur.send_replication_feedback() + msg = cur.read_replication_message() + if msg: + consumer.consume(msg) + else: + timeout = keepalive_interval - (datetime.now() - cur.replication_io_timestamp).total_seconds() + if timeout > 0: + sel = select.select([cur], [], [], timeout) + else: + sel = [] - while True: - msg = cur.read_replication_message() - if not msg: - break - writer.write(msg) + if not sel: + cur.send_replication_feedback() - timeout = keepalive_interval - (datetime.now() - cur.replication_io_timestamp).total_seconds() - if timeout > 0: - select.select([cur], [], [], timeout) - -.. autoclass:: ReplicationMessage - - .. attribute:: payload - - The actual data received from the server. An instance of either - ``str`` or ``unicode``. - - .. attribute:: data_start - - LSN position of the start of the message. - - .. attribute:: wal_end - - LSN position of the end of the message. - - .. attribute:: send_time - - A `~datetime` object representing the server timestamp at the moment - when the message was sent. - - .. attribute:: cursor - - A reference to the corresponding `~ReplicationCursor` object. - -.. data:: REPLICATION_LOGICAL -.. data:: REPLICATION_PHYSICAL .. 
index:: pair: Cursor; Replication diff --git a/lib/extras.py b/lib/extras.py index 998c792f..c05536ad 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -544,9 +544,9 @@ class ReplicationCursor(_cursor): command = "DROP_REPLICATION_SLOT %s" % self.connection.quote_ident(slot_name) self.execute(command) - def start_replication(self, slot_name=None, writer=None, slot_type=None, start_lsn=0, - timeline=0, keepalive_interval=10, options=None): - """Start and consume replication stream.""" + def start_replication(self, slot_name=None, slot_type=None, start_lsn=0, + timeline=0, options=None): + """Start replication stream.""" command = "START_REPLICATION " @@ -594,8 +594,7 @@ class ReplicationCursor(_cursor): command += "%s %s" % (self.connection.quote_ident(k), _A(str(v))) command += ")" - return self.start_replication_expert(command, writer=writer, - keepalive_interval=keepalive_interval) + return self.start_replication_expert(command) def send_feedback_message(self, written_lsn=0, sync_lsn=0, apply_lsn=0, reply_requested=False): return self.send_replication_feedback(written_lsn, sync_lsn, apply_lsn, reply_requested) diff --git a/psycopg/cursor.h b/psycopg/cursor.h index dd07243f..941e279e 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -73,7 +73,9 @@ struct cursorObject { #define DEFAULT_COPYSIZE 16384 #define DEFAULT_COPYBUFF 8192 - int repl_stop; /* if client requested to stop replication */ + /* replication cursor attrs */ + int repl_started:1; /* if replication is started */ + int repl_stop:1; /* if client requested to stop replication */ struct timeval repl_keepalive_interval; /* interval for keepalive messages in replication mode */ XLogRecPtr repl_write_lsn; /* LSN stats for replication feedback messages */ XLogRecPtr repl_flush_lsn; diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index 9de5b085..d033a3df 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -36,6 +36,7 @@ #include "psycopg/microprotocols_proto.h" #include + #include /* python */ @@ -1588,13 +1589,11 @@ exit: static PyObject * psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject *kwargs) { - PyObject *writer = NULL, *res = NULL; + PyObject *res = NULL; char *command; - double keepalive_interval = 10; - static char *kwlist[] = {"command", "writer", "keepalive_interval", NULL}; + static char *kwlist[] = {"command", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|Od", kwlist, - &command, &writer, &keepalive_interval)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &command)) { return NULL; } @@ -1602,21 +1601,15 @@ psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject EXC_IF_GREEN(start_replication_expert); EXC_IF_TPC_PREPARED(self->conn, start_replication_expert); - Dprintf("psyco_curs_start_replication_expert: command = %s", command); - - if (keepalive_interval < 1.0) { - psyco_set_error(ProgrammingError, self, "keepalive_interval must be >= 1sec"); + if (self->repl_started) { + psyco_set_error(ProgrammingError, self, "replication already in progress"); return NULL; } - self->copysize = 0; - Py_XINCREF(writer); - self->copyfile = writer; + Dprintf("psyco_curs_start_replication_expert: command = %s", command); + self->copysize = 0; self->repl_stop = 0; - self->repl_keepalive_interval.tv_sec = (int)keepalive_interval; - self->repl_keepalive_interval.tv_usec = - (keepalive_interval - (int)keepalive_interval)*1.0e6; self->repl_write_lsn = InvalidXLogRecPtr; self->repl_flush_lsn = InvalidXLogRecPtr; @@ -1631,7 
+1624,7 @@ psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject Py_INCREF(res); } - Py_CLEAR(self->copyfile); + self->repl_started = 1; return res; } @@ -1643,12 +1636,54 @@ static PyObject * psyco_curs_stop_replication(cursorObject *self) { EXC_IF_CURS_CLOSED(self); + EXC_IF_CURS_ASYNC(self, stop_replication); + + if (!self->repl_started || self->repl_stop) { + psyco_set_error(ProgrammingError, self, "replication is not in progress"); + return NULL; + } self->repl_stop = 1; Py_RETURN_NONE; } +#define psyco_curs_consume_replication_stream_doc \ +"consume_replication_stream(consumer, keepalive_interval=10) -- Consume replication stream." + +static PyObject * +psyco_curs_consume_replication_stream(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + PyObject *consumer = NULL, *res = NULL; + int decode = 0; + double keepalive_interval = 10; + static char *kwlist[] = {"consumer", "decode", "keepalive_interval", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|id", kwlist, + &consumer, &decode, &keepalive_interval)) { + return NULL; + } + + EXC_IF_CURS_CLOSED(self); + EXC_IF_CURS_ASYNC(self, consume_replication_stream); + EXC_IF_GREEN(consume_replication_stream); + EXC_IF_TPC_PREPARED(self->conn, consume_replication_stream); + + Dprintf("psyco_curs_consume_replication_stream"); + + if (keepalive_interval < 1.0) { + psyco_set_error(ProgrammingError, self, "keepalive_interval must be >= 1 (sec)"); + return NULL; + } + + if (pq_copy_both(self, consumer, decode, keepalive_interval) >= 0) { + res = Py_None; + Py_INCREF(res); + } + + return res; +} + #define psyco_curs_read_replication_message_doc \ "read_replication_message(decode=True) -- Try reading a replication message from the server (non-blocking)." @@ -1673,7 +1708,7 @@ psyco_curs_read_replication_message(cursorObject *self, PyObject *args, PyObject static PyObject * curs_flush_replication_feedback(cursorObject *self, int reply) { - if (!self->repl_feedback_pending) + if (!(self->repl_feedback_pending || reply)) Py_RETURN_FALSE; if (pq_send_replication_feedback(self, reply)) { @@ -1939,6 +1974,8 @@ static struct PyMethodDef cursorObject_methods[] = { METH_VARARGS|METH_KEYWORDS, psyco_curs_start_replication_expert_doc}, {"stop_replication", (PyCFunction)psyco_curs_stop_replication, METH_NOARGS, psyco_curs_stop_replication_doc}, + {"consume_replication_stream", (PyCFunction)psyco_curs_consume_replication_stream, + METH_VARARGS|METH_KEYWORDS, psyco_curs_consume_replication_stream_doc}, {"read_replication_message", (PyCFunction)psyco_curs_read_replication_message, METH_VARARGS|METH_KEYWORDS, psyco_curs_read_replication_message_doc}, {"send_replication_feedback", (PyCFunction)psyco_curs_send_replication_feedback, diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index b524b14a..4f1427de 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1531,18 +1531,28 @@ exit: return ret; } -/* ignores keepalive messages */ +/* Tries to read the next message from the replication stream, without + blocking, in both sync and async connection modes. If no message + is ready in the CopyData buffer, tries to read from the server, + again without blocking. If that doesn't help, returns Py_None. + The caller is then supposed to block on the socket(s) and call this + function again. + + Any keepalive messages from the server are silently consumed and + are never returned to the caller. 
+ */ PyObject * pq_read_replication_message(cursorObject *curs, int decode) { char *buffer = NULL; - int len, consumed = 0, hdr, reply; + int len, data_size, consumed, hdr, reply; XLogRecPtr data_start, wal_end; pg_int64 send_time; PyObject *str = NULL, *msg = NULL; Dprintf("pq_read_replication_message(decode=%d)", decode); + consumed = 0; retry: len = PQgetCopyData(curs->conn->pgconn, &buffer, 1 /* async */); @@ -1570,10 +1580,12 @@ retry: } if (len == -2) { + /* serious error */ pq_raise(curs->conn, curs, NULL); goto exit; } if (len == -1) { + /* EOF */ curs->pgres = PQgetResult(curs->conn->pgconn); if (curs->pgres && PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR) { @@ -1595,13 +1607,14 @@ retry: Dprintf("pq_read_replication_message: msg=%c, len=%d", buffer[0], len); if (buffer[0] == 'w') { - /* msgtype(1), dataStart(8), walEnd(8), sendTime(8) */ + /* XLogData: msgtype(1), dataStart(8), walEnd(8), sendTime(8) */ hdr = 1 + 8 + 8 + 8; if (len < hdr + 1) { psyco_set_error(OperationalError, curs, "data message header too small"); goto exit; } + data_size = len - hdr; data_start = fe_recvint64(buffer + 1); wal_end = fe_recvint64(buffer + 1 + 8); send_time = fe_recvint64(buffer + 1 + 8 + 8); @@ -1609,12 +1622,13 @@ retry: Dprintf("pq_read_replication_message: data_start="XLOGFMTSTR", wal_end="XLOGFMTSTR, XLOGFMTARGS(data_start), XLOGFMTARGS(wal_end)); - Dprintf("pq_read_replication_message: >>%.*s<<", len - hdr, buffer + hdr); + Dprintf("pq_read_replication_message: >>%.*s<<", data_size, buffer + hdr); + /* XXX it would be wise to check if it's really a logical replication */ if (decode) { - str = PyUnicode_Decode(buffer + hdr, len - hdr, curs->conn->codec, NULL); + str = PyUnicode_Decode(buffer + hdr, data_size, curs->conn->codec, NULL); } else { - str = Bytes_FromStringAndSize(buffer + hdr, len - hdr); + str = Bytes_FromStringAndSize(buffer + hdr, data_size); } if (!str) { goto exit; } @@ -1623,12 +1637,13 @@ retry: Py_DECREF(str); if (!msg) { goto exit; } + ((replicationMessageObject *)msg)->data_size = data_size; ((replicationMessageObject *)msg)->data_start = data_start; ((replicationMessageObject *)msg)->wal_end = wal_end; ((replicationMessageObject *)msg)->send_time = send_time; } else if (buffer[0] == 'k') { - /* msgtype(1), walEnd(8), sendTime(8), reply(1) */ + /* Primary keepalive message: msgtype(1), walEnd(8), sendTime(8), reply(1) */ hdr = 1 + 8 + 8; if (len < hdr + 1) { psyco_set_error(OperationalError, curs, "keepalive message header too small"); @@ -1641,6 +1656,7 @@ retry: if (curs->conn->async) { curs->repl_feedback_pending = 1; } else { + /* XXX not sure if this was a good idea after all */ pq_raise(curs->conn, curs, NULL); goto exit; } @@ -1699,38 +1715,36 @@ pq_send_replication_feedback(cursorObject* curs, int reply_requested) return 1; } -/* used for streaming replication only */ -static int -_pq_copy_both_v3(cursorObject *curs) +/* Calls pq_read_replication_message in an endless loop, until + stop_replication is called or a fatal error occurs. The messages + are passed to the consumer object. + + When no message is available, blocks on the connection socket, but + manages to send keepalive messages to the server as needed. 
+*/ +int +pq_copy_both(cursorObject *curs, PyObject *consumer, int decode, double keepalive_interval) { PyObject *msg, *tmp = NULL; - PyObject *write_func = NULL; - int is_text, fd, sel, ret = -1; + PyObject *consume_func = NULL; + int fd, sel, ret = -1; PGconn *pgconn; fd_set fds; - struct timeval curr_time, ping_time, time_diff; + struct timeval keep_intr, curr_time, ping_time, timeout; - if (!curs->copyfile) { - psyco_set_error(ProgrammingError, curs, - "can't execute START_REPLICATION directly: use the start_replication() method instead"); - goto exit; - } - - if (!(write_func = PyObject_GetAttrString(curs->copyfile, "write"))) { - Dprintf("_pq_copy_both_v3: can't get o.write"); - goto exit; - } - - /* if the file is text we must pass it unicode. */ - if (-1 == (is_text = psycopg_is_text_file(curs->copyfile))) { + if (!(consume_func = PyObject_GetAttrString(consumer, "consume"))) { + Dprintf("pq_copy_both: can't get o.consume"); goto exit; } CLEARPGRES(curs->pgres); pgconn = curs->conn->pgconn; + keep_intr.tv_sec = (int)keepalive_interval; + keep_intr.tv_usec = (keepalive_interval - keep_intr.tv_sec)*1.0e6; + while (1) { - msg = pq_read_replication_message(curs, is_text); + msg = pq_read_replication_message(curs, decode); if (!msg) { goto exit; } @@ -1748,14 +1762,12 @@ _pq_copy_both_v3(cursorObject *curs) gettimeofday(&curr_time, NULL); - ping_time = curs->repl_last_io; - ping_time.tv_sec += curs->repl_keepalive_interval.tv_sec; - ping_time.tv_usec += curs->repl_keepalive_interval.tv_usec; + timeradd(&curs->repl_last_io, &keep_intr, &ping_time); + timersub(&ping_time, &curr_time, &timeout); - timersub(&ping_time, &curr_time, &time_diff); - if (time_diff.tv_sec > 0) { + if (timeout.tv_sec >= 0) { Py_BEGIN_ALLOW_THREADS; - sel = select(fd + 1, &fds, NULL, NULL, &time_diff); + sel = select(fd + 1, &fds, NULL, NULL, &timeout); Py_END_ALLOW_THREADS; } else { @@ -1782,17 +1794,17 @@ _pq_copy_both_v3(cursorObject *curs) continue; } else { - tmp = PyObject_CallFunctionObjArgs(write_func, msg, NULL); + tmp = PyObject_CallFunctionObjArgs(consume_func, msg, NULL); Py_DECREF(msg); if (tmp == NULL) { - Dprintf("_pq_copy_both_v3: write_func returned NULL"); + Dprintf("pq_copy_both: consume_func returned NULL"); goto exit; } Py_DECREF(tmp); if (curs->repl_stop) { - Dprintf("_pq_copy_both_v3: repl_stop flag set by write_func"); + Dprintf("pq_copy_both: repl_stop flag set by consume_func"); break; } } @@ -1801,7 +1813,7 @@ _pq_copy_both_v3(cursorObject *curs) ret = 1; exit: - Py_XDECREF(write_func); + Py_XDECREF(consume_func); return ret; } @@ -1867,13 +1879,14 @@ pq_fetch(cursorObject *curs, int no_result) case PGRES_COPY_BOTH: Dprintf("pq_fetch: data from a streaming replication slot (no tuples)"); curs->rowcount = -1; - if (curs->conn->async) { + ex = 0; + /*if (curs->conn->async) { ex = 0; } else { ex = _pq_copy_both_v3(curs); - /* error caught by out glorious notice handler */ + if (PyErr_Occurred()) ex = -1; - } + }*/ CLEARPGRES(curs->pgres); break; diff --git a/psycopg/pqpath.h b/psycopg/pqpath.h index 9a348bc2..a858a269 100644 --- a/psycopg/pqpath.h +++ b/psycopg/pqpath.h @@ -72,6 +72,8 @@ HIDDEN int pq_execute_command_locked(connectionObject *conn, RAISES HIDDEN void pq_complete_error(connectionObject *conn, PGresult **pgres, char **error); +HIDDEN int pq_copy_both(cursorObject *curs, PyObject *consumer, + int decode, double keepalive_interval); HIDDEN PyObject *pq_read_replication_message(cursorObject *curs, int decode); HIDDEN int pq_send_replication_feedback(cursorObject *curs, int 
reply_requested); diff --git a/psycopg/replication_message.h b/psycopg/replication_message.h index a7567a1d..201b9fb4 100644 --- a/psycopg/replication_message.h +++ b/psycopg/replication_message.h @@ -42,6 +42,7 @@ struct replicationMessageObject { cursorObject *cursor; PyObject *payload; + int data_size; XLogRecPtr data_start; XLogRecPtr wal_end; pg_int64 send_time; diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index edfe6c16..61833931 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -49,8 +49,9 @@ static PyObject * replmsg_repr(replicationMessageObject *self) { return PyString_FromFormat( - "", - self, XLOGFMTARGS(self->data_start), XLOGFMTARGS(self->wal_end), self->send_time); + "", + self, self->data_size, XLOGFMTARGS(self->data_start), XLOGFMTARGS(self->wal_end), + self->send_time); } static int @@ -63,8 +64,10 @@ replmsg_init(PyObject *obj, PyObject *args, PyObject *kwargs) Py_XINCREF(self->cursor); Py_XINCREF(self->payload); + self->data_size = 0; self->data_start = 0; self->wal_end = 0; + self->send_time = 0; return 0; } @@ -125,6 +128,8 @@ static struct PyMemberDef replicationMessageObject_members[] = { "TODO"}, {"payload", T_OBJECT, OFFSETOF(payload), READONLY, "TODO"}, + {"data_size", T_INT, OFFSETOF(data_size), READONLY, + "TODO"}, {"data_start", T_ULONGLONG, OFFSETOF(data_start), READONLY, "TODO"}, {"wal_end", T_ULONGLONG, OFFSETOF(wal_end), READONLY, From ea2b87eade9bb0a1eb0f4f9398ce9daeb3dcb930 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 13 Oct 2015 11:01:13 +0200 Subject: [PATCH 026/151] Fix create_replication_slot doc signature --- doc/src/extras.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 82a2be18..bdf8fc1b 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -232,7 +232,7 @@ The individual messages in the replication stream are presented by >>> cur.identify_system() {'timeline': 1, 'systemid': '1234567890123456789', 'dbname': 'test', 'xlogpos': '0/1ABCDEF'} - .. method:: create_replication_slot(slot_name, output_plugin=None) + .. method:: create_replication_slot(slot_name, slot_type=None, output_plugin=None) Create streaming replication slot. From 6ad299945fc431d162f53b08a3de5dda729fcb3e Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 13 Oct 2015 18:05:33 +0200 Subject: [PATCH 027/151] Remove IDENTIFY_SYSTEM wrapper method (it can't work with async anyway). --- doc/src/extras.rst | 10 ---------- lib/extras.py | 9 +-------- 2 files changed, 1 insertion(+), 18 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index bdf8fc1b..356e10e0 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -222,16 +222,6 @@ The individual messages in the replication stream are presented by .. autoclass:: ReplicationCursor - .. method:: identify_system() - - Execute ``IDENTIFY_SYSTEM`` command of the streaming replication - protocol and return the result as a dictionary. - - Example:: - - >>> cur.identify_system() - {'timeline': 1, 'systemid': '1234567890123456789', 'dbname': 'test', 'xlogpos': '0/1ABCDEF'} - .. method:: create_replication_slot(slot_name, slot_type=None, output_plugin=None) Create streaming replication slot. 
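The same information remains available by issuing the command directly on a
synchronous replication connection; a minimal sketch, with `conn` assumed to
be such a connection (column access via `cur.description` follows the removed
wrapper):

    cur = conn.cursor()
    cur.execute("IDENTIFY_SYSTEM")
    row = cur.fetchall()[0]
    info = dict(zip([col.name for col in cur.description], row))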
diff --git a/lib/extras.py b/lib/extras.py index c05536ad..913a6aae 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -506,13 +506,6 @@ class PhysicalReplicationConnection(ReplicationConnectionBase): class ReplicationCursor(_cursor): """A cursor used for communication on the replication protocol.""" - def identify_system(self): - """Get information about the cluster status.""" - - self.execute("IDENTIFY_SYSTEM") - return dict(zip([_.name for _ in self.description], - self.fetchall()[0])) - def create_replication_slot(self, slot_name, slot_type=None, output_plugin=None): """Create streaming replication slot.""" @@ -594,7 +587,7 @@ class ReplicationCursor(_cursor): command += "%s %s" % (self.connection.quote_ident(k), _A(str(v))) command += ")" - return self.start_replication_expert(command) + self.start_replication_expert(command) def send_feedback_message(self, written_lsn=0, sync_lsn=0, apply_lsn=0, reply_requested=False): return self.send_replication_feedback(written_lsn, sync_lsn, apply_lsn, reply_requested) From 54079072db3a6ff0794b8ce141e2dd929416bd14 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 14 Oct 2015 12:43:26 +0200 Subject: [PATCH 028/151] Fix ReplicationTest: no NotSupportedError now. --- tests/test_connection.py | 18 +++++++++++++----- tests/testconfig.py | 4 +--- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/tests/test_connection.py b/tests/test_connection.py index 68bb6f05..91ea51f5 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -1180,14 +1180,22 @@ class AutocommitTests(ConnectingTestCase): class ReplicationTest(ConnectingTestCase): @skip_before_postgres(9, 0) - def test_replication_not_supported(self): - conn = self.repl_connect() + def test_physical_replication_connection(self): + import psycopg2.extras + conn = self.repl_connect(connection_factory=psycopg2.extras.PhysicalReplicationConnection) if conn is None: return cur = conn.cursor() - f = StringIO() - self.assertRaises(psycopg2.NotSupportedError, - cur.copy_expert, "START_REPLICATION 0/0", f) + cur.execute("IDENTIFY_SYSTEM") + cur.fetchall() + @skip_before_postgres(9, 4) + def test_logical_replication_connection(self): + import psycopg2.extras + conn = self.repl_connect(connection_factory=psycopg2.extras.LogicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + cur.execute("IDENTIFY_SYSTEM") + cur.fetchall() def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff --git a/tests/testconfig.py b/tests/testconfig.py index 0f995fbf..d59e5a0d 100644 --- a/tests/testconfig.py +++ b/tests/testconfig.py @@ -7,8 +7,6 @@ dbhost = os.environ.get('PSYCOPG2_TESTDB_HOST', None) dbport = os.environ.get('PSYCOPG2_TESTDB_PORT', None) dbuser = os.environ.get('PSYCOPG2_TESTDB_USER', None) dbpass = os.environ.get('PSYCOPG2_TESTDB_PASSWORD', None) -repl_dsn = os.environ.get('PSYCOPG2_TEST_REPL_DSN', - "dbname=psycopg2_test replication=1") # Check if we want to test psycopg's green path. green = os.environ.get('PSYCOPG2_TEST_GREEN', None) @@ -35,4 +33,4 @@ if dbuser is not None: if dbpass is not None: dsn += ' password=%s' % dbpass - +repl_dsn = os.environ.get('PSYCOPG2_TEST_REPL_DSN', dsn) From fea2260fc5ec8dda9904eed9509b1a834b05747f Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 14 Oct 2015 12:50:08 +0200 Subject: [PATCH 029/151] Fix stop_replication: always raise outside the loop. 
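A sketch of the behaviour this enforces, mirroring the updated test (the DSN
is a placeholder and a server that accepts replication connections is
assumed):

    import psycopg2
    from psycopg2.extras import PhysicalReplicationConnection

    conn = psycopg2.connect("dbname=test",
                            connection_factory=PhysicalReplicationConnection)
    cur = conn.cursor()

    try:
        cur.stop_replication()      # no stream requested yet
    except psycopg2.ProgrammingError:
        pass                        # expected: replication is not in progress

    cur.start_replication()

    try:
        cur.stop_replication()      # still outside the consume loop
    except psycopg2.ProgrammingError:
        pass                        # expected: the started flag is only set
                                    # once the consume loop is entered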
--- psycopg/cursor_type.c | 4 ++-- tests/test_connection.py | 12 ++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index d033a3df..5dd08cc9 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -1624,8 +1624,6 @@ psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject Py_INCREF(res); } - self->repl_started = 1; - return res; } @@ -1676,6 +1674,8 @@ psyco_curs_consume_replication_stream(cursorObject *self, PyObject *args, PyObje return NULL; } + self->repl_started = 1; + if (pq_copy_both(self, consumer, decode, keepalive_interval) >= 0) { res = Py_None; Py_INCREF(res); diff --git a/tests/test_connection.py b/tests/test_connection.py index 91ea51f5..18f1ff3e 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -1197,6 +1197,18 @@ class ReplicationTest(ConnectingTestCase): cur.execute("IDENTIFY_SYSTEM") cur.fetchall() + @skip_before_postgres(9, 0) + def test_stop_replication_raises(self): + import psycopg2.extras + conn = self.repl_connect(connection_factory=psycopg2.extras.PhysicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + self.assertRaises(psycopg2.ProgrammingError, cur.stop_replication) + + cur.start_replication() + self.assertRaises(psycopg2.ProgrammingError, cur.stop_replication) + + def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) From a0b42a12ff63fee362fce963fcb73350a810f09c Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 14 Oct 2015 15:15:07 +0200 Subject: [PATCH 030/151] Update stop_repl, require replication consumer to be a callable. --- doc/src/extras.rst | 33 +++++++++++++++++---------------- psycopg/cursor_type.c | 16 ++++++++-------- psycopg/pqpath.c | 8 ++++---- tests/test_connection.py | 6 +++++- 4 files changed, 34 insertions(+), 29 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 356e10e0..a9ba52fc 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -333,9 +333,9 @@ The individual messages in the replication stream are presented by Start replication on the connection using provided ``START_REPLICATION`` command. - .. method:: consume_replication_stream(consumer, decode=False, keepalive_interval=10) + .. method:: consume_replication_stream(consume, decode=False, keepalive_interval=10) - :param consumer: an object providing ``consume()`` method + :param consume: a callable object with signature ``consume(msg)`` :param decode: a flag indicating that unicode conversion should be performed on the messages received from the server :param keepalive_interval: interval (in seconds) to send keepalive @@ -348,10 +348,9 @@ The individual messages in the replication stream are presented by `start_replication()` first. When called, this method enters an endless loop, reading messages from - the server and passing them to ``consume()`` method of the *consumer* - object. In order to make this method break out of the loop and - return, the ``consume()`` method can call `stop_replication()` on the - cursor or it can throw an exception. + the server and passing them to ``consume()``. In order to make this + method break out of the loop and return, ``consume()`` can call + `stop_replication()` on the cursor or it can throw an exception. If *decode* is set to `!True`, the messages read from the server are converted according to the connection `~connection.encoding`. 
This @@ -362,12 +361,12 @@ The individual messages in the replication stream are presented by *keepalive_interval* (in seconds). The value of this parameter must be equal to at least 1 second, but it can have a fractional part. - The following example is a sketch implementation of *consumer* object - for logical replication:: + The following example is a sketch implementation of ``consume()`` + callable for logical replication:: class LogicalStreamConsumer(object): - def consume(self, msg): + def __call__(self, msg): self.store_message_data(msg.payload) if self.should_report_to_the_server_now(msg): @@ -376,7 +375,7 @@ The individual messages in the replication stream are presented by consumer = LogicalStreamConsumer() cur.consume_replication_stream(consumer, decode=True) - The *msg* objects passed to the ``consume()`` method are instances of + The *msg* objects passed to ``consume()`` are instances of `ReplicationMessage` class. After storing certain amount of messages' data reliably, the client @@ -401,11 +400,10 @@ The individual messages in the replication stream are presented by .. method:: stop_replication() - This method can be called on synchronous connections from the - ``consume()`` method of a ``consumer`` object in order to break out of - the endless loop in `consume_replication_stream()`. If called on - asynchronous connection or outside of the consume loop, this method - raises an error. + This method can be called on synchronous connection from the + ``consume()`` callable in order to break out of the endless loop in + `consume_replication_stream()`. If called on asynchronous connection + or when replication is not in progress, this method raises an error. .. method:: send_replication_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) @@ -490,11 +488,14 @@ The individual messages in the replication stream are presented by An actual example of asynchronous operation might look like this:: + def consume(msg): + ... + keepalive_interval = 10.0 while True: msg = cur.read_replication_message() if msg: - consumer.consume(msg) + consume(msg) else: timeout = keepalive_interval - (datetime.now() - cur.replication_io_timestamp).total_seconds() if timeout > 0: diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index 5dd08cc9..a4581495 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -1622,13 +1622,15 @@ psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject 1 /* no_result */, 1 /* no_begin */) >= 0) { res = Py_None; Py_INCREF(res); + + self->repl_started = 1; } return res; } #define psyco_curs_stop_replication_doc \ -"stop_replication() -- Set flag to break out of endless loop in start_replication() on sync connection." +"stop_replication() -- Set flag to break out of the endless loop in consume_replication_stream()." 
static PyObject * psyco_curs_stop_replication(cursorObject *self) @@ -1652,13 +1654,13 @@ psyco_curs_stop_replication(cursorObject *self) static PyObject * psyco_curs_consume_replication_stream(cursorObject *self, PyObject *args, PyObject *kwargs) { - PyObject *consumer = NULL, *res = NULL; + PyObject *consume = NULL, *res = NULL; int decode = 0; double keepalive_interval = 10; - static char *kwlist[] = {"consumer", "decode", "keepalive_interval", NULL}; + static char *kwlist[] = {"consume", "decode", "keepalive_interval", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|id", kwlist, - &consumer, &decode, &keepalive_interval)) { + &consume, &decode, &keepalive_interval)) { return NULL; } @@ -1674,9 +1676,7 @@ psyco_curs_consume_replication_stream(cursorObject *self, PyObject *args, PyObje return NULL; } - self->repl_started = 1; - - if (pq_copy_both(self, consumer, decode, keepalive_interval) >= 0) { + if (pq_copy_both(self, consume, decode, keepalive_interval) >= 0) { res = Py_None; Py_INCREF(res); } @@ -1709,7 +1709,7 @@ static PyObject * curs_flush_replication_feedback(cursorObject *self, int reply) { if (!(self->repl_feedback_pending || reply)) - Py_RETURN_FALSE; + Py_RETURN_TRUE; if (pq_send_replication_feedback(self, reply)) { self->repl_feedback_pending = 0; diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 4f1427de..a42c9a1a 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1723,7 +1723,7 @@ pq_send_replication_feedback(cursorObject* curs, int reply_requested) manages to send keepalive messages to the server as needed. */ int -pq_copy_both(cursorObject *curs, PyObject *consumer, int decode, double keepalive_interval) +pq_copy_both(cursorObject *curs, PyObject *consume, int decode, double keepalive_interval) { PyObject *msg, *tmp = NULL; PyObject *consume_func = NULL; @@ -1732,8 +1732,8 @@ pq_copy_both(cursorObject *curs, PyObject *consumer, int decode, double keepaliv fd_set fds; struct timeval keep_intr, curr_time, ping_time, timeout; - if (!(consume_func = PyObject_GetAttrString(consumer, "consume"))) { - Dprintf("pq_copy_both: can't get o.consume"); + if (!(consume_func = PyObject_GetAttrString(consume, "__call__"))) { + Dprintf("pq_copy_both: expected callable consume object"); goto exit; } @@ -1743,7 +1743,7 @@ pq_copy_both(cursorObject *curs, PyObject *consumer, int decode, double keepaliv keep_intr.tv_sec = (int)keepalive_interval; keep_intr.tv_usec = (keepalive_interval - keep_intr.tv_sec)*1.0e6; - while (1) { + while (!curs->repl_stop) { msg = pq_read_replication_message(curs, decode); if (!msg) { goto exit; diff --git a/tests/test_connection.py b/tests/test_connection.py index 18f1ff3e..e2b0da30 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -1206,7 +1206,11 @@ class ReplicationTest(ConnectingTestCase): self.assertRaises(psycopg2.ProgrammingError, cur.stop_replication) cur.start_replication() - self.assertRaises(psycopg2.ProgrammingError, cur.stop_replication) + cur.stop_replication() # doesn't raise now + + def consume(msg): + pass + cur.consume_replication_stream(consume) # should return at once def test_suite(): From e05b4fd2673a721e858cffdcd5b49ae451e57332 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 14 Oct 2015 17:36:50 +0200 Subject: [PATCH 031/151] Add checks on replication state, have to have a separate check for consume loop. 
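On the Python side the new checks surface as ProgrammingError when replication
methods are called in the wrong state; a rough sketch, assuming `conn` is an
already-established physical replication connection:

    import psycopg2

    cur = conn.cursor()

    try:
        cur.read_replication_message()   # no stream started yet
    except psycopg2.ProgrammingError:
        pass   # "... cannot be used when replication is not in progress"

    cur.start_replication()

    try:
        cur.start_replication()          # trying to start a second stream
    except psycopg2.ProgrammingError:
        pass   # "... cannot be used when replication is already in progress"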
--- psycopg/cursor.h | 17 +++++++++++++++++ psycopg/cursor_type.c | 29 ++++++++++++++++++----------- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 941e279e..432425f5 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -76,6 +76,7 @@ struct cursorObject { /* replication cursor attrs */ int repl_started:1; /* if replication is started */ int repl_stop:1; /* if client requested to stop replication */ + int repl_consuming:1; /* if running the consume loop */ struct timeval repl_keepalive_interval; /* interval for keepalive messages in replication mode */ XLogRecPtr repl_write_lsn; /* LSN stats for replication feedback messages */ XLogRecPtr repl_flush_lsn; @@ -147,6 +148,22 @@ do \ return NULL; } \ while (0) +#define EXC_IF_REPLICATING(self, cmd) \ +do \ + if ((self)->repl_started) { \ + PyErr_SetString(ProgrammingError, \ + #cmd " cannot be used when replication is already in progress"); \ + return NULL; } \ +while (0) + +#define EXC_IF_NOT_REPLICATING(self, cmd) \ +do \ + if (!(self)->repl_started) { \ + PyErr_SetString(ProgrammingError, \ + #cmd " cannot be used when replication is not in progress"); \ + return NULL; } \ +while (0) + #ifdef __cplusplus } #endif diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index a4581495..c7e6c26a 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -1600,16 +1600,13 @@ psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject EXC_IF_CURS_CLOSED(self); EXC_IF_GREEN(start_replication_expert); EXC_IF_TPC_PREPARED(self->conn, start_replication_expert); + EXC_IF_REPLICATING(self, start_replication_expert); - if (self->repl_started) { - psyco_set_error(ProgrammingError, self, "replication already in progress"); - return NULL; - } - - Dprintf("psyco_curs_start_replication_expert: command = %s", command); + Dprintf("psyco_curs_start_replication_expert: %s", command); self->copysize = 0; self->repl_stop = 0; + self->repl_consuming = 0; self->repl_write_lsn = InvalidXLogRecPtr; self->repl_flush_lsn = InvalidXLogRecPtr; @@ -1637,11 +1634,7 @@ psyco_curs_stop_replication(cursorObject *self) { EXC_IF_CURS_CLOSED(self); EXC_IF_CURS_ASYNC(self, stop_replication); - - if (!self->repl_started || self->repl_stop) { - psyco_set_error(ProgrammingError, self, "replication is not in progress"); - return NULL; - } + EXC_IF_NOT_REPLICATING(self, stop_replication); self->repl_stop = 1; @@ -1668,6 +1661,13 @@ psyco_curs_consume_replication_stream(cursorObject *self, PyObject *args, PyObje EXC_IF_CURS_ASYNC(self, consume_replication_stream); EXC_IF_GREEN(consume_replication_stream); EXC_IF_TPC_PREPARED(self->conn, consume_replication_stream); + EXC_IF_NOT_REPLICATING(self, consume_replication_stream); + + if (self->repl_consuming) { + PyErr_SetString(ProgrammingError, + "consume_replication_stream cannot be used when already in the consume loop"); + return NULL; + } Dprintf("psyco_curs_consume_replication_stream"); @@ -1676,11 +1676,15 @@ psyco_curs_consume_replication_stream(cursorObject *self, PyObject *args, PyObje return NULL; } + self->repl_consuming = 1; + if (pq_copy_both(self, consume, decode, keepalive_interval) >= 0) { res = Py_None; Py_INCREF(res); } + self->repl_consuming = 0; + return res; } @@ -1696,6 +1700,7 @@ psyco_curs_read_replication_message(cursorObject *self, PyObject *args, PyObject EXC_IF_CURS_CLOSED(self); EXC_IF_GREEN(read_replication_message); EXC_IF_TPC_PREPARED(self->conn, read_replication_message); + EXC_IF_NOT_REPLICATING(self, 
read_replication_message); if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &decode)) { @@ -1733,6 +1738,7 @@ psyco_curs_send_replication_feedback(cursorObject *self, PyObject *args, PyObjec static char* kwlist[] = {"write_lsn", "flush_lsn", "apply_lsn", "reply", NULL}; EXC_IF_CURS_CLOSED(self); + EXC_IF_NOT_REPLICATING(self, send_replication_feedback); if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|KKKi", kwlist, &write_lsn, &flush_lsn, &apply_lsn, &reply)) { @@ -1763,6 +1769,7 @@ psyco_curs_flush_replication_feedback(cursorObject *self, PyObject *args, PyObje static char *kwlist[] = {"reply", NULL}; EXC_IF_CURS_CLOSED(self); + EXC_IF_NOT_REPLICATING(self, flush_replication_feedback); if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &reply)) { From 822d671e8b2b0039bbcfb908c87fd239aa152faf Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 14 Oct 2015 17:40:39 +0200 Subject: [PATCH 032/151] Clear repl_stop flag after the consume loop. --- psycopg/cursor_type.c | 1 + 1 file changed, 1 insertion(+) diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index c7e6c26a..c797c264 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -1684,6 +1684,7 @@ psyco_curs_consume_replication_stream(cursorObject *self, PyObject *args, PyObje } self->repl_consuming = 0; + self->repl_stop = 0; /* who knows, what if we will be called again? */ return res; } From e3097ec9562a09b66f3d73e5bf901c8295909f38 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 14 Oct 2015 17:42:53 +0200 Subject: [PATCH 033/151] Fix select/timeout indication in async replication example --- doc/src/extras.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index a9ba52fc..e2ded4b6 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -501,9 +501,9 @@ The individual messages in the replication stream are presented by if timeout > 0: sel = select.select([cur], [], [], timeout) else: - sel = [] + sel = ([], [], []) - if not sel: + if not sel[0]: cur.send_replication_feedback() From 28a1a00d1ce29a823a91417807b9d2b9cbf7b4dd Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 14 Oct 2015 18:39:20 +0200 Subject: [PATCH 034/151] Remove commented copy_both code in pqfetch. --- psycopg/pqpath.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index a42c9a1a..111eb875 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1760,6 +1760,7 @@ pq_copy_both(cursorObject *curs, PyObject *consume, int decode, double keepalive FD_ZERO(&fds); FD_SET(fd, &fds); + /* how long can we wait before we need to send a keepalive? 
*/ gettimeofday(&curr_time, NULL); timeradd(&curs->repl_last_io, &keep_intr, &ping_time); @@ -1880,13 +1881,7 @@ pq_fetch(cursorObject *curs, int no_result) Dprintf("pq_fetch: data from a streaming replication slot (no tuples)"); curs->rowcount = -1; ex = 0; - /*if (curs->conn->async) { - ex = 0; - } else { - ex = _pq_copy_both_v3(curs); - - if (PyErr_Occurred()) ex = -1; - }*/ + /* nothing to do here: _pq_copy_both_v3 will be called separately */ CLEARPGRES(curs->pgres); break; From 9ab38ee8c5faf1241adaec0467ff6d83d1af6434 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Wed, 14 Oct 2015 18:39:48 +0200 Subject: [PATCH 035/151] Add psyco_curs_datetime_init --- psycopg/cursor.h | 2 ++ psycopg/cursor_type.c | 18 +++++++++++++++--- psycopg/psycopgmodule.c | 1 + 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 432425f5..3f125998 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -107,6 +107,8 @@ HIDDEN void curs_reset(cursorObject *self); HIDDEN int psyco_curs_withhold_set(cursorObject *self, PyObject *pyvalue); HIDDEN int psyco_curs_scrollable_set(cursorObject *self, PyObject *pyvalue); +RAISES_NEG int psyco_curs_datetime_init(void); + /* exception-raising macros */ #define EXC_IF_CURS_CLOSED(self) \ do { \ diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index c797c264..f4598873 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -1780,6 +1780,21 @@ psyco_curs_flush_replication_feedback(cursorObject *self, PyObject *args, PyObje return curs_flush_replication_feedback(self, reply); } + +RAISES_NEG int +psyco_curs_datetime_init(void) +{ + Dprintf("psyco_curs_datetime_init: datetime init"); + + PyDateTime_IMPORT; + + if (!PyDateTimeAPI) { + PyErr_SetString(PyExc_ImportError, "datetime initialization failed"); + return -1; + } + return 0; +} + #define psyco_curs_replication_io_timestamp_doc \ "replication_io_timestamp -- the timestamp of latest IO with the server" @@ -1791,9 +1806,6 @@ psyco_curs_get_replication_io_timestamp(cursorObject *self) EXC_IF_CURS_CLOSED(self); - // TODO: move to a one-call init function - PyDateTime_IMPORT; - seconds = self->repl_last_io.tv_sec + self->repl_last_io.tv_usec / 1.0e6; tval = Py_BuildValue("(d)", seconds); diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index 543b0c1b..7d3c73d9 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -939,6 +939,7 @@ INIT_MODULE(_psycopg)(void) /* Initialize the PyDateTimeAPI everywhere is used */ PyDateTime_IMPORT; if (psyco_adapter_datetime_init()) { goto exit; } + if (psyco_curs_datetime_init()) { goto exit; } if (psyco_replmsg_datetime_init()) { goto exit; } Py_TYPE(&pydatetimeType) = &PyType_Type; From d14fea31a33488a1f62a45a8a87109d5be678a72 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 15 Oct 2015 12:56:21 +0200 Subject: [PATCH 036/151] Use quote_ident from psycopg2.extensions --- lib/extras.py | 18 +++++++----------- tests/test_connection.py | 14 ++++++++++++++ 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/lib/extras.py b/lib/extras.py index e0fd8ef1..f411a4d0 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -40,7 +40,7 @@ from psycopg2 import extensions as _ext from psycopg2.extensions import cursor as _cursor from psycopg2.extensions import connection as _connection from psycopg2.extensions import replicationMessage as ReplicationMessage -from psycopg2.extensions import adapt as _A +from psycopg2.extensions import adapt as _A, quote_ident from psycopg2.extensions 
import b @@ -484,10 +484,6 @@ class ReplicationConnectionBase(_connection): if self.cursor_factory is None: self.cursor_factory = ReplicationCursor - def quote_ident(self, ident): - # FIXME: use PQescapeIdentifier or psycopg_escape_identifier_easy, somehow - return '"%s"' % ident.replace('"', '""') - class LogicalReplicationConnection(ReplicationConnectionBase): @@ -509,7 +505,7 @@ class ReplicationCursor(_cursor): def create_replication_slot(self, slot_name, slot_type=None, output_plugin=None): """Create streaming replication slot.""" - command = "CREATE_REPLICATION_SLOT %s " % self.connection.quote_ident(slot_name) + command = "CREATE_REPLICATION_SLOT %s " % quote_ident(slot_name, self) if slot_type is None: slot_type = self.connection.replication_type @@ -518,7 +514,7 @@ class ReplicationCursor(_cursor): if output_plugin is None: raise psycopg2.ProgrammingError("output plugin name is required to create logical replication slot") - command += "%s %s" % (slot_type, self.connection.quote_ident(output_plugin)) + command += "%s %s" % (slot_type, quote_ident(output_plugin, self)) elif slot_type == REPLICATION_PHYSICAL: if output_plugin is not None: @@ -534,7 +530,7 @@ class ReplicationCursor(_cursor): def drop_replication_slot(self, slot_name): """Drop streaming replication slot.""" - command = "DROP_REPLICATION_SLOT %s" % self.connection.quote_ident(slot_name) + command = "DROP_REPLICATION_SLOT %s" % quote_ident(slot_name, self) self.execute(command) def start_replication(self, slot_name=None, slot_type=None, start_lsn=0, @@ -548,7 +544,7 @@ class ReplicationCursor(_cursor): if slot_type == REPLICATION_LOGICAL: if slot_name: - command += "SLOT %s " % self.connection.quote_ident(slot_name) + command += "SLOT %s " % quote_ident(slot_name, self) else: raise psycopg2.ProgrammingError("slot name is required for logical replication") @@ -556,7 +552,7 @@ class ReplicationCursor(_cursor): elif slot_type == REPLICATION_PHYSICAL: if slot_name: - command += "SLOT %s " % self.connection.quote_ident(slot_name) + command += "SLOT %s " % quote_ident(slot_name, self) # don't add "PHYSICAL", before 9.4 it was just START_REPLICATION XXX/XXX else: @@ -584,7 +580,7 @@ class ReplicationCursor(_cursor): for k,v in options.iteritems(): if not command.endswith('('): command += ", " - command += "%s %s" % (self.connection.quote_ident(k), _A(str(v))) + command += "%s %s" % (quote_ident(k, self), _A(str(v))) command += ")" self.start_replication_expert(command) diff --git a/tests/test_connection.py b/tests/test_connection.py index e2b0da30..eeeaa845 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -1212,6 +1212,20 @@ class ReplicationTest(ConnectingTestCase): pass cur.consume_replication_stream(consume) # should return at once + @skip_before_postgres(9, 4) # slots require 9.4 + def test_create_replication_slot(self): + import psycopg2.extras + conn = self.repl_connect(connection_factory=psycopg2.extras.PhysicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + + slot = "test_slot1" + try: + cur.create_replication_slot(slot) + self.assertRaises(psycopg2.ProgrammingError, cur.create_replication_slot, slot) + finally: + cur.drop_replication_slot(slot) + def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) From cf4f2411bfd2d5a1cb84393f135e48107428137b Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 15 Oct 2015 18:01:43 +0200 Subject: [PATCH 037/151] Fix async replication and test. 
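The asynchronous flow exercised by the new test looks roughly as follows
(``dsn``, the slot name and ``handle()`` are placeholders; a logical slot
created with an output plugin such as test_decoding is assumed to exist):

    import select
    import psycopg2
    import psycopg2.extensions
    from psycopg2.extras import LogicalReplicationConnection

    def wait(conn):
        # minimal poll loop, as in tests/testutils.py
        while True:
            state = conn.poll()
            if state == psycopg2.extensions.POLL_OK:
                break
            elif state == psycopg2.extensions.POLL_READ:
                select.select([conn], [], [], 10)
            elif state == psycopg2.extensions.POLL_WRITE:
                select.select([], [conn], [], 10)
            else:
                raise Exception("Unexpected result from poll: %r" % state)

    conn = psycopg2.connect(dsn, async=1,
                            connection_factory=LogicalReplicationConnection)
    wait(conn)

    cur = conn.cursor()
    cur.start_replication("some_slot")
    wait(conn)

    while True:
        msg = cur.read_replication_message()
        if msg:
            handle(msg)                  # placeholder consumer
        else:
            select.select([cur], [], [], 10.0)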
--- lib/extras.py | 7 ++- tests/test_async.py | 16 ----- tests/test_connection.py | 49 --------------- tests/test_replication.py | 123 ++++++++++++++++++++++++++++++++++++++ tests/testutils.py | 21 ++++++- 5 files changed, 147 insertions(+), 69 deletions(-) create mode 100644 tests/test_replication.py diff --git a/lib/extras.py b/lib/extras.py index f411a4d0..dc2d5e65 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -449,7 +449,7 @@ class ReplicationConnectionBase(_connection): classes. Uses `ReplicationCursor` automatically. """ - def __init__(self, dsn, **kwargs): + def __init__(self, *args, **kwargs): """ Initializes a replication connection by adding appropriate parameters to the provided DSN and tweaking the connection @@ -466,7 +466,7 @@ class ReplicationConnectionBase(_connection): else: raise psycopg2.ProgrammingError("unrecognized replication type: %s" % self.replication_type) - items = _ext.parse_dsn(dsn) + items = _ext.parse_dsn(args[0]) # we add an appropriate replication keyword parameter, unless # user has specified one explicitly in the DSN @@ -475,7 +475,8 @@ class ReplicationConnectionBase(_connection): dsn = " ".join(["%s=%s" % (k, psycopg2._param_escape(str(v))) for (k, v) in items.iteritems()]) - super(ReplicationConnectionBase, self).__init__(dsn, **kwargs) + args = [dsn] + list(args[1:]) # async is the possible 2nd arg + super(ReplicationConnectionBase, self).__init__(*args, **kwargs) # prevent auto-issued BEGIN statements if not self.async: diff --git a/tests/test_async.py b/tests/test_async.py index d40b9c3e..e0bca7d5 100755 --- a/tests/test_async.py +++ b/tests/test_async.py @@ -29,7 +29,6 @@ import psycopg2 from psycopg2 import extensions import time -import select import StringIO from testutils import ConnectingTestCase @@ -66,21 +65,6 @@ class AsyncTests(ConnectingTestCase): )''') self.wait(curs) - def wait(self, cur_or_conn): - pollable = cur_or_conn - if not hasattr(pollable, 'poll'): - pollable = cur_or_conn.connection - while True: - state = pollable.poll() - if state == psycopg2.extensions.POLL_OK: - break - elif state == psycopg2.extensions.POLL_READ: - select.select([pollable], [], [], 10) - elif state == psycopg2.extensions.POLL_WRITE: - select.select([], [pollable], [], 10) - else: - raise Exception("Unexpected result from poll: %r", state) - def test_connection_setup(self): cur = self.conn.cursor() sync_cur = self.sync_conn.cursor() diff --git a/tests/test_connection.py b/tests/test_connection.py index eeeaa845..568f09ed 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -1178,55 +1178,6 @@ class AutocommitTests(ConnectingTestCase): self.assertEqual(cur.fetchone()[0], 'on') -class ReplicationTest(ConnectingTestCase): - @skip_before_postgres(9, 0) - def test_physical_replication_connection(self): - import psycopg2.extras - conn = self.repl_connect(connection_factory=psycopg2.extras.PhysicalReplicationConnection) - if conn is None: return - cur = conn.cursor() - cur.execute("IDENTIFY_SYSTEM") - cur.fetchall() - - @skip_before_postgres(9, 4) - def test_logical_replication_connection(self): - import psycopg2.extras - conn = self.repl_connect(connection_factory=psycopg2.extras.LogicalReplicationConnection) - if conn is None: return - cur = conn.cursor() - cur.execute("IDENTIFY_SYSTEM") - cur.fetchall() - - @skip_before_postgres(9, 0) - def test_stop_replication_raises(self): - import psycopg2.extras - conn = self.repl_connect(connection_factory=psycopg2.extras.PhysicalReplicationConnection) - if conn is None: return - cur = 
conn.cursor() - self.assertRaises(psycopg2.ProgrammingError, cur.stop_replication) - - cur.start_replication() - cur.stop_replication() # doesn't raise now - - def consume(msg): - pass - cur.consume_replication_stream(consume) # should return at once - - @skip_before_postgres(9, 4) # slots require 9.4 - def test_create_replication_slot(self): - import psycopg2.extras - conn = self.repl_connect(connection_factory=psycopg2.extras.PhysicalReplicationConnection) - if conn is None: return - cur = conn.cursor() - - slot = "test_slot1" - try: - cur.create_replication_slot(slot) - self.assertRaises(psycopg2.ProgrammingError, cur.create_replication_slot, slot) - finally: - cur.drop_replication_slot(slot) - - def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff --git a/tests/test_replication.py b/tests/test_replication.py new file mode 100644 index 00000000..231bcd08 --- /dev/null +++ b/tests/test_replication.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python + +# test_replication.py - unit test for replication protocol +# +# Copyright (C) 2015 Daniele Varrazzo +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import psycopg2 +import psycopg2.extensions +from psycopg2.extras import PhysicalReplicationConnection, LogicalReplicationConnection + +from testutils import unittest +from testutils import skip_before_postgres +from testutils import ConnectingTestCase + + +class ReplicationTestCase(ConnectingTestCase): + def setUp(self): + super(ReplicationTestCase, self).setUp() + self._slots = [] + + def tearDown(self): + # first close all connections, as they might keep the slot(s) active + super(ReplicationTestCase, self).tearDown() + + if self._slots: + kill_conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) + if kill_conn: + kill_cur = kill_conn.cursor() + for slot in self._slots: + kill_cur.drop_replication_slot(slot) + kill_conn.close() + + def create_replication_slot(self, cur, slot_name, **kwargs): + cur.create_replication_slot(slot_name, **kwargs) + self._slots.append(slot_name) + + def drop_replication_slot(self, cur, slot_name): + cur.drop_replication_slot(slot_name) + self._slots.remove(slot_name) + + +class ReplicationTest(ReplicationTestCase): + @skip_before_postgres(9, 0) + def test_physical_replication_connection(self): + conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + cur.execute("IDENTIFY_SYSTEM") + cur.fetchall() + + @skip_before_postgres(9, 4) + def test_logical_replication_connection(self): + conn = self.repl_connect(connection_factory=LogicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + cur.execute("IDENTIFY_SYSTEM") + cur.fetchall() + + @skip_before_postgres(9, 0) + def test_stop_replication_raises(self): + conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + self.assertRaises(psycopg2.ProgrammingError, cur.stop_replication) + + cur.start_replication() + cur.stop_replication() # doesn't raise now + + def consume(msg): + pass + cur.consume_replication_stream(consume) # should return at once + + @skip_before_postgres(9, 4) # slots require 9.4 + def test_create_replication_slot(self): + conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + + slot = "test_slot1" + + self.create_replication_slot(cur, slot) + self.assertRaises(psycopg2.ProgrammingError, self.create_replication_slot, cur, slot) + + +class AsyncReplicationTest(ReplicationTestCase): + @skip_before_postgres(9, 4) + def test_async_replication(self): + conn = self.repl_connect(connection_factory=LogicalReplicationConnection, async=1) + if conn is None: return + self.wait(conn) + cur = conn.cursor() + + slot = "test_slot1" + self.create_replication_slot(cur, slot, output_plugin='test_decoding') + self.wait(cur) + + cur.start_replication(slot) + self.wait(cur) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == "__main__": + unittest.main() diff --git a/tests/testutils.py b/tests/testutils.py index 76671d99..5f4493f2 100644 --- a/tests/testutils.py +++ b/tests/testutils.py @@ -27,6 +27,7 @@ import os import platform import sys +import select from functools import wraps from testconfig import dsn, repl_dsn @@ -129,7 +130,8 @@ class ConnectingTestCase(unittest.TestCase): except psycopg2.OperationalError, e: return self.skipTest("replication db not configured: %s" % e) - conn.autocommit = True + if not conn.async: + conn.autocommit = True return conn def _get_conn(self): @@ -143,6 +145,23 @@ class 
ConnectingTestCase(unittest.TestCase): conn = property(_get_conn, _set_conn) + # for use with async connections only + def wait(self, cur_or_conn): + import psycopg2.extensions + pollable = cur_or_conn + if not hasattr(pollable, 'poll'): + pollable = cur_or_conn.connection + while True: + state = pollable.poll() + if state == psycopg2.extensions.POLL_OK: + break + elif state == psycopg2.extensions.POLL_READ: + select.select([pollable], [], [], 10) + elif state == psycopg2.extensions.POLL_WRITE: + select.select([], [pollable], [], 10) + else: + raise Exception("Unexpected result from poll: %r", state) + def decorate_all_tests(cls, *decorators): """ From 0435320f34c56ced8c15899053920fc94fd4f3d7 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 16 Oct 2015 16:36:03 +0200 Subject: [PATCH 038/151] Fix PSYCOPG2_TEST_REPL_DSN handling. --- tests/test_replication.py | 16 ++++++++++++++++ tests/testconfig.py | 6 +++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/tests/test_replication.py b/tests/test_replication.py index 231bcd08..dfe11af0 100644 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -33,6 +33,9 @@ from testutils import ConnectingTestCase class ReplicationTestCase(ConnectingTestCase): def setUp(self): + from testconfig import repl_dsn + if not repl_dsn: + self.skipTest("replication tests disabled by default") super(ReplicationTestCase, self).setUp() self._slots = [] @@ -99,6 +102,19 @@ class ReplicationTest(ReplicationTestCase): self.create_replication_slot(cur, slot) self.assertRaises(psycopg2.ProgrammingError, self.create_replication_slot, cur, slot) + @skip_before_postgres(9, 4) # slots require 9.4 + def test_start_on_missing_replication_slot(self): + conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + + slot = "test_slot1" + + self.assertRaises(psycopg2.ProgrammingError, cur.start_replication, slot) + + self.create_replication_slot(cur, slot) + cur.start_replication(slot) + class AsyncReplicationTest(ReplicationTestCase): @skip_before_postgres(9, 4) diff --git a/tests/testconfig.py b/tests/testconfig.py index d59e5a0d..841eaf1c 100644 --- a/tests/testconfig.py +++ b/tests/testconfig.py @@ -33,4 +33,8 @@ if dbuser is not None: if dbpass is not None: dsn += ' password=%s' % dbpass -repl_dsn = os.environ.get('PSYCOPG2_TEST_REPL_DSN', dsn) +# Don't run replication tests if REPL_DSN is not set, default to normal DSN if +# set to empty string. +repl_dsn = os.environ.get('PSYCOPG2_TEST_REPL_DSN', None) +if repl_dsn == '': + repl_dsn = dsn From 4ab7cf0157ae311aa22c0cb38410a3d2ab9bea06 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Mon, 19 Oct 2015 15:42:42 +0200 Subject: [PATCH 039/151] Replace stop_replication with requirement for an exception. --- doc/src/extras.rst | 18 ++++++---------- lib/extras.py | 12 +++++++++++ psycopg/cursor.h | 1 - psycopg/cursor_type.c | 19 ----------------- psycopg/pqpath.c | 7 +----- tests/test_replication.py | 45 +++++++++++++++++++++++++++------------ 6 files changed, 51 insertions(+), 51 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 4755cc72..ddf989d7 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -348,9 +348,11 @@ The individual messages in the replication stream are presented by `start_replication()` first. When called, this method enters an endless loop, reading messages from - the server and passing them to ``consume()``. 
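With this change a consumer breaks out of consume_replication_stream() by
raising the new StopReplication exception; a minimal sketch, assuming an open
logical replication connection `conn`, an existing slot, and with process()
and done_for_now() as placeholders:

    from psycopg2.extras import StopReplication

    cur = conn.cursor()
    cur.start_replication("some_slot")

    def consume(msg):
        process(msg.payload)         # placeholder: store the message data
        if done_for_now(msg):        # placeholder: client-side stop condition
            raise StopReplication()

    try:
        cur.consume_replication_stream(consume, decode=True)
    except StopReplication:
        pass                         # intentional, clean exit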
In order to make this - method break out of the loop and return, ``consume()`` can call - `stop_replication()` on the cursor or it can throw an exception. + the server and passing them to ``consume()``, then waiting for more + messages from the server. In order to make this method break out of + the loop and return, ``consume()`` can throw a `StopReplication` + exception (any unhandled exception will make it break out of the loop + as well). If *decode* is set to `!True`, the messages read from the server are converted according to the connection `~connection.encoding`. This @@ -398,13 +400,6 @@ The individual messages in the replication stream are presented by load on network and the server. A possible strategy is to confirm after every COMMIT message. - .. method:: stop_replication() - - This method can be called on synchronous connection from the - ``consume()`` callable in order to break out of the endless loop in - `consume_replication_stream()`. If called on asynchronous connection - or when replication is not in progress, this method raises an error. - .. method:: send_replication_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) :param write_lsn: a LSN position up to which the client has written the data locally @@ -506,10 +501,11 @@ The individual messages in the replication stream are presented by if not sel[0]: cur.send_replication_feedback() - .. index:: pair: Cursor; Replication +.. autoclass:: StopReplication + .. index:: single: Data types; Additional diff --git a/lib/extras.py b/lib/extras.py index dc2d5e65..8854ec2b 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -500,6 +500,18 @@ class PhysicalReplicationConnection(ReplicationConnectionBase): super(PhysicalReplicationConnection, self).__init__(*args, **kwargs) +class StopReplication(Exception): + """ + Exception used to break out of the endless loop in + `~ReplicationCursor.consume_replication_stream()`. + + Subclass of `~exceptions.Exception`. Intentionally *not* inherited from + `~psycopg2.Error` as occurrence of this exception does not indicate an + error. + """ + pass + + class ReplicationCursor(_cursor): """A cursor used for communication on the replication protocol.""" diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 3f125998..669e176d 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -75,7 +75,6 @@ struct cursorObject { /* replication cursor attrs */ int repl_started:1; /* if replication is started */ - int repl_stop:1; /* if client requested to stop replication */ int repl_consuming:1; /* if running the consume loop */ struct timeval repl_keepalive_interval; /* interval for keepalive messages in replication mode */ XLogRecPtr repl_write_lsn; /* LSN stats for replication feedback messages */ diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index f4598873..d51f7a55 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -1605,7 +1605,6 @@ psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject Dprintf("psyco_curs_start_replication_expert: %s", command); self->copysize = 0; - self->repl_stop = 0; self->repl_consuming = 0; self->repl_write_lsn = InvalidXLogRecPtr; @@ -1626,21 +1625,6 @@ psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject return res; } -#define psyco_curs_stop_replication_doc \ -"stop_replication() -- Set flag to break out of the endless loop in consume_replication_stream()." 
- -static PyObject * -psyco_curs_stop_replication(cursorObject *self) -{ - EXC_IF_CURS_CLOSED(self); - EXC_IF_CURS_ASYNC(self, stop_replication); - EXC_IF_NOT_REPLICATING(self, stop_replication); - - self->repl_stop = 1; - - Py_RETURN_NONE; -} - #define psyco_curs_consume_replication_stream_doc \ "consume_replication_stream(consumer, keepalive_interval=10) -- Consume replication stream." @@ -1684,7 +1668,6 @@ psyco_curs_consume_replication_stream(cursorObject *self, PyObject *args, PyObje } self->repl_consuming = 0; - self->repl_stop = 0; /* who knows, what if we will be called again? */ return res; } @@ -1992,8 +1975,6 @@ static struct PyMethodDef cursorObject_methods[] = { METH_VARARGS|METH_KEYWORDS, psyco_curs_copy_expert_doc}, {"start_replication_expert", (PyCFunction)psyco_curs_start_replication_expert, METH_VARARGS|METH_KEYWORDS, psyco_curs_start_replication_expert_doc}, - {"stop_replication", (PyCFunction)psyco_curs_stop_replication, - METH_NOARGS, psyco_curs_stop_replication_doc}, {"consume_replication_stream", (PyCFunction)psyco_curs_consume_replication_stream, METH_VARARGS|METH_KEYWORDS, psyco_curs_consume_replication_stream_doc}, {"read_replication_message", (PyCFunction)psyco_curs_read_replication_message, diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 111eb875..f38fbd39 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1743,7 +1743,7 @@ pq_copy_both(cursorObject *curs, PyObject *consume, int decode, double keepalive keep_intr.tv_sec = (int)keepalive_interval; keep_intr.tv_usec = (keepalive_interval - keep_intr.tv_sec)*1.0e6; - while (!curs->repl_stop) { + while (1) { msg = pq_read_replication_message(curs, decode); if (!msg) { goto exit; @@ -1803,11 +1803,6 @@ pq_copy_both(cursorObject *curs, PyObject *consume, int decode, double keepalive goto exit; } Py_DECREF(tmp); - - if (curs->repl_stop) { - Dprintf("pq_copy_both: repl_stop flag set by consume_func"); - break; - } } } diff --git a/tests/test_replication.py b/tests/test_replication.py index dfe11af0..cd1321ae 100644 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -25,6 +25,7 @@ import psycopg2 import psycopg2.extensions from psycopg2.extras import PhysicalReplicationConnection, LogicalReplicationConnection +from psycopg2.extras import StopReplication from testutils import unittest from testutils import skip_before_postgres @@ -77,20 +78,6 @@ class ReplicationTest(ReplicationTestCase): cur.execute("IDENTIFY_SYSTEM") cur.fetchall() - @skip_before_postgres(9, 0) - def test_stop_replication_raises(self): - conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) - if conn is None: return - cur = conn.cursor() - self.assertRaises(psycopg2.ProgrammingError, cur.stop_replication) - - cur.start_replication() - cur.stop_replication() # doesn't raise now - - def consume(msg): - pass - cur.consume_replication_stream(consume) # should return at once - @skip_before_postgres(9, 4) # slots require 9.4 def test_create_replication_slot(self): conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) @@ -115,6 +102,36 @@ class ReplicationTest(ReplicationTestCase): self.create_replication_slot(cur, slot) cur.start_replication(slot) + @skip_before_postgres(9, 4) # slots require 9.4 + def test_stop_replication(self): + conn = self.repl_connect(connection_factory=LogicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + + slot = "test_slot1" + + self.create_replication_slot(cur, slot, output_plugin='test_decoding') + + self.make_replication_event() + + 
cur.start_replication(slot) + def consume(msg): + raise StopReplication() + self.assertRaises(StopReplication, cur.consume_replication_stream, consume) + + # generate an event for our replication stream + def make_replication_event(self): + conn = self.connect() + if conn is None: return + cur = conn.cursor() + + try: + cur.execute("DROP TABLE dummy1") + except psycopg2.ProgrammingError: + conn.rollback() + cur.execute("CREATE TABLE dummy1()") + conn.commit() + class AsyncReplicationTest(ReplicationTestCase): @skip_before_postgres(9, 4) From 7aea2cef6e42c961fadac61f19b570bdf8c61401 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Mon, 19 Oct 2015 17:02:18 +0200 Subject: [PATCH 040/151] Improve async replication test. --- tests/test_replication.py | 83 ++++++++++++++++++++++----------------- tests/testconfig.py | 2 + 2 files changed, 50 insertions(+), 35 deletions(-) diff --git a/tests/test_replication.py b/tests/test_replication.py index cd1321ae..5c029c88 100644 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -27,6 +27,7 @@ import psycopg2.extensions from psycopg2.extras import PhysicalReplicationConnection, LogicalReplicationConnection from psycopg2.extras import StopReplication +import testconfig from testutils import unittest from testutils import skip_before_postgres from testutils import ConnectingTestCase @@ -34,10 +35,12 @@ from testutils import ConnectingTestCase class ReplicationTestCase(ConnectingTestCase): def setUp(self): - from testconfig import repl_dsn - if not repl_dsn: + if not testconfig.repl_dsn: self.skipTest("replication tests disabled by default") + super(ReplicationTestCase, self).setUp() + + self.slot = testconfig.repl_slot self._slots = [] def tearDown(self): @@ -52,14 +55,27 @@ class ReplicationTestCase(ConnectingTestCase): kill_cur.drop_replication_slot(slot) kill_conn.close() - def create_replication_slot(self, cur, slot_name, **kwargs): + def create_replication_slot(self, cur, slot_name=testconfig.repl_slot, **kwargs): cur.create_replication_slot(slot_name, **kwargs) self._slots.append(slot_name) - def drop_replication_slot(self, cur, slot_name): + def drop_replication_slot(self, cur, slot_name=testconfig.repl_slot): cur.drop_replication_slot(slot_name) self._slots.remove(slot_name) + # generate some events for our replication stream + def make_replication_events(self): + conn = self.connect() + if conn is None: return + cur = conn.cursor() + + try: + cur.execute("DROP TABLE dummy1") + except psycopg2.ProgrammingError: + conn.rollback() + cur.execute("CREATE TABLE dummy1 AS SELECT * FROM generate_series(1, 5) AS id") + conn.commit() + class ReplicationTest(ReplicationTestCase): @skip_before_postgres(9, 0) @@ -84,10 +100,8 @@ class ReplicationTest(ReplicationTestCase): if conn is None: return cur = conn.cursor() - slot = "test_slot1" - - self.create_replication_slot(cur, slot) - self.assertRaises(psycopg2.ProgrammingError, self.create_replication_slot, cur, slot) + self.create_replication_slot(cur) + self.assertRaises(psycopg2.ProgrammingError, self.create_replication_slot, cur) @skip_before_postgres(9, 4) # slots require 9.4 def test_start_on_missing_replication_slot(self): @@ -95,12 +109,10 @@ class ReplicationTest(ReplicationTestCase): if conn is None: return cur = conn.cursor() - slot = "test_slot1" + self.assertRaises(psycopg2.ProgrammingError, cur.start_replication, self.slot) - self.assertRaises(psycopg2.ProgrammingError, cur.start_replication, slot) - - self.create_replication_slot(cur, slot) - cur.start_replication(slot) + 
self.create_replication_slot(cur) + cur.start_replication(self.slot) @skip_before_postgres(9, 4) # slots require 9.4 def test_stop_replication(self): @@ -108,46 +120,47 @@ class ReplicationTest(ReplicationTestCase): if conn is None: return cur = conn.cursor() - slot = "test_slot1" + self.create_replication_slot(cur, output_plugin='test_decoding') - self.create_replication_slot(cur, slot, output_plugin='test_decoding') + self.make_replication_events() - self.make_replication_event() - - cur.start_replication(slot) + cur.start_replication(self.slot) def consume(msg): raise StopReplication() self.assertRaises(StopReplication, cur.consume_replication_stream, consume) - # generate an event for our replication stream - def make_replication_event(self): - conn = self.connect() - if conn is None: return - cur = conn.cursor() - - try: - cur.execute("DROP TABLE dummy1") - except psycopg2.ProgrammingError: - conn.rollback() - cur.execute("CREATE TABLE dummy1()") - conn.commit() - class AsyncReplicationTest(ReplicationTestCase): - @skip_before_postgres(9, 4) + @skip_before_postgres(9, 4) # slots require 9.4 def test_async_replication(self): conn = self.repl_connect(connection_factory=LogicalReplicationConnection, async=1) if conn is None: return self.wait(conn) cur = conn.cursor() - slot = "test_slot1" - self.create_replication_slot(cur, slot, output_plugin='test_decoding') + self.create_replication_slot(cur, output_plugin='test_decoding') self.wait(cur) - cur.start_replication(slot) + cur.start_replication(self.slot) self.wait(cur) + self.make_replication_events() + + self.msg_count = 0 + def consume(msg): + self.msg_count += 1 + if self.msg_count > 3: + raise StopReplication() + + def process_stream(): + from select import select + while True: + msg = cur.read_replication_message() + if msg: + consume(msg) + else: + select([cur], [], []) + self.assertRaises(StopReplication, process_stream) def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) diff --git a/tests/testconfig.py b/tests/testconfig.py index 841eaf1c..82b48a39 100644 --- a/tests/testconfig.py +++ b/tests/testconfig.py @@ -38,3 +38,5 @@ if dbpass is not None: repl_dsn = os.environ.get('PSYCOPG2_TEST_REPL_DSN', None) if repl_dsn == '': repl_dsn = dsn + +repl_slot = os.environ.get('PSYCOPG2_TEST_REPL_SLOT', 'psycopg2_test_slot') From 0bb81fc84811134bca70b59daa4661bd0697f2ff Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Mon, 19 Oct 2015 20:00:39 +0200 Subject: [PATCH 041/151] Properly subclass ReplicationCursor on C level. --- doc/src/extras.rst | 50 ++-- lib/extensions.py | 2 +- lib/extras.py | 10 +- psycopg/cursor.h | 27 +-- psycopg/cursor_type.c | 235 +------------------ psycopg/pqpath.c | 97 ++++---- psycopg/pqpath.h | 8 +- psycopg/psycopgmodule.c | 9 +- psycopg/replication_cursor.h | 77 ++++++ psycopg/replication_cursor_type.c | 360 +++++++++++++++++++++++++++++ psycopg/replication_message_type.c | 2 +- psycopg2.cproj | 2 + setup.py | 4 +- tests/test_replication.py | 20 +- 14 files changed, 554 insertions(+), 349 deletions(-) create mode 100644 psycopg/replication_cursor.h create mode 100644 psycopg/replication_cursor_type.c diff --git a/doc/src/extras.rst b/doc/src/extras.rst index ddf989d7..9384a961 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -324,16 +324,15 @@ The individual messages in the replication stream are presented by `start_replication_expert()` internally. 
After starting the replication, to actually consume the incoming - server messages, use `consume_replication_stream()` or implement a - loop around `read_replication_message()` in case of asynchronous - connection. + server messages, use `consume_stream()` or implement a loop around + `read_message()` in case of asynchronous connection. .. method:: start_replication_expert(command) Start replication on the connection using provided ``START_REPLICATION`` command. - .. method:: consume_replication_stream(consume, decode=False, keepalive_interval=10) + .. method:: consume_stream(consume, decode=False, keepalive_interval=10) :param consume: a callable object with signature ``consume(msg)`` :param decode: a flag indicating that unicode conversion should be @@ -342,7 +341,7 @@ The individual messages in the replication stream are presented by messages to the server This method can only be used with synchronous connection. For - asynchronous connections see `read_replication_message()`. + asynchronous connections see `read_message()`. Before calling this method to consume the stream, use `start_replication()` first. @@ -372,18 +371,18 @@ The individual messages in the replication stream are presented by self.store_message_data(msg.payload) if self.should_report_to_the_server_now(msg): - msg.cursor.send_replication_feedback(flush_lsn=msg.data_start) + msg.cursor.send_feedback(flush_lsn=msg.data_start) consumer = LogicalStreamConsumer() - cur.consume_replication_stream(consumer, decode=True) + cur.consume_stream(consumer, decode=True) The *msg* objects passed to ``consume()`` are instances of `ReplicationMessage` class. After storing certain amount of messages' data reliably, the client should send a confirmation message to the server. This should be done - by calling `send_replication_feedback()` method on the corresponding - replication cursor. A reference to the cursor is provided in the + by calling `send_feedback()` method on the corresponding replication + cursor. A reference to the cursor is provided in the `ReplicationMessage` as an attribute. .. warning:: @@ -400,7 +399,7 @@ The individual messages in the replication stream are presented by load on network and the server. A possible strategy is to confirm after every COMMIT message. - .. method:: send_replication_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) + .. method:: send_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) :param write_lsn: a LSN position up to which the client has written the data locally :param flush_lsn: a LSN position up to which the client has stored the @@ -419,16 +418,15 @@ The individual messages in the replication stream are presented by just send a keepalive message to the server. If the feedback message could not be sent, updates the passed LSN - positions in the cursor for a later call to - `flush_replication_feedback()` and returns `!False`, otherwise returns - `!True`. + positions in the cursor for a later call to `flush_feedback()` and + returns `!False`, otherwise returns `!True`. - .. method:: flush_replication_feedback(reply=False) + .. method:: flush_feedback(reply=False) :param reply: request the server to send back a keepalive message immediately This method tries to flush the latest replication feedback message - that `send_replication_feedback()` was trying to send but couldn't. + that `send_feedback()` was trying to send but couldn't. If *reply* is `!True` sends a keepalive message in either case. 
@@ -437,14 +435,13 @@ The individual messages in the replication stream are presented by Low-level methods for asynchronous connection operation. - With the synchronous connection, a call to `consume_replication_stream()` - handles all the complexity of handling the incoming messages and sending - keepalive replies, but at times it might be beneficial to use low-level - interface for better control, in particular to `~select.select()` on - multiple sockets. The following methods are provided for asynchronous - operation: + With the synchronous connection, a call to `consume_stream()` handles all + the complexity of handling the incoming messages and sending keepalive + replies, but at times it might be beneficial to use low-level interface + for better control, in particular to `~select.select()` on multiple + sockets. The following methods are provided for asynchronous operation: - .. method:: read_replication_message(decode=True) + .. method:: read_message(decode=True) :param decode: a flag indicating that unicode conversion should be performed on the data received from the server @@ -475,7 +472,7 @@ The individual messages in the replication stream are presented by This is a convenience method which allows replication cursor to be used directly in `~select.select()` or `~select.poll()` calls. - .. attribute:: replication_io_timestamp + .. attribute:: io_timestamp A `~datetime` object representing the timestamp at the moment of last communication with the server (a data or keepalive message in either @@ -488,18 +485,19 @@ The individual messages in the replication stream are presented by keepalive_interval = 10.0 while True: - msg = cur.read_replication_message() + msg = cur.read_message() if msg: consume(msg) else: - timeout = keepalive_interval - (datetime.now() - cur.replication_io_timestamp).total_seconds() + now = datetime.now() + timeout = keepalive_interval - (now - cur.io_timestamp).total_seconds() if timeout > 0: sel = select.select([cur], [], [], timeout) else: sel = ([], [], []) if not sel[0]: - cur.send_replication_feedback() + cur.send_feedback() .. index:: pair: Cursor; Replication diff --git a/lib/extensions.py b/lib/extensions.py index 513b7fc7..af27bca6 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -61,7 +61,7 @@ from psycopg2._psycopg import string_types, binary_types, new_type, new_array_ty from psycopg2._psycopg import ISQLQuote, Notify, Diagnostics, Column from psycopg2._psycopg import QueryCanceledError, TransactionRollbackError -from psycopg2._psycopg import replicationMessage +from psycopg2._psycopg import ReplicationCursor, ReplicationMessage try: from psycopg2._psycopg import set_wait_callback, get_wait_callback diff --git a/lib/extras.py b/lib/extras.py index 8854ec2b..7c713573 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -39,7 +39,8 @@ import psycopg2 from psycopg2 import extensions as _ext from psycopg2.extensions import cursor as _cursor from psycopg2.extensions import connection as _connection -from psycopg2.extensions import replicationMessage as ReplicationMessage +from psycopg2.extensions import ReplicationCursor as _replicationCursor +from psycopg2.extensions import ReplicationMessage from psycopg2.extensions import adapt as _A, quote_ident from psycopg2.extensions import b @@ -503,7 +504,7 @@ class PhysicalReplicationConnection(ReplicationConnectionBase): class StopReplication(Exception): """ Exception used to break out of the endless loop in - `~ReplicationCursor.consume_replication_stream()`. + `~ReplicationCursor.consume_stream()`. 
Subclass of `~exceptions.Exception`. Intentionally *not* inherited from `~psycopg2.Error` as occurrence of this exception does not indicate an @@ -512,7 +513,7 @@ class StopReplication(Exception): pass -class ReplicationCursor(_cursor): +class ReplicationCursor(_replicationCursor): """A cursor used for communication on the replication protocol.""" def create_replication_slot(self, slot_name, slot_type=None, output_plugin=None): @@ -598,9 +599,6 @@ class ReplicationCursor(_cursor): self.start_replication_expert(command) - def send_feedback_message(self, written_lsn=0, sync_lsn=0, apply_lsn=0, reply_requested=False): - return self.send_replication_feedback(written_lsn, sync_lsn, apply_lsn, reply_requested) - # allows replication cursors to be used in select.select() directly def fileno(self): return self.connection.fileno() diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 669e176d..18e31e5f 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -27,7 +27,6 @@ #define PSYCOPG_CURSOR_H 1 #include "psycopg/connection.h" -#include "libpq_support.h" #ifdef __cplusplus extern "C" { @@ -74,14 +73,6 @@ struct cursorObject { #define DEFAULT_COPYBUFF 8192 /* replication cursor attrs */ - int repl_started:1; /* if replication is started */ - int repl_consuming:1; /* if running the consume loop */ - struct timeval repl_keepalive_interval; /* interval for keepalive messages in replication mode */ - XLogRecPtr repl_write_lsn; /* LSN stats for replication feedback messages */ - XLogRecPtr repl_flush_lsn; - XLogRecPtr repl_apply_lsn; - int repl_feedback_pending; /* flag set when we couldn't send the feedback to the server */ - struct timeval repl_last_io; /* timestamp of the last exchange with the server */ PyObject *tuple_factory; /* factory for result tuples */ PyObject *tzinfo_factory; /* factory for tzinfo objects */ @@ -106,7 +97,7 @@ HIDDEN void curs_reset(cursorObject *self); HIDDEN int psyco_curs_withhold_set(cursorObject *self, PyObject *pyvalue); HIDDEN int psyco_curs_scrollable_set(cursorObject *self, PyObject *pyvalue); -RAISES_NEG int psyco_curs_datetime_init(void); +HIDDEN int psyco_curs_init(PyObject *obj, PyObject *args, PyObject *kwargs); /* exception-raising macros */ #define EXC_IF_CURS_CLOSED(self) \ @@ -149,22 +140,6 @@ do \ return NULL; } \ while (0) -#define EXC_IF_REPLICATING(self, cmd) \ -do \ - if ((self)->repl_started) { \ - PyErr_SetString(ProgrammingError, \ - #cmd " cannot be used when replication is already in progress"); \ - return NULL; } \ -while (0) - -#define EXC_IF_NOT_REPLICATING(self, cmd) \ -do \ - if (!(self)->repl_started) { \ - PyErr_SetString(ProgrammingError, \ - #cmd " cannot be used when replication is not in progress"); \ - return NULL; } \ -while (0) - #ifdef __cplusplus } #endif diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index d51f7a55..63bd5a10 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -28,7 +28,6 @@ #include "psycopg/cursor.h" #include "psycopg/connection.h" -#include "psycopg/replication_message.h" #include "psycopg/green.h" #include "psycopg/pqpath.h" #include "psycopg/typecast.h" @@ -39,9 +38,6 @@ #include -/* python */ -#include "datetime.h" - /** DBAPI methods **/ @@ -1583,222 +1579,6 @@ exit: return res; } -#define psyco_curs_start_replication_expert_doc \ -"start_replication_expert(command, writer=None, keepalive_interval=10) -- Start and consume replication stream with direct command." 
- -static PyObject * -psyco_curs_start_replication_expert(cursorObject *self, PyObject *args, PyObject *kwargs) -{ - PyObject *res = NULL; - char *command; - static char *kwlist[] = {"command", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &command)) { - return NULL; - } - - EXC_IF_CURS_CLOSED(self); - EXC_IF_GREEN(start_replication_expert); - EXC_IF_TPC_PREPARED(self->conn, start_replication_expert); - EXC_IF_REPLICATING(self, start_replication_expert); - - Dprintf("psyco_curs_start_replication_expert: %s", command); - - self->copysize = 0; - self->repl_consuming = 0; - - self->repl_write_lsn = InvalidXLogRecPtr; - self->repl_flush_lsn = InvalidXLogRecPtr; - self->repl_apply_lsn = InvalidXLogRecPtr; - self->repl_feedback_pending = 0; - - gettimeofday(&self->repl_last_io, NULL); - - if (pq_execute(self, command, self->conn->async, - 1 /* no_result */, 1 /* no_begin */) >= 0) { - res = Py_None; - Py_INCREF(res); - - self->repl_started = 1; - } - - return res; -} - -#define psyco_curs_consume_replication_stream_doc \ -"consume_replication_stream(consumer, keepalive_interval=10) -- Consume replication stream." - -static PyObject * -psyco_curs_consume_replication_stream(cursorObject *self, PyObject *args, PyObject *kwargs) -{ - PyObject *consume = NULL, *res = NULL; - int decode = 0; - double keepalive_interval = 10; - static char *kwlist[] = {"consume", "decode", "keepalive_interval", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|id", kwlist, - &consume, &decode, &keepalive_interval)) { - return NULL; - } - - EXC_IF_CURS_CLOSED(self); - EXC_IF_CURS_ASYNC(self, consume_replication_stream); - EXC_IF_GREEN(consume_replication_stream); - EXC_IF_TPC_PREPARED(self->conn, consume_replication_stream); - EXC_IF_NOT_REPLICATING(self, consume_replication_stream); - - if (self->repl_consuming) { - PyErr_SetString(ProgrammingError, - "consume_replication_stream cannot be used when already in the consume loop"); - return NULL; - } - - Dprintf("psyco_curs_consume_replication_stream"); - - if (keepalive_interval < 1.0) { - psyco_set_error(ProgrammingError, self, "keepalive_interval must be >= 1 (sec)"); - return NULL; - } - - self->repl_consuming = 1; - - if (pq_copy_both(self, consume, decode, keepalive_interval) >= 0) { - res = Py_None; - Py_INCREF(res); - } - - self->repl_consuming = 0; - - return res; -} - -#define psyco_curs_read_replication_message_doc \ -"read_replication_message(decode=True) -- Try reading a replication message from the server (non-blocking)." 
- -static PyObject * -psyco_curs_read_replication_message(cursorObject *self, PyObject *args, PyObject *kwargs) -{ - int decode = 1; - static char *kwlist[] = {"decode", NULL}; - - EXC_IF_CURS_CLOSED(self); - EXC_IF_GREEN(read_replication_message); - EXC_IF_TPC_PREPARED(self->conn, read_replication_message); - EXC_IF_NOT_REPLICATING(self, read_replication_message); - - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, - &decode)) { - return NULL; - } - - return pq_read_replication_message(self, decode); -} - -static PyObject * -curs_flush_replication_feedback(cursorObject *self, int reply) -{ - if (!(self->repl_feedback_pending || reply)) - Py_RETURN_TRUE; - - if (pq_send_replication_feedback(self, reply)) { - self->repl_feedback_pending = 0; - Py_RETURN_TRUE; - } else { - self->repl_feedback_pending = 1; - Py_RETURN_FALSE; - } -} - -#define psyco_curs_send_replication_feedback_doc \ -"send_replication_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) -- Try sending a replication feedback message to the server and optionally request a reply." - -static PyObject * -psyco_curs_send_replication_feedback(cursorObject *self, PyObject *args, PyObject *kwargs) -{ - XLogRecPtr write_lsn = InvalidXLogRecPtr, - flush_lsn = InvalidXLogRecPtr, - apply_lsn = InvalidXLogRecPtr; - int reply = 0; - static char* kwlist[] = {"write_lsn", "flush_lsn", "apply_lsn", "reply", NULL}; - - EXC_IF_CURS_CLOSED(self); - EXC_IF_NOT_REPLICATING(self, send_replication_feedback); - - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|KKKi", kwlist, - &write_lsn, &flush_lsn, &apply_lsn, &reply)) { - return NULL; - } - - if (write_lsn > self->repl_write_lsn) - self->repl_write_lsn = write_lsn; - - if (flush_lsn > self->repl_flush_lsn) - self->repl_flush_lsn = flush_lsn; - - if (apply_lsn > self->repl_apply_lsn) - self->repl_apply_lsn = apply_lsn; - - self->repl_feedback_pending = 1; - - return curs_flush_replication_feedback(self, reply); -} - -#define psyco_curs_flush_replication_feedback_doc \ -"flush_replication_feedback(reply=False) -- Try flushing the latest pending replication feedback message to the server and optionally request a reply." 
- -static PyObject * -psyco_curs_flush_replication_feedback(cursorObject *self, PyObject *args, PyObject *kwargs) -{ - int reply = 0; - static char *kwlist[] = {"reply", NULL}; - - EXC_IF_CURS_CLOSED(self); - EXC_IF_NOT_REPLICATING(self, flush_replication_feedback); - - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, - &reply)) { - return NULL; - } - - return curs_flush_replication_feedback(self, reply); -} - - -RAISES_NEG int -psyco_curs_datetime_init(void) -{ - Dprintf("psyco_curs_datetime_init: datetime init"); - - PyDateTime_IMPORT; - - if (!PyDateTimeAPI) { - PyErr_SetString(PyExc_ImportError, "datetime initialization failed"); - return -1; - } - return 0; -} - -#define psyco_curs_replication_io_timestamp_doc \ -"replication_io_timestamp -- the timestamp of latest IO with the server" - -static PyObject * -psyco_curs_get_replication_io_timestamp(cursorObject *self) -{ - PyObject *tval, *res = NULL; - double seconds; - - EXC_IF_CURS_CLOSED(self); - - seconds = self->repl_last_io.tv_sec + self->repl_last_io.tv_usec / 1.0e6; - - tval = Py_BuildValue("(d)", seconds); - if (tval) { - res = PyDateTime_FromTimestamp(tval); - Py_DECREF(tval); - } - return res; -} - /* extension: closed - return true if cursor is closed */ #define psyco_curs_closed_doc \ @@ -1973,16 +1753,6 @@ static struct PyMethodDef cursorObject_methods[] = { METH_VARARGS|METH_KEYWORDS, psyco_curs_copy_to_doc}, {"copy_expert", (PyCFunction)psyco_curs_copy_expert, METH_VARARGS|METH_KEYWORDS, psyco_curs_copy_expert_doc}, - {"start_replication_expert", (PyCFunction)psyco_curs_start_replication_expert, - METH_VARARGS|METH_KEYWORDS, psyco_curs_start_replication_expert_doc}, - {"consume_replication_stream", (PyCFunction)psyco_curs_consume_replication_stream, - METH_VARARGS|METH_KEYWORDS, psyco_curs_consume_replication_stream_doc}, - {"read_replication_message", (PyCFunction)psyco_curs_read_replication_message, - METH_VARARGS|METH_KEYWORDS, psyco_curs_read_replication_message_doc}, - {"send_replication_feedback", (PyCFunction)psyco_curs_send_replication_feedback, - METH_VARARGS|METH_KEYWORDS, psyco_curs_send_replication_feedback_doc}, - {"flush_replication_feedback", (PyCFunction)psyco_curs_flush_replication_feedback, - METH_VARARGS|METH_KEYWORDS, psyco_curs_flush_replication_feedback_doc}, {NULL} }; @@ -2033,9 +1803,6 @@ static struct PyGetSetDef cursorObject_getsets[] = { (getter)psyco_curs_scrollable_get, (setter)psyco_curs_scrollable_set, psyco_curs_scrollable_doc, NULL }, - { "replication_io_timestamp", - (getter)psyco_curs_get_replication_io_timestamp, NULL, - psyco_curs_replication_io_timestamp_doc, NULL }, {NULL} }; @@ -2134,7 +1901,7 @@ cursor_dealloc(PyObject* obj) Py_TYPE(obj)->tp_free(obj); } -static int +int cursor_init(PyObject *obj, PyObject *args, PyObject *kwargs) { PyObject *conn; diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index f38fbd39..d6886981 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -35,6 +35,7 @@ #include "psycopg/pqpath.h" #include "psycopg/connection.h" #include "psycopg/cursor.h" +#include "psycopg/replication_cursor.h" #include "psycopg/replication_message.h" #include "psycopg/green.h" #include "psycopg/typecast.h" @@ -1542,19 +1543,23 @@ exit: are never returned to the caller. 
*/ PyObject * -pq_read_replication_message(cursorObject *curs, int decode) +pq_read_replication_message(replicationCursorObject *repl, int decode) { + cursorObject *curs = &repl->cur; + connectionObject *conn = curs->conn; + PGconn *pgconn = conn->pgconn; char *buffer = NULL; int len, data_size, consumed, hdr, reply; XLogRecPtr data_start, wal_end; pg_int64 send_time; - PyObject *str = NULL, *msg = NULL; + PyObject *str = NULL, *result = NULL; + replicationMessageObject *msg = NULL; Dprintf("pq_read_replication_message(decode=%d)", decode); consumed = 0; retry: - len = PQgetCopyData(curs->conn->pgconn, &buffer, 1 /* async */); + len = PQgetCopyData(pgconn, &buffer, 1 /* async */); if (len == 0) { /* If we've tried reading some data, but there was none, bail out. */ @@ -1566,8 +1571,8 @@ retry: server we might be reading a number of messages for every single one we process, thus overgrowing the internal buffer until the client system runs out of memory. */ - if (!PQconsumeInput(curs->conn->pgconn)) { - pq_raise(curs->conn, curs, NULL); + if (!PQconsumeInput(pgconn)) { + pq_raise(conn, curs, NULL); goto exit; } /* But PQconsumeInput() doesn't tell us if it has actually read @@ -1581,15 +1586,15 @@ retry: if (len == -2) { /* serious error */ - pq_raise(curs->conn, curs, NULL); + pq_raise(conn, curs, NULL); goto exit; } if (len == -1) { /* EOF */ - curs->pgres = PQgetResult(curs->conn->pgconn); + curs->pgres = PQgetResult(pgconn); if (curs->pgres && PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR) { - pq_raise(curs->conn, curs, NULL); + pq_raise(conn, curs, NULL); goto exit; } @@ -1603,7 +1608,7 @@ retry: consumed = 1; /* ok, we did really read something: update the io timestamp */ - gettimeofday(&curs->repl_last_io, NULL); + gettimeofday(&repl->last_io, NULL); Dprintf("pq_read_replication_message: msg=%c, len=%d", buffer[0], len); if (buffer[0] == 'w') { @@ -1626,21 +1631,22 @@ retry: /* XXX it would be wise to check if it's really a logical replication */ if (decode) { - str = PyUnicode_Decode(buffer + hdr, data_size, curs->conn->codec, NULL); + str = PyUnicode_Decode(buffer + hdr, data_size, conn->codec, NULL); } else { str = Bytes_FromStringAndSize(buffer + hdr, data_size); } if (!str) { goto exit; } - msg = PyObject_CallFunctionObjArgs((PyObject *)&replicationMessageType, - curs, str, NULL); + result = PyObject_CallFunctionObjArgs((PyObject *)&replicationMessageType, + curs, str, NULL); Py_DECREF(str); - if (!msg) { goto exit; } + if (!result) { goto exit; } - ((replicationMessageObject *)msg)->data_size = data_size; - ((replicationMessageObject *)msg)->data_start = data_start; - ((replicationMessageObject *)msg)->wal_end = wal_end; - ((replicationMessageObject *)msg)->send_time = send_time; + msg = (replicationMessageObject *)result; + msg->data_size = data_size; + msg->data_start = data_start; + msg->wal_end = wal_end; + msg->send_time = send_time; } else if (buffer[0] == 'k') { /* Primary keepalive message: msgtype(1), walEnd(8), sendTime(8), reply(1) */ @@ -1652,17 +1658,17 @@ retry: reply = buffer[hdr]; if (reply) { - if (!pq_send_replication_feedback(curs, 0)) { - if (curs->conn->async) { - curs->repl_feedback_pending = 1; + if (!pq_send_replication_feedback(repl, 0)) { + if (conn->async) { + repl->feedback_pending = 1; } else { /* XXX not sure if this was a good idea after all */ - pq_raise(curs->conn, curs, NULL); + pq_raise(conn, curs, NULL); goto exit; } } else { - gettimeofday(&curs->repl_last_io, NULL); + gettimeofday(&repl->last_io, NULL); } } @@ -1680,37 +1686,38 @@ exit: 
PQfreemem(buffer); } - return msg; + return result; none: - msg = Py_None; - Py_INCREF(msg); + result = Py_None; + Py_INCREF(result); goto exit; } int -pq_send_replication_feedback(cursorObject* curs, int reply_requested) +pq_send_replication_feedback(replicationCursorObject *repl, int reply_requested) { + cursorObject *curs = &repl->cur; + PGconn *pgconn = curs->conn->pgconn; char replybuf[1 + 8 + 8 + 8 + 8 + 1]; int len = 0; Dprintf("pq_send_replication_feedback: write="XLOGFMTSTR", flush="XLOGFMTSTR", apply="XLOGFMTSTR, - XLOGFMTARGS(curs->repl_write_lsn), - XLOGFMTARGS(curs->repl_flush_lsn), - XLOGFMTARGS(curs->repl_apply_lsn)); + XLOGFMTARGS(repl->write_lsn), + XLOGFMTARGS(repl->flush_lsn), + XLOGFMTARGS(repl->apply_lsn)); replybuf[len] = 'r'; len += 1; - fe_sendint64(curs->repl_write_lsn, &replybuf[len]); len += 8; - fe_sendint64(curs->repl_flush_lsn, &replybuf[len]); len += 8; - fe_sendint64(curs->repl_apply_lsn, &replybuf[len]); len += 8; + fe_sendint64(repl->write_lsn, &replybuf[len]); len += 8; + fe_sendint64(repl->flush_lsn, &replybuf[len]); len += 8; + fe_sendint64(repl->apply_lsn, &replybuf[len]); len += 8; fe_sendint64(feGetCurrentTimestamp(), &replybuf[len]); len += 8; replybuf[len] = reply_requested ? 1 : 0; len += 1; - if (PQputCopyData(curs->conn->pgconn, replybuf, len) <= 0 || - PQflush(curs->conn->pgconn) != 0) { + if (PQputCopyData(pgconn, replybuf, len) <= 0 || PQflush(pgconn) != 0) { return 0; } - gettimeofday(&curs->repl_last_io, NULL); + gettimeofday(&repl->last_io, NULL); return 1; } @@ -1723,12 +1730,15 @@ pq_send_replication_feedback(cursorObject* curs, int reply_requested) manages to send keepalive messages to the server as needed. */ int -pq_copy_both(cursorObject *curs, PyObject *consume, int decode, double keepalive_interval) +pq_copy_both(replicationCursorObject *repl, PyObject *consume, int decode, + double keepalive_interval) { + cursorObject *curs = &repl->cur; + connectionObject *conn = curs->conn; + PGconn *pgconn = conn->pgconn; PyObject *msg, *tmp = NULL; PyObject *consume_func = NULL; int fd, sel, ret = -1; - PGconn *pgconn; fd_set fds; struct timeval keep_intr, curr_time, ping_time, timeout; @@ -1738,13 +1748,12 @@ pq_copy_both(cursorObject *curs, PyObject *consume, int decode, double keepalive } CLEARPGRES(curs->pgres); - pgconn = curs->conn->pgconn; keep_intr.tv_sec = (int)keepalive_interval; keep_intr.tv_usec = (keepalive_interval - keep_intr.tv_sec)*1.0e6; while (1) { - msg = pq_read_replication_message(curs, decode); + msg = pq_read_replication_message(repl, decode); if (!msg) { goto exit; } @@ -1753,7 +1762,7 @@ pq_copy_both(cursorObject *curs, PyObject *consume, int decode, double keepalive fd = PQsocket(pgconn); if (fd < 0) { - pq_raise(curs->conn, curs, NULL); + pq_raise(conn, curs, NULL); goto exit; } @@ -1763,7 +1772,7 @@ pq_copy_both(cursorObject *curs, PyObject *consume, int decode, double keepalive /* how long can we wait before we need to send a keepalive? 
*/ gettimeofday(&curr_time, NULL); - timeradd(&curs->repl_last_io, &keep_intr, &ping_time); + timeradd(&repl->last_io, &keep_intr, &ping_time); timersub(&ping_time, &curr_time, &timeout); if (timeout.tv_sec >= 0) { @@ -1787,8 +1796,8 @@ pq_copy_both(cursorObject *curs, PyObject *consume, int decode, double keepalive } if (sel == 0) { - if (!pq_send_replication_feedback(curs, 0)) { - pq_raise(curs->conn, curs, NULL); + if (!pq_send_replication_feedback(repl, 0)) { + pq_raise(conn, curs, NULL); goto exit; } } @@ -1876,7 +1885,7 @@ pq_fetch(cursorObject *curs, int no_result) Dprintf("pq_fetch: data from a streaming replication slot (no tuples)"); curs->rowcount = -1; ex = 0; - /* nothing to do here: _pq_copy_both_v3 will be called separately */ + /* nothing to do here: pq_copy_both will be called separately */ CLEARPGRES(curs->pgres); break; diff --git a/psycopg/pqpath.h b/psycopg/pqpath.h index a858a269..568f0768 100644 --- a/psycopg/pqpath.h +++ b/psycopg/pqpath.h @@ -27,6 +27,7 @@ #define PSYCOPG_PQPATH_H 1 #include "psycopg/cursor.h" +#include "psycopg/replication_cursor.h" #include "psycopg/connection.h" /* macro to clean the pg result */ @@ -72,9 +73,10 @@ HIDDEN int pq_execute_command_locked(connectionObject *conn, RAISES HIDDEN void pq_complete_error(connectionObject *conn, PGresult **pgres, char **error); -HIDDEN int pq_copy_both(cursorObject *curs, PyObject *consumer, +/* replication protocol support */ +HIDDEN int pq_copy_both(replicationCursorObject *repl, PyObject *consumer, int decode, double keepalive_interval); -HIDDEN PyObject *pq_read_replication_message(cursorObject *curs, int decode); -HIDDEN int pq_send_replication_feedback(cursorObject *curs, int reply_requested); +HIDDEN PyObject *pq_read_replication_message(replicationCursorObject *repl, int decode); +HIDDEN int pq_send_replication_feedback(replicationCursorObject *repl, int reply_requested); #endif /* !defined(PSYCOPG_PQPATH_H) */ diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index f9f29a2e..25e32598 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -28,6 +28,7 @@ #include "psycopg/connection.h" #include "psycopg/cursor.h" +#include "psycopg/replication_cursor.h" #include "psycopg/replication_message.h" #include "psycopg/green.h" #include "psycopg/lobject.h" @@ -917,6 +918,9 @@ INIT_MODULE(_psycopg)(void) Py_TYPE(&cursorType) = &PyType_Type; if (PyType_Ready(&cursorType) == -1) goto exit; + Py_TYPE(&replicationCursorType) = &PyType_Type; + if (PyType_Ready(&replicationCursorType) == -1) goto exit; + Py_TYPE(&replicationMessageType) = &PyType_Type; if (PyType_Ready(&replicationMessageType) == -1) goto exit; @@ -1000,7 +1004,7 @@ INIT_MODULE(_psycopg)(void) /* Initialize the PyDateTimeAPI everywhere is used */ PyDateTime_IMPORT; if (psyco_adapter_datetime_init()) { goto exit; } - if (psyco_curs_datetime_init()) { goto exit; } + if (psyco_repl_curs_datetime_init()) { goto exit; } if (psyco_replmsg_datetime_init()) { goto exit; } Py_TYPE(&pydatetimeType) = &PyType_Type; @@ -1044,7 +1048,8 @@ INIT_MODULE(_psycopg)(void) /* put new types in module dictionary */ PyModule_AddObject(module, "connection", (PyObject*)&connectionType); PyModule_AddObject(module, "cursor", (PyObject*)&cursorType); - PyModule_AddObject(module, "replicationMessage", (PyObject*)&replicationMessageType); + PyModule_AddObject(module, "ReplicationCursor", (PyObject*)&replicationCursorType); + PyModule_AddObject(module, "ReplicationMessage", (PyObject*)&replicationMessageType); PyModule_AddObject(module, "ISQLQuote", 
(PyObject*)&isqlquoteType); PyModule_AddObject(module, "Notify", (PyObject*)¬ifyType); PyModule_AddObject(module, "Xid", (PyObject*)&xidType); diff --git a/psycopg/replication_cursor.h b/psycopg/replication_cursor.h new file mode 100644 index 00000000..1b6dbfab --- /dev/null +++ b/psycopg/replication_cursor.h @@ -0,0 +1,77 @@ +/* replication_cursor.h - definition for the psycopg replication cursor type + * + * Copyright (C) 2015 Daniele Varrazzo + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_REPLICATION_CURSOR_H +#define PSYCOPG_REPLICATION_CURSOR_H 1 + +#include "psycopg/cursor.h" +#include "libpq_support.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject replicationCursorType; + +typedef struct replicationCursorObject { + cursorObject cur; + + int started:1; /* if replication is started */ + int consuming:1; /* if running the consume loop */ + + struct timeval last_io; /* timestamp of the last exchange with the server */ + struct timeval keepalive_interval; /* interval for keepalive messages in replication mode */ + + XLogRecPtr write_lsn; /* LSN stats for replication feedback messages */ + XLogRecPtr flush_lsn; + XLogRecPtr apply_lsn; + int feedback_pending; /* flag set when we couldn't send the feedback to the server */ +} replicationCursorObject; + + +RAISES_NEG int psyco_repl_curs_datetime_init(void); + +/* exception-raising macros */ +#define EXC_IF_REPLICATING(self, cmd) \ +do \ + if ((self)->started) { \ + PyErr_SetString(ProgrammingError, \ + #cmd " cannot be used when replication is already in progress"); \ + return NULL; } \ +while (0) + +#define EXC_IF_NOT_REPLICATING(self, cmd) \ +do \ + if (!(self)->started) { \ + PyErr_SetString(ProgrammingError, \ + #cmd " cannot be used when replication is not in progress"); \ + return NULL; } \ +while (0) + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_REPLICATION_CURSOR_H) */ diff --git a/psycopg/replication_cursor_type.c b/psycopg/replication_cursor_type.c new file mode 100644 index 00000000..d1f7939a --- /dev/null +++ b/psycopg/replication_cursor_type.c @@ -0,0 +1,360 @@ +/* replication_cursor_type.c - python interface to replication cursor objects + * + * Copyright (C) 2015 Daniele Varrazzo + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/replication_cursor.h" +#include "psycopg/replication_message.h" +#include "psycopg/green.h" +#include "psycopg/pqpath.h" + +#include +#include + +/* python */ +#include "datetime.h" + + +#define psyco_repl_curs_start_replication_expert_doc \ +"start_replication_expert(command, writer=None, keepalive_interval=10) -- Start replication stream with a directly given command." + +static PyObject * +psyco_repl_curs_start_replication_expert(replicationCursorObject *self, + PyObject *args, PyObject *kwargs) +{ + cursorObject *curs = &self->cur; + connectionObject *conn = self->cur.conn; + PyObject *res = NULL; + char *command; + static char *kwlist[] = {"command", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &command)) { + return NULL; + } + + EXC_IF_CURS_CLOSED(curs); + EXC_IF_GREEN(start_replication_expert); + EXC_IF_TPC_PREPARED(conn, start_replication_expert); + EXC_IF_REPLICATING(self, start_replication_expert); + + Dprintf("psyco_repl_curs_start_replication_expert: %s", command); + + /* self->copysize = 0;*/ + + gettimeofday(&self->last_io, NULL); + + if (pq_execute(curs, command, conn->async, 1 /* no_result */, 1 /* no_begin */) >= 0) { + res = Py_None; + Py_INCREF(res); + + self->started = 1; + } + + return res; +} + +#define psyco_repl_curs_consume_stream_doc \ +"consume_stream(consumer, keepalive_interval=10) -- Consume replication stream." + +static PyObject * +psyco_repl_curs_consume_stream(replicationCursorObject *self, + PyObject *args, PyObject *kwargs) +{ + cursorObject *curs = &self->cur; + PyObject *consume = NULL, *res = NULL; + int decode = 0; + double keepalive_interval = 10; + static char *kwlist[] = {"consume", "decode", "keepalive_interval", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|id", kwlist, + &consume, &decode, &keepalive_interval)) { + return NULL; + } + + EXC_IF_CURS_CLOSED(curs); + EXC_IF_CURS_ASYNC(curs, consume_stream); + EXC_IF_GREEN(consume_stream); + EXC_IF_TPC_PREPARED(self->cur.conn, consume_stream); + EXC_IF_NOT_REPLICATING(self, consume_stream); + + if (self->consuming) { + PyErr_SetString(ProgrammingError, + "consume_stream cannot be used when already in the consume loop"); + return NULL; + } + + Dprintf("psyco_repl_curs_consume_stream"); + + if (keepalive_interval < 1.0) { + psyco_set_error(ProgrammingError, curs, "keepalive_interval must be >= 1 (sec)"); + return NULL; + } + + self->consuming = 1; + + if (pq_copy_both(self, consume, decode, keepalive_interval) >= 0) { + res = Py_None; + Py_INCREF(res); + } + + self->consuming = 0; + + return res; +} + +#define psyco_repl_curs_read_message_doc \ +"read_message(decode=True) -- Try reading a replication message from the server (non-blocking)." 
+ +static PyObject * +psyco_repl_curs_read_message(replicationCursorObject *self, + PyObject *args, PyObject *kwargs) +{ + cursorObject *curs = &self->cur; + int decode = 1; + static char *kwlist[] = {"decode", NULL}; + + EXC_IF_CURS_CLOSED(curs); + EXC_IF_GREEN(read_message); + EXC_IF_TPC_PREPARED(self->cur.conn, read_message); + EXC_IF_NOT_REPLICATING(self, read_message); + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, + &decode)) { + return NULL; + } + + return pq_read_replication_message(self, decode); +} + +static PyObject * +repl_curs_flush_feedback(replicationCursorObject *self, int reply) +{ + if (!(self->feedback_pending || reply)) + Py_RETURN_TRUE; + + if (pq_send_replication_feedback(self, reply)) { + self->feedback_pending = 0; + Py_RETURN_TRUE; + } else { + self->feedback_pending = 1; + Py_RETURN_FALSE; + } +} + +#define psyco_repl_curs_send_feedback_doc \ +"send_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) -- Try sending a replication feedback message to the server and optionally request a reply." + +static PyObject * +psyco_repl_curs_send_feedback(replicationCursorObject *self, + PyObject *args, PyObject *kwargs) +{ + cursorObject *curs = &self->cur; + XLogRecPtr write_lsn = InvalidXLogRecPtr, + flush_lsn = InvalidXLogRecPtr, + apply_lsn = InvalidXLogRecPtr; + int reply = 0; + static char* kwlist[] = {"write_lsn", "flush_lsn", "apply_lsn", "reply", NULL}; + + EXC_IF_CURS_CLOSED(curs); + EXC_IF_NOT_REPLICATING(self, send_feedback); + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|KKKi", kwlist, + &write_lsn, &flush_lsn, &apply_lsn, &reply)) { + return NULL; + } + + if (write_lsn > self->write_lsn) + self->write_lsn = write_lsn; + + if (flush_lsn > self->flush_lsn) + self->flush_lsn = flush_lsn; + + if (apply_lsn > self->apply_lsn) + self->apply_lsn = apply_lsn; + + self->feedback_pending = 1; + + return repl_curs_flush_feedback(self, reply); +} + +#define psyco_repl_curs_flush_feedback_doc \ +"flush_feedback(reply=False) -- Try flushing the latest pending replication feedback message to the server and optionally request a reply." 
+ +static PyObject * +psyco_repl_curs_flush_feedback(replicationCursorObject *self, + PyObject *args, PyObject *kwargs) +{ + cursorObject *curs = &self->cur; + int reply = 0; + static char *kwlist[] = {"reply", NULL}; + + EXC_IF_CURS_CLOSED(curs); + EXC_IF_NOT_REPLICATING(self, flush_feedback); + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, + &reply)) { + return NULL; + } + + return repl_curs_flush_feedback(self, reply); +} + + +RAISES_NEG int +psyco_repl_curs_datetime_init(void) +{ + Dprintf("psyco_repl_curs_datetime_init: datetime init"); + + PyDateTime_IMPORT; + + if (!PyDateTimeAPI) { + PyErr_SetString(PyExc_ImportError, "datetime initialization failed"); + return -1; + } + return 0; +} + +#define psyco_repl_curs_io_timestamp_doc \ +"io_timestamp -- the timestamp of latest IO with the server" + +static PyObject * +psyco_repl_curs_get_io_timestamp(replicationCursorObject *self) +{ + cursorObject *curs = &self->cur; + PyObject *tval, *res = NULL; + double seconds; + + EXC_IF_CURS_CLOSED(curs); + + seconds = self->last_io.tv_sec + self->last_io.tv_usec / 1.0e6; + + tval = Py_BuildValue("(d)", seconds); + if (tval) { + res = PyDateTime_FromTimestamp(tval); + Py_DECREF(tval); + } + return res; +} + +/* object method list */ + +static struct PyMethodDef replicationCursorObject_methods[] = { + {"start_replication_expert", (PyCFunction)psyco_repl_curs_start_replication_expert, + METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_start_replication_expert_doc}, + {"consume_stream", (PyCFunction)psyco_repl_curs_consume_stream, + METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_consume_stream_doc}, + {"read_message", (PyCFunction)psyco_repl_curs_read_message, + METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_read_message_doc}, + {"send_feedback", (PyCFunction)psyco_repl_curs_send_feedback, + METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_send_feedback_doc}, + {"flush_feedback", (PyCFunction)psyco_repl_curs_flush_feedback, + METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_flush_feedback_doc}, + {NULL} +}; + +/* object calculated member list */ + +static struct PyGetSetDef replicationCursorObject_getsets[] = { + { "io_timestamp", + (getter)psyco_repl_curs_get_io_timestamp, NULL, + psyco_repl_curs_io_timestamp_doc, NULL }, + {NULL} +}; + +static int +replicationCursor_setup(replicationCursorObject* self) +{ + self->started = 0; + self->consuming = 0; + + self->write_lsn = InvalidXLogRecPtr; + self->flush_lsn = InvalidXLogRecPtr; + self->apply_lsn = InvalidXLogRecPtr; + self->feedback_pending = 0; + + return 0; +} + +static int +replicationCursor_init(PyObject *obj, PyObject *args, PyObject *kwargs) +{ + replicationCursor_setup((replicationCursorObject *)obj); + return cursor_init(obj, args, kwargs); +} + +static PyObject * +replicationCursor_repr(replicationCursorObject *self) +{ + return PyString_FromFormat( + "", self, self->cur.closed); +} + + +/* object type */ + +#define replicationCursorType_doc \ +"A database replication cursor." 
+ +PyTypeObject replicationCursorType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.ReplicationCursor", + sizeof(replicationCursorObject), 0, + 0, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)replicationCursor_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + (reprfunc)replicationCursor_repr, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + replicationCursorType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + replicationCursorObject_methods, /*tp_methods*/ + 0, /*tp_members*/ + replicationCursorObject_getsets, /*tp_getset*/ + &cursorType, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + replicationCursor_init, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ +}; diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index 61833931..d4b0457b 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -49,7 +49,7 @@ static PyObject * replmsg_repr(replicationMessageObject *self) { return PyString_FromFormat( - "", + "", self, self->data_size, XLOGFMTARGS(self->data_start), XLOGFMTARGS(self->wal_end), self->send_time); } diff --git a/psycopg2.cproj b/psycopg2.cproj index 386287c1..682b69d0 100644 --- a/psycopg2.cproj +++ b/psycopg2.cproj @@ -92,6 +92,7 @@ + @@ -225,6 +226,7 @@ + diff --git a/setup.py b/setup.py index 339d7f2a..18c47b7c 100644 --- a/setup.py +++ b/setup.py @@ -466,7 +466,7 @@ sources = [ 'connection_int.c', 'connection_type.c', 'cursor_int.c', 'cursor_type.c', - 'replication_message_type.c', + 'replication_cursor_type.c', 'replication_message_type.c', 'diagnostics_type.c', 'error_type.c', 'lobject_int.c', 'lobject_type.c', 'notify_type.c', 'xid_type.c', @@ -482,7 +482,7 @@ depends = [ # headers 'config.h', 'pgtypes.h', 'psycopg.h', 'python.h', 'connection.h', 'cursor.h', 'diagnostics.h', 'error.h', 'green.h', 'lobject.h', - 'replication_message.h', + 'replication_cursor.h', 'replication_message.h', 'notify.h', 'pqpath.h', 'xid.h', 'libpq_support.h', 'win32_support.h', diff --git a/tests/test_replication.py b/tests/test_replication.py index 5c029c88..2dbb0086 100644 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -47,12 +47,16 @@ class ReplicationTestCase(ConnectingTestCase): # first close all connections, as they might keep the slot(s) active super(ReplicationTestCase, self).tearDown() + import time + time.sleep(0.025) # sometimes the slot is still active, wait a little + if self._slots: - kill_conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) + kill_conn = self.connect() if kill_conn: kill_cur = kill_conn.cursor() for slot in self._slots: - kill_cur.drop_replication_slot(slot) + kill_cur.execute("SELECT pg_drop_replication_slot(%s)", (slot,)) + kill_conn.commit() kill_conn.close() def create_replication_slot(self, cur, slot_name=testconfig.repl_slot, **kwargs): @@ -127,7 +131,7 @@ class ReplicationTest(ReplicationTestCase): cur.start_replication(self.slot) def consume(msg): raise StopReplication() - self.assertRaises(StopReplication, cur.consume_replication_stream, consume) + self.assertRaises(StopReplication, cur.consume_stream, consume) class 
AsyncReplicationTest(ReplicationTestCase): @@ -148,14 +152,22 @@ class AsyncReplicationTest(ReplicationTestCase): self.msg_count = 0 def consume(msg): + # just check the methods + log = "%s: %s" % (cur.io_timestamp, repr(msg)) + self.msg_count += 1 if self.msg_count > 3: + cur.flush_feedback(reply=True) raise StopReplication() + cur.send_feedback(flush_lsn=msg.data_start) + + self.assertRaises(psycopg2.ProgrammingError, cur.consume_stream, consume) + def process_stream(): from select import select while True: - msg = cur.read_replication_message() + msg = cur.read_message() if msg: consume(msg) else: From 23abe4f501ce60468e9e6b089910068265342368 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 20 Oct 2015 12:36:13 +0200 Subject: [PATCH 042/151] Add quick start to the replication doc, minor doc fixes. --- doc/src/extras.rst | 246 ++++++++++++++++++----------- lib/extras.py | 2 +- psycopg/replication_message_type.c | 2 +- 3 files changed, 160 insertions(+), 90 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 9384a961..2a7bed26 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -141,8 +141,81 @@ Logging cursor .. autoclass:: MinTimeLoggingCursor -Replication cursor -^^^^^^^^^^^^^^^^^^ +Replication protocol support +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Modern PostgreSQL servers (version 9.0 and above) support replication. The +replication protocol is built on top of the client-server protocol and can be +operated using ``libpq``, as such it can be also operated by ``psycopg2``. +The replication protocol can be operated on both synchronous and +:ref:`asynchronous ` connections. + +Server version 9.4 adds a new feature called *Logical Replication*. + +.. seealso:: + + - PostgreSQL `Streaming Replication Protocol`__ + + .. __: http://www.postgresql.org/docs/current/static/protocol-replication.html + + +Logical replication Quick-Start +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You must be using PostgreSQL server version 9.4 or above to run this quick +start. + +Make sure that replication connections are permitted for user ``postgres`` in +``pg_hba.conf`` and reload the server configuration. You also need to set +``wal_level=logical`` and ``max_wal_senders``, ``max_replication_slots`` to +value greater than zero in ``postgresql.conf`` (these changes require a server +restart). Create a database ``psycopg2test``. + +Then run the following code to quickly try the replication support out. This +is not production code -- it has no error handling, it sends feedback too +often, etc. -- and it's only intended as a simple demo of logical +replication:: + + from __future__ import print_function + import sys + import psycopg2 + import psycopg2.extras + + conn = psycopg2.connect('dbname=psycopg2test user=postgres', + connection_factory=psycopg2.extras.LogicalReplicationConnection) + cur = conn.cursor() + try: + cur.start_replication(slot_name='pytest') + except psycopg2.ProgrammingError: + cur.create_replication_slot('pytest', output_plugin='test_decoding') + cur.start_replication(slot_name='pytest') + + class DemoConsumer(object): + def __call__(self, msg): + print(msg.payload) + msg.cursor.send_feedback(flush_lsn=msg.data_start) + + democonsumer = DemoConsumer() + + print("Starting streaming, press Control-C to end...", file=sys.stderr) + try: + cur.consume_stream(democonsumer) + except KeyboardInterrupt: + cur.close() + conn.close() + print("The slot 'pytest' still exists. 
Drop it with SELECT pg_drop_replication_slot('pytest'); if no longer needed.", file=sys.stderr) + print("WARNING: Transaction logs will accumulate in pg_xlog until the slot is dropped.", file=sys.stderr) + + +You can now make changes to the ``psycopg2test`` database using a normal +psycopg2 session, ``psql``, etc. and see the logical decoding stream printed +by this demo client. + +This will continue running until terminated with ``Control-C``. + + +Replication connection and cursor classes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: ReplicationConnectionBase @@ -177,17 +250,11 @@ The following replication types are defined: phys_cur = phys_conn.cursor() Both `LogicalReplicationConnection` and `PhysicalReplicationConnection` use - `ReplicationCursor` for actual communication on the connection. - -.. seealso:: - - - PostgreSQL `Streaming Replication Protocol`__ - - .. __: http://www.postgresql.org/docs/current/static/protocol-replication.html + `ReplicationCursor` for actual communication with the server. -The individual messages in the replication stream are presented by -`ReplicationMessage` objects: +The individual messages in the replication stream are represented by +`ReplicationMessage` objects (both logical and physical type): .. autoclass:: ReplicationMessage @@ -249,7 +316,7 @@ The individual messages in the replication stream are presented by replication slot is created by default. No output plugin parameter is required or allowed when creating a physical replication slot. - In either case, the type of slot being created can be specified + In either case the type of slot being created can be specified explicitly using *slot_type* parameter. Replication slots are a feature of PostgreSQL server starting with @@ -295,25 +362,25 @@ The individual messages in the replication stream are presented by replication can be used with both types of connection. On the other hand, physical replication doesn't require a named - replication slot to be used, only logical one does. In any case, - logical replication and replication slots are a feature of PostgreSQL - server starting with version 9.4. Physical replication can be used - starting with 9.0. + replication slot to be used, only logical replication does. In any + case logical replication and replication slots are a feature of + PostgreSQL server starting with version 9.4. Physical replication can + be used starting with 9.0. If *start_lsn* is specified, the requested stream will start from that - LSN. The default is `!None`, which passes the LSN ``0/0``, causing - replay to begin at the last point at which the server got replay - confirmation from the client for, or the oldest available point for a - new slot. + LSN. The default is `!None` which passes the LSN ``0/0`` causing + replay to begin at the last point for which the server got flush + confirmation from the client, or the oldest available point for a new + slot. The server might produce an error if a WAL file for the given LSN has - already been recycled, or it may silently start streaming from a later + already been recycled or it may silently start streaming from a later position: the client can verify the actual position using information - provided the `ReplicationMessage` attributes. The exact server + provided by the `ReplicationMessage` attributes. The exact server behavior depends on the type of replication and use of slots. - A *timeline* parameter can only be specified with physical replication - and only starting with server version 9.3. 
+ The *timeline* parameter can only be specified with physical + replication and only starting with server version 9.3. A dictionary of *options* may be passed to the logical decoding plugin on a logical replication slot. The set of supported options depends @@ -324,8 +391,9 @@ The individual messages in the replication stream are presented by `start_replication_expert()` internally. After starting the replication, to actually consume the incoming - server messages, use `consume_stream()` or implement a loop around - `read_message()` in case of asynchronous connection. + server messages use `consume_stream()` or implement a loop around + `read_message()` in case of :ref:`asynchronous connection + `. .. method:: start_replication_expert(command) @@ -343,66 +411,66 @@ The individual messages in the replication stream are presented by This method can only be used with synchronous connection. For asynchronous connections see `read_message()`. - Before calling this method to consume the stream, use + Before calling this method to consume the stream use `start_replication()` first. - When called, this method enters an endless loop, reading messages from - the server and passing them to ``consume()``, then waiting for more - messages from the server. In order to make this method break out of - the loop and return, ``consume()`` can throw a `StopReplication` - exception (any unhandled exception will make it break out of the loop - as well). + This method enters an endless loop reading messages from the server + and passing them to ``consume()``, then waiting for more messages from + the server. In order to make this method break out of the loop and + return, ``consume()`` can throw a `StopReplication` exception. Any + unhandled exception will make it break out of the loop as well. - If *decode* is set to `!True`, the messages read from the server are + If *decode* is set to `!True` the messages read from the server are converted according to the connection `~connection.encoding`. This parameter should not be set with physical replication. - This method also sends keepalive messages to the server, in case there + This method also sends keepalive messages to the server in case there were no new data from the server for the duration of *keepalive_interval* (in seconds). The value of this parameter must - be equal to at least 1 second, but it can have a fractional part. + be set to at least 1 second, but it can have a fractional part. + + The *msg* objects passed to ``consume()`` are instances of + `ReplicationMessage` class. + + After processing certain amount of messages the client should send a + confirmation message to the server. This should be done by calling + `send_feedback()` method on the corresponding replication cursor. A + reference to the cursor is provided in the `ReplicationMessage` as an + attribute. The following example is a sketch implementation of ``consume()`` callable for logical replication:: class LogicalStreamConsumer(object): - def __call__(self, msg): - self.store_message_data(msg.payload) + ... - if self.should_report_to_the_server_now(msg): + def __call__(self, msg): + self.process_message(msg.payload) + + if self.should_send_feedback(msg): msg.cursor.send_feedback(flush_lsn=msg.data_start) consumer = LogicalStreamConsumer() cur.consume_stream(consumer, decode=True) - The *msg* objects passed to ``consume()`` are instances of - `ReplicationMessage` class. 
- - After storing certain amount of messages' data reliably, the client - should send a confirmation message to the server. This should be done - by calling `send_feedback()` method on the corresponding replication - cursor. A reference to the cursor is provided in the - `ReplicationMessage` as an attribute. - .. warning:: - When using replication with slots, failure to properly notify the - server by constantly consuming and reporting success at - appropriate times can eventually lead to "disk full" condition on - the server, because the server retains all the WAL segments that - might be needed to stream the changes via all of the currently - open replication slots. + When using replication with slots, failure to constantly consume + *and* report success to the server appropriately can eventually + lead to "disk full" condition on the server, because the server + retains all the WAL segments that might be needed to stream the + changes via all of the currently open replication slots. - On the other hand, it is not recommended to send a confirmation - after every processed message, since that will put an unnecessary - load on network and the server. A possible strategy is to confirm - after every COMMIT message. + On the other hand, it is not recommended to send confirmation + after *every* processed message, since that will put an + unnecessary load on network and the server. A possible strategy + is to confirm after every COMMIT message. .. method:: send_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False) :param write_lsn: a LSN position up to which the client has written the data locally - :param flush_lsn: a LSN position up to which the client has stored the + :param flush_lsn: a LSN position up to which the client has processed the data reliably (the server is allowed to discard all and every data that predates this LSN) :param apply_lsn: a LSN position up to which the warm standby server @@ -411,7 +479,7 @@ The individual messages in the replication stream are presented by :param reply: request the server to send back a keepalive message immediately Use this method to report to the server that all messages up to a - certain LSN position have been stored on the client and may be + certain LSN position have been processed on the client and may be discarded on the server. This method can also be called with all default parameters' values to @@ -433,13 +501,14 @@ The individual messages in the replication stream are presented by Returns `!True` if the feedback message was sent successfully, `!False` otherwise. - Low-level methods for asynchronous connection operation. + Low-level replication cursor methods for :ref:`asynchronous connection + ` operation. - With the synchronous connection, a call to `consume_stream()` handles all + With the synchronous connection a call to `consume_stream()` handles all the complexity of handling the incoming messages and sending keepalive replies, but at times it might be beneficial to use low-level interface - for better control, in particular to `~select.select()` on multiple - sockets. The following methods are provided for asynchronous operation: + for better control, in particular to `~select` on multiple sockets. The + following methods are provided for asynchronous operation: .. method:: read_message(decode=True) @@ -449,16 +518,16 @@ The individual messages in the replication stream are presented by This method should be used in a loop with asynchronous connections after calling `start_replication()` once. 
- It tries to read the next message from the server, without blocking - and returns an instance of `ReplicationMessage` or `!None`, in case - there are no more data messages from the server at the moment. + It tries to read the next message from the server without blocking and + returns an instance of `ReplicationMessage` or `!None`, in case there + are no more data messages from the server at the moment. It is expected that the calling code will call this method repeatedly - in order to consume all of the messages that might have been buffered, - until `!None` is returned. After receiving a `!None` value from this - method, the caller should use `~select.select()` or `~select.poll()` - on the corresponding connection to block the process until there is - more data from the server. + in order to consume all of the messages that might have been buffered + until `!None` is returned. After receiving `!None` from this method + the caller should use `~select.select()` or `~select.poll()` on the + corresponding connection to block the process until there is more data + from the server. The server can send keepalive messages to the client periodically. Such messages are silently consumed by this method and are never @@ -480,24 +549,25 @@ The individual messages in the replication stream are presented by An actual example of asynchronous operation might look like this:: - def consume(msg): - ... + def consume(msg): + ... - keepalive_interval = 10.0 - while True: - msg = cur.read_message() - if msg: - consume(msg) - else: - now = datetime.now() - timeout = keepalive_interval - (now - cur.io_timestamp).total_seconds() - if timeout > 0: - sel = select.select([cur], [], [], timeout) - else: - sel = ([], [], []) + keepalive_interval = 10.0 + while True: + msg = cur.read_message() + if msg: + consume(msg) + else: + now = datetime.now() + timeout = keepalive_interval - (now - cur.io_timestamp).total_seconds() + if timeout > 0: + sel = select.select([cur], [], [], timeout) + else: + sel = ([], [], []) - if not sel[0]: - cur.send_feedback() + if not sel[0]: + # timed out, send keepalive message + cur.send_feedback() .. index:: pair: Cursor; Replication diff --git a/lib/extras.py b/lib/extras.py index 7c713573..8e1373c1 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -514,7 +514,7 @@ class StopReplication(Exception): class ReplicationCursor(_replicationCursor): - """A cursor used for communication on the replication protocol.""" + """A cursor used for communication on replication connections.""" def create_replication_slot(self, slot_name, slot_type=None, output_plugin=None): """Create streaming replication slot.""" diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index d4b0457b..893ce7ad 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -146,7 +146,7 @@ static struct PyGetSetDef replicationMessageObject_getsets[] = { /* object type */ #define replicationMessageType_doc \ -"A database replication message." +"A replication protocol message." PyTypeObject replicationMessageType = { PyVarObject_HEAD_INIT(NULL, 0) From b3f8e9adb56f8db16fb75ebf56ed262a52095ebb Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 20 Oct 2015 12:54:22 +0200 Subject: [PATCH 043/151] Fix send_time printf format in replmsg_repr(). 
--- psycopg/replication_message_type.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index 893ce7ad..f607d2ba 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -49,7 +49,7 @@ static PyObject * replmsg_repr(replicationMessageObject *self) { return PyString_FromFormat( - "", + "", self, self->data_size, XLOGFMTARGS(self->data_start), XLOGFMTARGS(self->wal_end), self->send_time); } From 089e745af64b660574424cae88011d0689d56c5c Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 20 Oct 2015 12:55:43 +0200 Subject: [PATCH 044/151] Fix cursor_init() declaration for use in replication_cursor_type.c --- psycopg/cursor.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 18e31e5f..44d8a47a 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -92,12 +92,13 @@ struct cursorObject { /* C-callable functions in cursor_int.c and cursor_type.c */ +HIDDEN int cursor_init(PyObject *obj, PyObject *args, PyObject *kwargs); + BORROWED HIDDEN PyObject *curs_get_cast(cursorObject *self, PyObject *oid); HIDDEN void curs_reset(cursorObject *self); HIDDEN int psyco_curs_withhold_set(cursorObject *self, PyObject *pyvalue); HIDDEN int psyco_curs_scrollable_set(cursorObject *self, PyObject *pyvalue); -HIDDEN int psyco_curs_init(PyObject *obj, PyObject *args, PyObject *kwargs); /* exception-raising macros */ #define EXC_IF_CURS_CLOSED(self) \ From 22cbfb26d6c7f596e17166f91b5e0712ff683dcc Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 20 Oct 2015 13:05:43 +0200 Subject: [PATCH 045/151] Actually add replication tests to the test suite. --- psycopg2.cproj | 1 + tests/__init__.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/psycopg2.cproj b/psycopg2.cproj index 682b69d0..75d96180 100644 --- a/psycopg2.cproj +++ b/psycopg2.cproj @@ -128,6 +128,7 @@ + diff --git a/tests/__init__.py b/tests/__init__.py index 3e677d85..2d2609ce 100755 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -31,6 +31,7 @@ import test_bugX000 import test_bug_gc import test_cancel import test_connection +import test_replication import test_copy import test_cursor import test_dates @@ -68,6 +69,7 @@ def test_suite(): suite.addTest(test_bug_gc.test_suite()) suite.addTest(test_cancel.test_suite()) suite.addTest(test_connection.test_suite()) + suite.addTest(test_replication.test_suite()) suite.addTest(test_copy.test_suite()) suite.addTest(test_cursor.test_suite()) suite.addTest(test_dates.test_suite()) From 76c7f4a0b5f3ff69499239917fb0aec8b0da6adf Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 22 Oct 2015 16:17:08 +0200 Subject: [PATCH 046/151] Use direct call to consume() callable in pq_copy_both() --- psycopg/pqpath.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index d6886981..30a3d394 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1737,12 +1737,11 @@ pq_copy_both(replicationCursorObject *repl, PyObject *consume, int decode, connectionObject *conn = curs->conn; PGconn *pgconn = conn->pgconn; PyObject *msg, *tmp = NULL; - PyObject *consume_func = NULL; int fd, sel, ret = -1; fd_set fds; struct timeval keep_intr, curr_time, ping_time, timeout; - if (!(consume_func = PyObject_GetAttrString(consume, "__call__"))) { + if (!PyCallable_Check(consume)) { Dprintf("pq_copy_both: expected callable consume object"); goto exit; } @@ -1804,11 +1803,11 @@ 
pq_copy_both(replicationCursorObject *repl, PyObject *consume, int decode, continue; } else { - tmp = PyObject_CallFunctionObjArgs(consume_func, msg, NULL); + tmp = PyObject_CallFunctionObjArgs(consume, msg, NULL); Py_DECREF(msg); if (tmp == NULL) { - Dprintf("pq_copy_both: consume_func returned NULL"); + Dprintf("pq_copy_both: consume returned NULL"); goto exit; } Py_DECREF(tmp); @@ -1818,7 +1817,6 @@ pq_copy_both(replicationCursorObject *repl, PyObject *consume, int decode, ret = 1; exit: - Py_XDECREF(consume_func); return ret; } From e69dafbeccf4a1ff096759bd531fd771955592da Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 23 Oct 2015 11:31:55 +0200 Subject: [PATCH 047/151] Move the `decode` parameter to `start_replication()`. It makes more sense this way, because otherwise it must be passed to every call of `read_message()`. --- doc/src/extras.rst | 73 ++++++++++++++++++------------- lib/extras.py | 4 +- psycopg/pqpath.c | 12 +++-- psycopg/pqpath.h | 4 +- psycopg/replication_cursor.h | 7 +-- psycopg/replication_cursor_type.c | 41 +++++++---------- 6 files changed, 71 insertions(+), 70 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 2a7bed26..7df68a77 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -185,10 +185,10 @@ replication:: connection_factory=psycopg2.extras.LogicalReplicationConnection) cur = conn.cursor() try: - cur.start_replication(slot_name='pytest') + cur.start_replication(slot_name='pytest', decode=True) # test_decoding produces textual output except psycopg2.ProgrammingError: cur.create_replication_slot('pytest', output_plugin='test_decoding') - cur.start_replication(slot_name='pytest') + cur.start_replication(slot_name='pytest', decode=True) class DemoConsumer(object): def __call__(self, msg): @@ -260,9 +260,12 @@ The individual messages in the replication stream are represented by .. attribute:: payload - The actual data received from the server. An instance of either - ``str`` or ``unicode``, depending on the method that was used to - produce this message. + The actual data received from the server. + + An instance of either `bytes()` or `unicode()`, depending on the value + of `decode` option passed to `ReplicationCursor.start_replication()` + on the connection. See `ReplicationCursor.read_message()` for + details. .. attribute:: data_size @@ -336,7 +339,7 @@ The individual messages in the replication stream are represented by Replication slots are a feature of PostgreSQL server starting with version 9.4. - .. method:: start_replication(slot_name=None, slot_type=None, start_lsn=0, timeline=0, options=None) + .. method:: start_replication(slot_name=None, slot_type=None, start_lsn=0, timeline=0, options=None, decode=False) Start replication on the connection. @@ -352,6 +355,8 @@ The individual messages in the replication stream are represented by can only be used with physical replication) :param options: a dictionary of options to pass to logical replication slot (not allowed with physical replication) + :param decode: a flag indicating that unicode conversion should be + performed on messages received from the server If a *slot_name* is specified, the slot must exist on the server and its type must match the replication type used. @@ -387,6 +392,11 @@ The individual messages in the replication stream are represented by on the output plugin that was used to create the slot. Must be `!None` for physical replication. 
+ If *decode* is set to `!True` the messages received from the server + would be converted according to the connection `~connection.encoding`. + *This parameter should not be set with physical replication or with + logical replication plugins that produce binary output.* + This function constructs a ``START_REPLICATION`` command and calls `start_replication_expert()` internally. @@ -395,43 +405,40 @@ The individual messages in the replication stream are represented by `read_message()` in case of :ref:`asynchronous connection `. - .. method:: start_replication_expert(command) + .. method:: start_replication_expert(command, decode=False) - Start replication on the connection using provided ``START_REPLICATION`` - command. + Start replication on the connection using provided + ``START_REPLICATION`` command. See `start_replication()` for + description of *decode* parameter. - .. method:: consume_stream(consume, decode=False, keepalive_interval=10) + .. method:: consume_stream(consume, keepalive_interval=10) :param consume: a callable object with signature ``consume(msg)`` - :param decode: a flag indicating that unicode conversion should be - performed on the messages received from the server :param keepalive_interval: interval (in seconds) to send keepalive messages to the server This method can only be used with synchronous connection. For asynchronous connections see `read_message()`. - Before calling this method to consume the stream use + Before using this method to consume the stream call `start_replication()` first. This method enters an endless loop reading messages from the server - and passing them to ``consume()``, then waiting for more messages from - the server. In order to make this method break out of the loop and - return, ``consume()`` can throw a `StopReplication` exception. Any - unhandled exception will make it break out of the loop as well. + and passing them to ``consume()`` one at a time, then waiting for more + messages from the server. In order to make this method break out of + the loop and return, ``consume()`` can throw a `StopReplication` + exception. Any unhandled exception will make it break out of the loop + as well. - If *decode* is set to `!True` the messages read from the server are - converted according to the connection `~connection.encoding`. This - parameter should not be set with physical replication. + The *msg* object passed to ``consume()`` is an instance of + `ReplicationMessage` class. See `read_message()` for details about + message decoding. This method also sends keepalive messages to the server in case there were no new data from the server for the duration of *keepalive_interval* (in seconds). The value of this parameter must be set to at least 1 second, but it can have a fractional part. - The *msg* objects passed to ``consume()`` are instances of - `ReplicationMessage` class. - After processing certain amount of messages the client should send a confirmation message to the server. This should be done by calling `send_feedback()` method on the corresponding replication cursor. A @@ -452,7 +459,7 @@ The individual messages in the replication stream are represented by msg.cursor.send_feedback(flush_lsn=msg.data_start) consumer = LogicalStreamConsumer() - cur.consume_stream(consumer, decode=True) + cur.consume_stream(consumer) .. warning:: @@ -510,17 +517,21 @@ The individual messages in the replication stream are represented by for better control, in particular to `~select` on multiple sockets. 
The following methods are provided for asynchronous operation: - .. method:: read_message(decode=True) + .. method:: read_message() - :param decode: a flag indicating that unicode conversion should be - performed on the data received from the server + Try to read the next message from the server without blocking and + return an instance of `ReplicationMessage` or `!None`, in case there + are no more data messages from the server at the moment. This method should be used in a loop with asynchronous connections - after calling `start_replication()` once. + (after calling `start_replication()` once). For synchronous + connections see `consume_stream()`. - It tries to read the next message from the server without blocking and - returns an instance of `ReplicationMessage` or `!None`, in case there - are no more data messages from the server at the moment. + The returned message's `ReplicationMessage.payload` is an instance of + `unicode()` decoded according to connection `connection.encoding` + *iff* `decode` was set to `!True` in the initial call to + `start_replication()` on this connection, otherwise it is an instance + of `bytes()` with no decoding. It is expected that the calling code will call this method repeatedly in order to consume all of the messages that might have been buffered diff --git a/lib/extras.py b/lib/extras.py index 8e1373c1..8a8d34ff 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -548,7 +548,7 @@ class ReplicationCursor(_replicationCursor): self.execute(command) def start_replication(self, slot_name=None, slot_type=None, start_lsn=0, - timeline=0, options=None): + timeline=0, options=None, decode=False): """Start replication stream.""" command = "START_REPLICATION " @@ -597,7 +597,7 @@ class ReplicationCursor(_replicationCursor): command += "%s %s" % (quote_ident(k, self), _A(str(v))) command += ")" - self.start_replication_expert(command) + self.start_replication_expert(command, decode=decode) # allows replication cursors to be used in select.select() directly def fileno(self): diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 30a3d394..424ed901 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1543,7 +1543,7 @@ exit: are never returned to the caller. */ PyObject * -pq_read_replication_message(replicationCursorObject *repl, int decode) +pq_read_replication_message(replicationCursorObject *repl) { cursorObject *curs = &repl->cur; connectionObject *conn = curs->conn; @@ -1555,7 +1555,7 @@ pq_read_replication_message(replicationCursorObject *repl, int decode) PyObject *str = NULL, *result = NULL; replicationMessageObject *msg = NULL; - Dprintf("pq_read_replication_message(decode=%d)", decode); + Dprintf("pq_read_replication_message"); consumed = 0; retry: @@ -1629,8 +1629,7 @@ retry: Dprintf("pq_read_replication_message: >>%.*s<<", data_size, buffer + hdr); - /* XXX it would be wise to check if it's really a logical replication */ - if (decode) { + if (repl->decode) { str = PyUnicode_Decode(buffer + hdr, data_size, conn->codec, NULL); } else { str = Bytes_FromStringAndSize(buffer + hdr, data_size); @@ -1730,8 +1729,7 @@ pq_send_replication_feedback(replicationCursorObject *repl, int reply_requested) manages to send keepalive messages to the server as needed. 
*/ int -pq_copy_both(replicationCursorObject *repl, PyObject *consume, int decode, - double keepalive_interval) +pq_copy_both(replicationCursorObject *repl, PyObject *consume, double keepalive_interval) { cursorObject *curs = &repl->cur; connectionObject *conn = curs->conn; @@ -1752,7 +1750,7 @@ pq_copy_both(replicationCursorObject *repl, PyObject *consume, int decode, keep_intr.tv_usec = (keepalive_interval - keep_intr.tv_sec)*1.0e6; while (1) { - msg = pq_read_replication_message(repl, decode); + msg = pq_read_replication_message(repl); if (!msg) { goto exit; } diff --git a/psycopg/pqpath.h b/psycopg/pqpath.h index 568f0768..1348d9c4 100644 --- a/psycopg/pqpath.h +++ b/psycopg/pqpath.h @@ -75,8 +75,8 @@ RAISES HIDDEN void pq_complete_error(connectionObject *conn, PGresult **pgres, /* replication protocol support */ HIDDEN int pq_copy_both(replicationCursorObject *repl, PyObject *consumer, - int decode, double keepalive_interval); -HIDDEN PyObject *pq_read_replication_message(replicationCursorObject *repl, int decode); + double keepalive_interval); +HIDDEN PyObject *pq_read_replication_message(replicationCursorObject *repl); HIDDEN int pq_send_replication_feedback(replicationCursorObject *repl, int reply_requested); #endif /* !defined(PSYCOPG_PQPATH_H) */ diff --git a/psycopg/replication_cursor.h b/psycopg/replication_cursor.h index 1b6dbfab..07bf7b54 100644 --- a/psycopg/replication_cursor.h +++ b/psycopg/replication_cursor.h @@ -38,10 +38,11 @@ extern HIDDEN PyTypeObject replicationCursorType; typedef struct replicationCursorObject { cursorObject cur; - int started:1; /* if replication is started */ - int consuming:1; /* if running the consume loop */ + int started:1; /* if replication is started */ + int consuming:1; /* if running the consume loop */ + int decode:1; /* if we should use character decoding on the messages */ - struct timeval last_io; /* timestamp of the last exchange with the server */ + struct timeval last_io ; /* timestamp of the last exchange with the server */ struct timeval keepalive_interval; /* interval for keepalive messages in replication mode */ XLogRecPtr write_lsn; /* LSN stats for replication feedback messages */ diff --git a/psycopg/replication_cursor_type.c b/psycopg/replication_cursor_type.c index d1f7939a..1fd5ea39 100644 --- a/psycopg/replication_cursor_type.c +++ b/psycopg/replication_cursor_type.c @@ -38,8 +38,8 @@ #include "datetime.h" -#define psyco_repl_curs_start_replication_expert_doc \ -"start_replication_expert(command, writer=None, keepalive_interval=10) -- Start replication stream with a directly given command." +#define psyco_repl_curs_start_replication_expert_doc \ +"start_replication_expert(command, decode=False) -- Start replication with a given command." 
static PyObject * psyco_repl_curs_start_replication_expert(replicationCursorObject *self, @@ -49,9 +49,10 @@ psyco_repl_curs_start_replication_expert(replicationCursorObject *self, connectionObject *conn = self->cur.conn; PyObject *res = NULL; char *command; - static char *kwlist[] = {"command", NULL}; + long int decode = 0; + static char *kwlist[] = {"command", "decode", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &command)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|l", kwlist, &command, &decode)) { return NULL; } @@ -60,17 +61,15 @@ psyco_repl_curs_start_replication_expert(replicationCursorObject *self, EXC_IF_TPC_PREPARED(conn, start_replication_expert); EXC_IF_REPLICATING(self, start_replication_expert); - Dprintf("psyco_repl_curs_start_replication_expert: %s", command); - - /* self->copysize = 0;*/ - - gettimeofday(&self->last_io, NULL); + Dprintf("psyco_repl_curs_start_replication_expert: '%s'; decode: %d", command, decode); if (pq_execute(curs, command, conn->async, 1 /* no_result */, 1 /* no_begin */) >= 0) { res = Py_None; Py_INCREF(res); self->started = 1; + self->decode = decode; + gettimeofday(&self->last_io, NULL); } return res; @@ -85,12 +84,11 @@ psyco_repl_curs_consume_stream(replicationCursorObject *self, { cursorObject *curs = &self->cur; PyObject *consume = NULL, *res = NULL; - int decode = 0; double keepalive_interval = 10; - static char *kwlist[] = {"consume", "decode", "keepalive_interval", NULL}; + static char *kwlist[] = {"consume", "keepalive_interval", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|id", kwlist, - &consume, &decode, &keepalive_interval)) { + &consume, &keepalive_interval)) { return NULL; } @@ -115,7 +113,7 @@ psyco_repl_curs_consume_stream(replicationCursorObject *self, self->consuming = 1; - if (pq_copy_both(self, consume, decode, keepalive_interval) >= 0) { + if (pq_copy_both(self, consume, keepalive_interval) >= 0) { res = Py_None; Py_INCREF(res); } @@ -126,27 +124,19 @@ psyco_repl_curs_consume_stream(replicationCursorObject *self, } #define psyco_repl_curs_read_message_doc \ -"read_message(decode=True) -- Try reading a replication message from the server (non-blocking)." +"read_message() -- Try reading a replication message from the server (non-blocking)." 
static PyObject * -psyco_repl_curs_read_message(replicationCursorObject *self, - PyObject *args, PyObject *kwargs) +psyco_repl_curs_read_message(replicationCursorObject *self) { cursorObject *curs = &self->cur; - int decode = 1; - static char *kwlist[] = {"decode", NULL}; EXC_IF_CURS_CLOSED(curs); EXC_IF_GREEN(read_message); EXC_IF_TPC_PREPARED(self->cur.conn, read_message); EXC_IF_NOT_REPLICATING(self, read_message); - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, - &decode)) { - return NULL; - } - - return pq_read_replication_message(self, decode); + return pq_read_replication_message(self); } static PyObject * @@ -267,7 +257,7 @@ static struct PyMethodDef replicationCursorObject_methods[] = { {"consume_stream", (PyCFunction)psyco_repl_curs_consume_stream, METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_consume_stream_doc}, {"read_message", (PyCFunction)psyco_repl_curs_read_message, - METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_read_message_doc}, + METH_NOARGS, psyco_repl_curs_read_message_doc}, {"send_feedback", (PyCFunction)psyco_repl_curs_send_feedback, METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_send_feedback_doc}, {"flush_feedback", (PyCFunction)psyco_repl_curs_flush_feedback, @@ -289,6 +279,7 @@ replicationCursor_setup(replicationCursorObject* self) { self->started = 0; self->consuming = 0; + self->decode = 0; self->write_lsn = InvalidXLogRecPtr; self->flush_lsn = InvalidXLogRecPtr; From dd6bcbd04fc9714ac87b827af12647590ef131a1 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 23 Oct 2015 17:51:03 +0200 Subject: [PATCH 048/151] Improve async replication example. --- doc/src/extras.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 7df68a77..bd13a782 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -560,6 +560,9 @@ The individual messages in the replication stream are represented by An actual example of asynchronous operation might look like this:: + from select import select + from datetime import datetime + def consume(msg): ... @@ -571,14 +574,12 @@ The individual messages in the replication stream are represented by else: now = datetime.now() timeout = keepalive_interval - (now - cur.io_timestamp).total_seconds() - if timeout > 0: - sel = select.select([cur], [], [], timeout) - else: - sel = ([], [], []) - - if not sel[0]: - # timed out, send keepalive message - cur.send_feedback() + try: + sel = select([cur], [], [], max(0, timeout)) + if not any(sel): + cur.send_feedback() # timed out, send keepalive message + except InterruptedError: + pass # recalculate timeout and continue .. index:: pair: Cursor; Replication From 8b79bf43ace9b7d09f16b4c829c96a6c1784dacf Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 23 Oct 2015 18:30:18 +0200 Subject: [PATCH 049/151] Drop ReplicationCursor.flush_feedback(), rectify pq_*_replication_*() interface. 
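With this change send_feedback() raises an error if the feedback message
cannot be sent, instead of returning False and stashing the LSN positions
for a later flush_feedback() call.  Client code thus reduces to a single
call; a sketch of a consume() callback using the documented
ReplicationMessage attributes::

    msg.cursor.send_feedback(flush_lsn=msg.data_start)

To request an immediate keepalive reply, pass reply=True, as the updated
test below now does with send_feedback(reply=True).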
--- doc/src/extras.rst | 16 ------- psycopg/libpq_support.h | 2 - psycopg/pqpath.c | 69 +++++++++++++------------------ psycopg/pqpath.h | 6 ++- psycopg/replication_cursor.h | 3 +- psycopg/replication_cursor_type.c | 60 +++++++-------------------- tests/test_replication.py | 2 +- 7 files changed, 49 insertions(+), 109 deletions(-) diff --git a/doc/src/extras.rst b/doc/src/extras.rst index bd13a782..58b0dc07 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -492,22 +492,6 @@ The individual messages in the replication stream are represented by This method can also be called with all default parameters' values to just send a keepalive message to the server. - If the feedback message could not be sent, updates the passed LSN - positions in the cursor for a later call to `flush_feedback()` and - returns `!False`, otherwise returns `!True`. - - .. method:: flush_feedback(reply=False) - - :param reply: request the server to send back a keepalive message immediately - - This method tries to flush the latest replication feedback message - that `send_feedback()` was trying to send but couldn't. - - If *reply* is `!True` sends a keepalive message in either case. - - Returns `!True` if the feedback message was sent successfully, - `!False` otherwise. - Low-level replication cursor methods for :ref:`asynchronous connection ` operation. diff --git a/psycopg/libpq_support.h b/psycopg/libpq_support.h index c7139463..77d7ab12 100644 --- a/psycopg/libpq_support.h +++ b/psycopg/libpq_support.h @@ -31,8 +31,6 @@ /* type and constant definitions from internal postgres includes not available otherwise */ typedef unsigned PG_INT64_TYPE XLogRecPtr; -#define InvalidXLogRecPtr ((XLogRecPtr) 0) - /* have to use lowercase %x, as PyString_FromFormat can't do %X */ #define XLOGFMTSTR "%x/%x" #define XLOGFMTARGS(x) ((uint32)((x) >> 32)), ((uint32)((x) & 0xFFFFFFFF)) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 424ed901..63154172 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1542,8 +1542,8 @@ exit: Any keepalive messages from the server are silently consumed and are never returned to the caller. */ -PyObject * -pq_read_replication_message(replicationCursorObject *repl) +int +pq_read_replication_message(replicationCursorObject *repl, replicationMessageObject **msg) { cursorObject *curs = &repl->cur; connectionObject *conn = curs->conn; @@ -1553,18 +1553,21 @@ pq_read_replication_message(replicationCursorObject *repl) XLogRecPtr data_start, wal_end; pg_int64 send_time; PyObject *str = NULL, *result = NULL; - replicationMessageObject *msg = NULL; + int ret = -1; Dprintf("pq_read_replication_message"); + *msg = NULL; consumed = 0; + retry: len = PQgetCopyData(pgconn, &buffer, 1 /* async */); if (len == 0) { /* If we've tried reading some data, but there was none, bail out. */ if (consumed) { - goto none; + ret = 0; + goto exit; } /* We should only try reading more data when there is nothing available at the moment. 
Otherwise, with a really highly loaded @@ -1599,7 +1602,8 @@ retry: } CLEARPGRES(curs->pgres); - goto none; + ret = 0; + goto exit; } /* It also makes sense to set this flag here to make us return early in @@ -1641,11 +1645,11 @@ retry: Py_DECREF(str); if (!result) { goto exit; } - msg = (replicationMessageObject *)result; - msg->data_size = data_size; - msg->data_start = data_start; - msg->wal_end = wal_end; - msg->send_time = send_time; + *msg = (replicationMessageObject *)result; + (*msg)->data_size = data_size; + (*msg)->data_start = data_start; + (*msg)->wal_end = wal_end; + (*msg)->send_time = send_time; } else if (buffer[0] == 'k') { /* Primary keepalive message: msgtype(1), walEnd(8), sendTime(8), reply(1) */ @@ -1656,19 +1660,8 @@ retry: } reply = buffer[hdr]; - if (reply) { - if (!pq_send_replication_feedback(repl, 0)) { - if (conn->async) { - repl->feedback_pending = 1; - } else { - /* XXX not sure if this was a good idea after all */ - pq_raise(conn, curs, NULL); - goto exit; - } - } - else { - gettimeofday(&repl->last_io, NULL); - } + if (reply && pq_send_replication_feedback(repl, 0) < 0) { + goto exit; } PQfreemem(buffer); @@ -1680,24 +1673,22 @@ retry: goto exit; } + ret = 0; + exit: if (buffer) { PQfreemem(buffer); } - return result; - -none: - result = Py_None; - Py_INCREF(result); - goto exit; + return ret; } int pq_send_replication_feedback(replicationCursorObject *repl, int reply_requested) { cursorObject *curs = &repl->cur; - PGconn *pgconn = curs->conn->pgconn; + connectionObject *conn = curs->conn; + PGconn *pgconn = conn->pgconn; char replybuf[1 + 8 + 8 + 8 + 8 + 1]; int len = 0; @@ -1714,11 +1705,12 @@ pq_send_replication_feedback(replicationCursorObject *repl, int reply_requested) replybuf[len] = reply_requested ? 1 : 0; len += 1; if (PQputCopyData(pgconn, replybuf, len) <= 0 || PQflush(pgconn) != 0) { - return 0; + pq_raise(conn, curs, NULL); + return -1; } gettimeofday(&repl->last_io, NULL); - return 1; + return 0; } /* Calls pq_read_replication_message in an endless loop, until @@ -1734,7 +1726,8 @@ pq_copy_both(replicationCursorObject *repl, PyObject *consume, double keepalive_ cursorObject *curs = &repl->cur; connectionObject *conn = curs->conn; PGconn *pgconn = conn->pgconn; - PyObject *msg, *tmp = NULL; + replicationMessageObject *msg = NULL; + PyObject *tmp = NULL; int fd, sel, ret = -1; fd_set fds; struct timeval keep_intr, curr_time, ping_time, timeout; @@ -1750,13 +1743,10 @@ pq_copy_both(replicationCursorObject *repl, PyObject *consume, double keepalive_ keep_intr.tv_usec = (keepalive_interval - keep_intr.tv_sec)*1.0e6; while (1) { - msg = pq_read_replication_message(repl); - if (!msg) { + if (pq_read_replication_message(repl, &msg) < 0) { goto exit; } - else if (msg == Py_None) { - Py_DECREF(msg); - + else if (msg == NULL) { fd = PQsocket(pgconn); if (fd < 0) { pq_raise(conn, curs, NULL); @@ -1793,8 +1783,7 @@ pq_copy_both(replicationCursorObject *repl, PyObject *consume, double keepalive_ } if (sel == 0) { - if (!pq_send_replication_feedback(repl, 0)) { - pq_raise(conn, curs, NULL); + if (pq_send_replication_feedback(repl, 0) < 0) { goto exit; } } diff --git a/psycopg/pqpath.h b/psycopg/pqpath.h index 1348d9c4..5cf22309 100644 --- a/psycopg/pqpath.h +++ b/psycopg/pqpath.h @@ -27,8 +27,9 @@ #define PSYCOPG_PQPATH_H 1 #include "psycopg/cursor.h" -#include "psycopg/replication_cursor.h" #include "psycopg/connection.h" +#include "psycopg/replication_cursor.h" +#include "psycopg/replication_message.h" /* macro to clean the pg result */ #define 
CLEARPGRES(pgres) do { PQclear(pgres); pgres = NULL; } while (0) @@ -76,7 +77,8 @@ RAISES HIDDEN void pq_complete_error(connectionObject *conn, PGresult **pgres, /* replication protocol support */ HIDDEN int pq_copy_both(replicationCursorObject *repl, PyObject *consumer, double keepalive_interval); -HIDDEN PyObject *pq_read_replication_message(replicationCursorObject *repl); +HIDDEN int pq_read_replication_message(replicationCursorObject *repl, + replicationMessageObject **msg); HIDDEN int pq_send_replication_feedback(replicationCursorObject *repl, int reply_requested); #endif /* !defined(PSYCOPG_PQPATH_H) */ diff --git a/psycopg/replication_cursor.h b/psycopg/replication_cursor.h index 07bf7b54..36ced138 100644 --- a/psycopg/replication_cursor.h +++ b/psycopg/replication_cursor.h @@ -45,10 +45,9 @@ typedef struct replicationCursorObject { struct timeval last_io ; /* timestamp of the last exchange with the server */ struct timeval keepalive_interval; /* interval for keepalive messages in replication mode */ - XLogRecPtr write_lsn; /* LSN stats for replication feedback messages */ + XLogRecPtr write_lsn; /* LSNs for replication feedback messages */ XLogRecPtr flush_lsn; XLogRecPtr apply_lsn; - int feedback_pending; /* flag set when we couldn't send the feedback to the server */ } replicationCursorObject; diff --git a/psycopg/replication_cursor_type.c b/psycopg/replication_cursor_type.c index 1fd5ea39..f652984e 100644 --- a/psycopg/replication_cursor_type.c +++ b/psycopg/replication_cursor_type.c @@ -130,28 +130,21 @@ static PyObject * psyco_repl_curs_read_message(replicationCursorObject *self) { cursorObject *curs = &self->cur; + replicationMessageObject *msg = NULL; EXC_IF_CURS_CLOSED(curs); EXC_IF_GREEN(read_message); EXC_IF_TPC_PREPARED(self->cur.conn, read_message); EXC_IF_NOT_REPLICATING(self, read_message); - return pq_read_replication_message(self); -} - -static PyObject * -repl_curs_flush_feedback(replicationCursorObject *self, int reply) -{ - if (!(self->feedback_pending || reply)) - Py_RETURN_TRUE; - - if (pq_send_replication_feedback(self, reply)) { - self->feedback_pending = 0; - Py_RETURN_TRUE; - } else { - self->feedback_pending = 1; - Py_RETURN_FALSE; + if (pq_read_replication_message(self, &msg) < 0) { + return NULL; } + if (msg) { + return (PyObject *)msg; + } + + Py_RETURN_NONE; } #define psyco_repl_curs_send_feedback_doc \ @@ -162,9 +155,7 @@ psyco_repl_curs_send_feedback(replicationCursorObject *self, PyObject *args, PyObject *kwargs) { cursorObject *curs = &self->cur; - XLogRecPtr write_lsn = InvalidXLogRecPtr, - flush_lsn = InvalidXLogRecPtr, - apply_lsn = InvalidXLogRecPtr; + XLogRecPtr write_lsn = 0, flush_lsn = 0, apply_lsn = 0; int reply = 0; static char* kwlist[] = {"write_lsn", "flush_lsn", "apply_lsn", "reply", NULL}; @@ -185,31 +176,11 @@ psyco_repl_curs_send_feedback(replicationCursorObject *self, if (apply_lsn > self->apply_lsn) self->apply_lsn = apply_lsn; - self->feedback_pending = 1; - - return repl_curs_flush_feedback(self, reply); -} - -#define psyco_repl_curs_flush_feedback_doc \ -"flush_feedback(reply=False) -- Try flushing the latest pending replication feedback message to the server and optionally request a reply." 
- -static PyObject * -psyco_repl_curs_flush_feedback(replicationCursorObject *self, - PyObject *args, PyObject *kwargs) -{ - cursorObject *curs = &self->cur; - int reply = 0; - static char *kwlist[] = {"reply", NULL}; - - EXC_IF_CURS_CLOSED(curs); - EXC_IF_NOT_REPLICATING(self, flush_feedback); - - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, - &reply)) { + if (pq_send_replication_feedback(self, reply) < 0) { return NULL; } - return repl_curs_flush_feedback(self, reply); + Py_RETURN_NONE; } @@ -260,8 +231,6 @@ static struct PyMethodDef replicationCursorObject_methods[] = { METH_NOARGS, psyco_repl_curs_read_message_doc}, {"send_feedback", (PyCFunction)psyco_repl_curs_send_feedback, METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_send_feedback_doc}, - {"flush_feedback", (PyCFunction)psyco_repl_curs_flush_feedback, - METH_VARARGS|METH_KEYWORDS, psyco_repl_curs_flush_feedback_doc}, {NULL} }; @@ -281,10 +250,9 @@ replicationCursor_setup(replicationCursorObject* self) self->consuming = 0; self->decode = 0; - self->write_lsn = InvalidXLogRecPtr; - self->flush_lsn = InvalidXLogRecPtr; - self->apply_lsn = InvalidXLogRecPtr; - self->feedback_pending = 0; + self->write_lsn = 0; + self->flush_lsn = 0; + self->apply_lsn = 0; return 0; } diff --git a/tests/test_replication.py b/tests/test_replication.py index 2dbb0086..4441a266 100644 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -157,7 +157,7 @@ class AsyncReplicationTest(ReplicationTestCase): self.msg_count += 1 if self.msg_count > 3: - cur.flush_feedback(reply=True) + cur.send_feedback(reply=True) raise StopReplication() cur.send_feedback(flush_lsn=msg.data_start) From 7aba8b3ed0483c675d757bf52c8ce9456c9aeeb1 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 27 Oct 2015 12:54:10 +0100 Subject: [PATCH 050/151] Rework psycopg2.connect() interface. --- doc/src/extensions.rst | 22 +++++ lib/__init__.py | 49 +--------- lib/extensions.py | 3 +- psycopg/psycopg.h | 6 ++ psycopg/psycopgmodule.c | 210 ++++++++++++++++++++++++++++++++++++++-- psycopg/utils.c | 44 +++++++++ tests/test_module.py | 32 ++++-- 7 files changed, 303 insertions(+), 63 deletions(-) diff --git a/doc/src/extensions.rst b/doc/src/extensions.rst index d96cca4f..dcaa2340 100644 --- a/doc/src/extensions.rst +++ b/doc/src/extensions.rst @@ -24,6 +24,28 @@ functionalities defined by the |DBAPI|_. >>> psycopg2.extensions.parse_dsn('dbname=test user=postgres password=secret') {'password': 'secret', 'user': 'postgres', 'dbname': 'test'} +.. function:: make_dsn(**kwargs) + + Wrap keyword parameters into a connection string, applying necessary + quoting and escaping any special characters (namely, single quote and + backslash). + + Example (note the order of parameters in the resulting string is + arbitrary):: + + >>> psycopg2.extensions.make_dsn(dbname='test', user='postgres', password='secret') + 'user=postgres dbname=test password=secret' + + As a special case, the *database* keyword is translated to *dbname*:: + + >>> psycopg2.extensions.make_dsn(database='test') + 'dbname=test' + + An example of quoting (using `print()` for clarity):: + + >>> print(psycopg2.extensions.make_dsn(database='test', password="some\\thing ''special")) + password='some\\thing \'\'special' dbname=test + .. class:: connection(dsn, async=False) Is the class usually returned by the `~psycopg2.connect()` function. 
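A quick sketch of how the two new helpers compose: the order of parameters
in the string produced by make_dsn() is arbitrary, but parse_dsn() turns it
back into a stable dict::

    >>> from psycopg2.extensions import make_dsn, parse_dsn
    >>> parse_dsn(make_dsn(dbname='test', user='postgres', password='secret'))
    {'password': 'secret', 'user': 'postgres', 'dbname': 'test'}

Keyword arguments to connect() now take the same route: when no dsn string
is given, the remaining keywords (everything except connection_factory and
async) are folded into a DSN via make_dsn(), as the reworked __init__.py and
psyco_parse_args() below show.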
diff --git a/lib/__init__.py b/lib/__init__.py index 994b15a8..39dd12e2 100644 --- a/lib/__init__.py +++ b/lib/__init__.py @@ -56,7 +56,7 @@ from psycopg2._psycopg import Error, Warning, DataError, DatabaseError, Programm from psycopg2._psycopg import IntegrityError, InterfaceError, InternalError from psycopg2._psycopg import NotSupportedError, OperationalError -from psycopg2._psycopg import _connect, apilevel, threadsafety, paramstyle +from psycopg2._psycopg import _connect, parse_args, apilevel, threadsafety, paramstyle from psycopg2._psycopg import __version__, __libpq_version__ from psycopg2 import tz @@ -80,27 +80,8 @@ else: _ext.register_adapter(Decimal, Adapter) del Decimal, Adapter -import re - -def _param_escape(s, - re_escape=re.compile(r"([\\'])"), - re_space=re.compile(r'\s')): - """ - Apply the escaping rule required by PQconnectdb - """ - if not s: return "''" - - s = re_escape.sub(r'\\\1', s) - if re_space.search(s): - s = "'" + s + "'" - - return s - -del re - def connect(dsn=None, - database=None, user=None, password=None, host=None, port=None, connection_factory=None, cursor_factory=None, async=False, **kwargs): """ Create a new database connection. @@ -135,33 +116,7 @@ def connect(dsn=None, library: the list of supported parameters depends on the library version. """ - items = [] - if database is not None: - items.append(('dbname', database)) - if user is not None: - items.append(('user', user)) - if password is not None: - items.append(('password', password)) - if host is not None: - items.append(('host', host)) - if port is not None: - items.append(('port', port)) - - items.extend([(k, v) for (k, v) in kwargs.iteritems() if v is not None]) - - if dsn is not None and items: - raise TypeError( - "'%s' is an invalid keyword argument when the dsn is specified" - % items[0][0]) - - if dsn is None: - if not items: - raise TypeError('missing dsn and no parameters') - else: - dsn = " ".join(["%s=%s" % (k, _param_escape(str(v))) - for (k, v) in items]) - - conn = _connect(dsn, connection_factory=connection_factory, async=async) + conn = _connect(dsn, connection_factory, async, **kwargs) if cursor_factory is not None: conn.cursor_factory = cursor_factory diff --git a/lib/extensions.py b/lib/extensions.py index b40e28b8..f99ed939 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -56,7 +56,8 @@ try: except ImportError: pass -from psycopg2._psycopg import adapt, adapters, encodings, connection, cursor, lobject, Xid, libpq_version, parse_dsn, quote_ident +from psycopg2._psycopg import adapt, adapters, encodings, connection, cursor, lobject, Xid, libpq_version +from psycopg2._psycopg import parse_dsn, make_dsn, quote_ident from psycopg2._psycopg import string_types, binary_types, new_type, new_array_type, register_type from psycopg2._psycopg import ISQLQuote, Notify, Diagnostics, Column diff --git a/psycopg/psycopg.h b/psycopg/psycopg.h index eb406fd2..770de7c6 100644 --- a/psycopg/psycopg.h +++ b/psycopg/psycopg.h @@ -119,11 +119,17 @@ typedef struct cursorObject cursorObject; typedef struct connectionObject connectionObject; /* some utility functions */ +HIDDEN PyObject *psyco_parse_args(PyObject *self, PyObject *args, PyObject *kwargs); +HIDDEN PyObject *psyco_parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs); +HIDDEN PyObject *psyco_make_dsn(PyObject *self, PyObject *args, PyObject *kwargs); + RAISES HIDDEN PyObject *psyco_set_error(PyObject *exc, cursorObject *curs, const char *msg); HIDDEN char *psycopg_escape_string(connectionObject *conn, const char *from, 
Py_ssize_t len, char *to, Py_ssize_t *tolen); HIDDEN char *psycopg_escape_identifier_easy(const char *from, Py_ssize_t len); +HIDDEN char *psycopg_escape_conninfo(const char *from, Py_ssize_t len); + HIDDEN int psycopg_strdup(char **to, const char *from, Py_ssize_t len); HIDDEN int psycopg_is_text_file(PyObject *f); diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index cf70a4ad..03b115d0 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -70,24 +70,104 @@ HIDDEN PyObject *psyco_null = NULL; /* The type of the cursor.description items */ HIDDEN PyObject *psyco_DescriptionType = NULL; + +/* finds a keyword or positional arg (pops it from kwargs if found there) */ +static PyObject * +parse_arg(int pos, char *name, PyObject *defval, PyObject *args, PyObject *kwargs) +{ + Py_ssize_t nargs = PyTuple_GET_SIZE(args); + PyObject *val = NULL; + + if (kwargs && PyMapping_HasKeyString(kwargs, name)) { + val = PyMapping_GetItemString(kwargs, name); + Py_XINCREF(val); + PyMapping_DelItemString(kwargs, name); /* pop from the kwargs dict! */ + } + if (nargs > pos) { + if (!val) { + val = PyTuple_GET_ITEM(args, pos); + Py_XINCREF(val); + } else { + PyErr_Format(PyExc_TypeError, + "parse_args() got multiple values for keyword argument '%s'", name); + return NULL; + } + } + if (!val) { + val = defval; + Py_XINCREF(val); + } + + return val; +} + + +#define psyco_parse_args_doc \ +"parse_args(...) -- parse connection parameters.\n\n" \ +"Return a tuple of (dsn, connection_factory, async)" + +PyObject * +psyco_parse_args(PyObject *self, PyObject *args, PyObject *kwargs) +{ + Py_ssize_t nargs = PyTuple_GET_SIZE(args); + PyObject *dsn = NULL; + PyObject *factory = NULL; + PyObject *async = NULL; + PyObject *res = NULL; + + if (nargs > 3) { + PyErr_Format(PyExc_TypeError, + "parse_args() takes at most 3 arguments (%d given)", (int)nargs); + goto exit; + } + /* parse and remove all keywords we know, so they are not interpreted as part of DSN */ + if (!(dsn = parse_arg(0, "dsn", Py_None, args, kwargs))) { goto exit; } + if (!(factory = parse_arg(1, "connection_factory", Py_None, + args, kwargs))) { goto exit; } + if (!(async = parse_arg(2, "async", Py_False, args, kwargs))) { goto exit; } + + if (kwargs && PyMapping_Size(kwargs) > 0) { + if (dsn == Py_None) { + Py_DECREF(dsn); + if (!(dsn = psyco_make_dsn(NULL, NULL, kwargs))) { goto exit; } + } else { + PyErr_SetString(PyExc_TypeError, "both dsn and parameters given"); + goto exit; + } + } else { + if (dsn == Py_None) { + PyErr_SetString(PyExc_TypeError, "missing dsn and no parameters"); + goto exit; + } + } + + res = PyTuple_Pack(3, dsn, factory, async); + +exit: + Py_XDECREF(dsn); + Py_XDECREF(factory); + Py_XDECREF(async); + + return res; +} + + /** connect module-level function **/ #define psyco_connect_doc \ -"_connect(dsn, [connection_factory], [async]) -- New database connection.\n\n" +"_connect(dsn, [connection_factory], [async], **kwargs) -- New database connection.\n\n" static PyObject * psyco_connect(PyObject *self, PyObject *args, PyObject *keywds) { PyObject *conn = NULL; + PyObject *tuple = NULL; PyObject *factory = NULL; const char *dsn = NULL; int async = 0; - static char *kwlist[] = {"dsn", "connection_factory", "async", NULL}; + if (!(tuple = psyco_parse_args(self, args, keywds))) { goto exit; } - if (!PyArg_ParseTupleAndKeywords(args, keywds, "s|Oi", kwlist, - &dsn, &factory, &async)) { - return NULL; - } + if (!PyArg_ParseTuple(tuple, "s|Oi", &dsn, &factory, &async)) { goto exit; } Dprintf("psyco_connect: dsn = 
'%s', async = %d", dsn, async); @@ -109,12 +189,16 @@ psyco_connect(PyObject *self, PyObject *args, PyObject *keywds) conn = PyObject_CallFunction(factory, "si", dsn, async); } +exit: + Py_XDECREF(tuple); + return conn; } + #define psyco_parse_dsn_doc "parse_dsn(dsn) -> dict" -static PyObject * +PyObject * psyco_parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs) { char *err = NULL; @@ -166,6 +250,114 @@ exit: } +#define psyco_make_dsn_doc "make_dsn(**kwargs) -> str" + +PyObject * +psyco_make_dsn(PyObject *self, PyObject *args, PyObject *kwargs) +{ + Py_ssize_t len, pos; + PyObject *res = NULL; + PyObject *key = NULL, *value = NULL; + PyObject *newkey, *newval; + PyObject *dict = NULL; + char *str = NULL, *p, *q; + + if (args && (len = PyTuple_Size(args)) > 0) { + PyErr_Format(PyExc_TypeError, "make_dsn() takes no arguments (%d given)", (int)len); + goto exit; + } + if (kwargs == NULL) { + return Text_FromUTF8(""); + } + + /* iterate through kwargs, calculating the total resulting string + length and saving prepared key/values to a temp. dict */ + if (!(dict = PyDict_New())) { goto exit; } + + len = 0; + pos = 0; + while (PyDict_Next(kwargs, &pos, &key, &value)) { + if (value == NULL || value == Py_None) { continue; } + + Py_INCREF(key); /* for ensure_bytes */ + if (!(newkey = psycopg_ensure_bytes(key))) { goto exit; } + + /* special handling of 'database' keyword */ + if (strcmp(Bytes_AsString(newkey), "database") == 0) { + key = Bytes_FromString("dbname"); + Py_DECREF(newkey); + } else { + key = newkey; + } + + /* now transform the value */ + if (Bytes_CheckExact(value)) { + Py_INCREF(value); + } else if (PyUnicode_CheckExact(value)) { + if (!(value = PyUnicode_AsUTF8String(value))) { goto exit; } + } else { + /* this could be port=5432, so we need to get the text representation */ + if (!(value = PyObject_Str(value))) { goto exit; } + /* and still ensure it's bytes() (but no need to incref here) */ + if (!(value = psycopg_ensure_bytes(value))) { goto exit; } + } + + /* passing NULL for plen checks for NIL bytes in content and errors out */ + if (Bytes_AsStringAndSize(value, &str, NULL) < 0) { goto exit; } + /* escape any special chars */ + if (!(str = psycopg_escape_conninfo(str, 0))) { goto exit; } + if (!(newval = Bytes_FromString(str))) { + goto exit; + } + PyMem_Free(str); + str = NULL; + Py_DECREF(value); + value = newval; + + /* finally put into the temp. 
dict */ + if (PyDict_SetItem(dict, key, value) < 0) { goto exit; } + + len += Bytes_GET_SIZE(key) + Bytes_GET_SIZE(value) + 2; /* =, space or NIL */ + + Py_DECREF(key); + Py_DECREF(value); + } + key = NULL; + value = NULL; + + if (!(str = PyMem_Malloc(len))) { + PyErr_NoMemory(); + goto exit; + } + + p = str; + pos = 0; + while (PyDict_Next(dict, &pos, &newkey, &newval)) { + if (p != str) { + *(p++) = ' '; + } + if (Bytes_AsStringAndSize(newkey, &q, &len) < 0) { goto exit; } + strncpy(p, q, len); + p += len; + *(p++) = '='; + if (Bytes_AsStringAndSize(newval, &q, &len) < 0) { goto exit; } + strncpy(p, q, len); + p += len; + } + *p = '\0'; + + res = Text_FromUTF8AndSize(str, p - str); + +exit: + PyMem_Free(str); + Py_XDECREF(key); + Py_XDECREF(value); + Py_XDECREF(dict); + + return res; +} + + #define psyco_quote_ident_doc \ "quote_ident(str, conn_or_curs) -> str -- wrapper around PQescapeIdentifier\n\n" \ ":Parameters:\n" \ @@ -820,8 +1012,12 @@ error: static PyMethodDef psycopgMethods[] = { {"_connect", (PyCFunction)psyco_connect, METH_VARARGS|METH_KEYWORDS, psyco_connect_doc}, + {"parse_args", (PyCFunction)psyco_parse_args, + METH_VARARGS|METH_KEYWORDS, psyco_parse_args_doc}, {"parse_dsn", (PyCFunction)psyco_parse_dsn, METH_VARARGS|METH_KEYWORDS, psyco_parse_dsn_doc}, + {"make_dsn", (PyCFunction)psyco_make_dsn, + METH_VARARGS|METH_KEYWORDS, psyco_make_dsn_doc}, {"quote_ident", (PyCFunction)psyco_quote_ident, METH_VARARGS|METH_KEYWORDS, psyco_quote_ident_doc}, {"adapt", (PyCFunction)psyco_microprotocols_adapt, diff --git a/psycopg/utils.c b/psycopg/utils.c index ec8e47c8..e9dc3ba6 100644 --- a/psycopg/utils.c +++ b/psycopg/utils.c @@ -124,6 +124,50 @@ psycopg_escape_identifier_easy(const char *from, Py_ssize_t len) return rv; } +char * +psycopg_escape_conninfo(const char *from, Py_ssize_t len) +{ + char *rv = NULL; + const char *src; + const char *end; + char *dst; + int space = 0; + + if (!len) { len = strlen(from); } + end = from + len; + + if (!(rv = PyMem_Malloc(3 + 2 * len))) { + PyErr_NoMemory(); + return NULL; + } + + /* check for any whitespace or empty string */ + if (from < end && *from) { + for (src = from; src < end && *src; ++src) { + if (isspace(*src)) { + space = 1; + break; + } + } + } else { + /* empty string: we should produce '' */ + space = 1; + } + + dst = rv; + if (space) { *(dst++) = '\''; } + /* scan and copy */ + for (src = from; src < end && *src; ++src, ++dst) { + if (*src == '\'' || *src == '\\') + *(dst++) = '\\'; + *dst = *src; + } + if (space) { *(dst++) = '\''; } + *dst = '\0'; + + return rv; +} + /* Duplicate a string. * * Allocate a new buffer on the Python heap containing the new string. 
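The quoting applied by psycopg_escape_conninfo() above is essentially the
rule the removed Python _param_escape() helper implemented: backslash-escape
backslashes and single quotes, and wrap the value in single quotes when it
is empty or contains whitespace.  For illustration, the same rule in Python
(escape_conninfo_value is a hypothetical name, not part of the module)::

    import re

    def escape_conninfo_value(s):
        # backslash-escape \ and ', quote if empty or containing whitespace
        if not s:
            return "''"
        escaped = re.sub(r"([\\'])", r"\\\1", s)
        if re.search(r"\s", s):
            escaped = "'" + escaped + "'"
        return escaped

    print(escape_conninfo_value("some\\thing ''special"))
    # prints: 'some\\thing \'\'special'   (cf. the make_dsn() example above)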
diff --git a/tests/test_module.py b/tests/test_module.py index 62b85ee2..528f79c5 100755 --- a/tests/test_module.py +++ b/tests/test_module.py @@ -34,11 +34,11 @@ import psycopg2 class ConnectTestCase(unittest.TestCase): def setUp(self): self.args = None - def conect_stub(dsn, connection_factory=None, async=False): - self.args = (dsn, connection_factory, async) + def connect_stub(*args, **kwargs): + self.args = psycopg2.parse_args(*args, **kwargs) self._connect_orig = psycopg2._connect - psycopg2._connect = conect_stub + psycopg2._connect = connect_stub def tearDown(self): psycopg2._connect = self._connect_orig @@ -91,29 +91,45 @@ class ConnectTestCase(unittest.TestCase): pass psycopg2.connect(database='foo', bar='baz', connection_factory=f) - self.assertEqual(self.args[0], 'dbname=foo bar=baz') + dsn = " %s " % self.args[0] + self.assertIn(" dbname=foo ", dsn) + self.assertIn(" bar=baz ", dsn) self.assertEqual(self.args[1], f) self.assertEqual(self.args[2], False) psycopg2.connect("dbname=foo bar=baz", connection_factory=f) - self.assertEqual(self.args[0], 'dbname=foo bar=baz') + dsn = " %s " % self.args[0] + self.assertIn(" dbname=foo ", dsn) + self.assertIn(" bar=baz ", dsn) self.assertEqual(self.args[1], f) self.assertEqual(self.args[2], False) def test_async(self): psycopg2.connect(database='foo', bar='baz', async=1) - self.assertEqual(self.args[0], 'dbname=foo bar=baz') + dsn = " %s " % self.args[0] + self.assertIn(" dbname=foo ", dsn) + self.assertIn(" bar=baz ", dsn) self.assertEqual(self.args[1], None) self.assert_(self.args[2]) psycopg2.connect("dbname=foo bar=baz", async=True) - self.assertEqual(self.args[0], 'dbname=foo bar=baz') + dsn = " %s " % self.args[0] + self.assertIn(" dbname=foo ", dsn) + self.assertIn(" bar=baz ", dsn) self.assertEqual(self.args[1], None) self.assert_(self.args[2]) + def test_int_port_param(self): + psycopg2.connect(database='sony', port=6543) + dsn = " %s " % self.args[0] + self.assertIn(" dbname=sony ", dsn) + self.assertIn(" port=6543 ", dsn) + def test_empty_param(self): psycopg2.connect(database='sony', password='') - self.assertEqual(self.args[0], "dbname=sony password=''") + dsn = " %s " % self.args[0] + self.assertIn(" dbname=sony ", dsn) + self.assertIn(" password='' ", dsn) def test_escape(self): psycopg2.connect(database='hello world') From fbcf99ad070a3eae67c258d357ab86bda29793fd Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 27 Oct 2015 18:21:24 +0100 Subject: [PATCH 051/151] Move replication connection to C level. 
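The Python-level factory classes keep their interface: LogicalReplicationConnection
and PhysicalReplicationConnection now merely forward a replication_type keyword to
the C-level ReplicationConnection base, while ReplicationConnectionBase still
installs ReplicationCursor as the cursor factory.  Caller-side usage is unchanged;
a sketch::

    from psycopg2.extras import LogicalReplicationConnection

    conn = psycopg2.connect(dsn, connection_factory=LogicalReplicationConnection)
    cur = conn.cursor()   # a ReplicationCursor, set by ReplicationConnectionBase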
--- lib/extensions.py | 3 +- lib/extras.py | 57 ++----- psycopg/psycopg.h | 1 + psycopg/psycopgmodule.c | 12 +- psycopg/replication_connection.h | 53 +++++++ psycopg/replication_connection_type.c | 210 ++++++++++++++++++++++++++ psycopg2.cproj | 2 + setup.py | 8 +- tests/testutils.py | 3 - 9 files changed, 296 insertions(+), 53 deletions(-) create mode 100644 psycopg/replication_connection.h create mode 100644 psycopg/replication_connection_type.c diff --git a/lib/extensions.py b/lib/extensions.py index ad0f31e0..fb91c0f2 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -62,7 +62,8 @@ from psycopg2._psycopg import string_types, binary_types, new_type, new_array_ty from psycopg2._psycopg import ISQLQuote, Notify, Diagnostics, Column from psycopg2._psycopg import QueryCanceledError, TransactionRollbackError -from psycopg2._psycopg import ReplicationCursor, ReplicationMessage +from psycopg2._psycopg import REPLICATION_PHYSICAL, REPLICATION_LOGICAL +from psycopg2._psycopg import ReplicationConnection, ReplicationCursor, ReplicationMessage try: from psycopg2._psycopg import set_wait_callback, get_wait_callback diff --git a/lib/extras.py b/lib/extras.py index 8a8d34ff..6e815d69 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -39,6 +39,8 @@ import psycopg2 from psycopg2 import extensions as _ext from psycopg2.extensions import cursor as _cursor from psycopg2.extensions import connection as _connection +from psycopg2.extensions import REPLICATION_PHYSICAL, REPLICATION_LOGICAL +from psycopg2.extensions import ReplicationConnection as _replicationConnection from psycopg2.extensions import ReplicationCursor as _replicationCursor from psycopg2.extensions import ReplicationMessage from psycopg2.extensions import adapt as _A, quote_ident @@ -439,65 +441,28 @@ class MinTimeLoggingCursor(LoggingCursor): return LoggingCursor.callproc(self, procname, vars) -"""Replication connection types.""" -REPLICATION_LOGICAL = "LOGICAL" -REPLICATION_PHYSICAL = "PHYSICAL" - - -class ReplicationConnectionBase(_connection): +class ReplicationConnectionBase(_replicationConnection): """ Base class for Logical and Physical replication connection classes. Uses `ReplicationCursor` automatically. """ def __init__(self, *args, **kwargs): - """ - Initializes a replication connection by adding appropriate - parameters to the provided DSN and tweaking the connection - attributes. 
- """ - - # replication_type is set in subclasses - if self.replication_type == REPLICATION_LOGICAL: - replication = 'database' - - elif self.replication_type == REPLICATION_PHYSICAL: - replication = 'true' - - else: - raise psycopg2.ProgrammingError("unrecognized replication type: %s" % self.replication_type) - - items = _ext.parse_dsn(args[0]) - - # we add an appropriate replication keyword parameter, unless - # user has specified one explicitly in the DSN - items.setdefault('replication', replication) - - dsn = " ".join(["%s=%s" % (k, psycopg2._param_escape(str(v))) - for (k, v) in items.iteritems()]) - - args = [dsn] + list(args[1:]) # async is the possible 2nd arg super(ReplicationConnectionBase, self).__init__(*args, **kwargs) - - # prevent auto-issued BEGIN statements - if not self.async: - self.autocommit = True - - if self.cursor_factory is None: - self.cursor_factory = ReplicationCursor + self.cursor_factory = ReplicationCursor class LogicalReplicationConnection(ReplicationConnectionBase): def __init__(self, *args, **kwargs): - self.replication_type = REPLICATION_LOGICAL + kwargs['replication_type'] = REPLICATION_LOGICAL super(LogicalReplicationConnection, self).__init__(*args, **kwargs) class PhysicalReplicationConnection(ReplicationConnectionBase): def __init__(self, *args, **kwargs): - self.replication_type = REPLICATION_PHYSICAL + kwargs['replication_type'] = REPLICATION_PHYSICAL super(PhysicalReplicationConnection, self).__init__(*args, **kwargs) @@ -528,16 +493,16 @@ class ReplicationCursor(_replicationCursor): if output_plugin is None: raise psycopg2.ProgrammingError("output plugin name is required to create logical replication slot") - command += "%s %s" % (slot_type, quote_ident(output_plugin, self)) + command += "LOGICAL %s" % quote_ident(output_plugin, self) elif slot_type == REPLICATION_PHYSICAL: if output_plugin is not None: raise psycopg2.ProgrammingError("cannot specify output plugin name when creating physical replication slot") - command += slot_type + command += "PHYSICAL" else: - raise psycopg2.ProgrammingError("unrecognized replication type: %s" % slot_type) + raise psycopg2.ProgrammingError("unrecognized replication type: %s" % repr(slot_type)) self.execute(command) @@ -562,7 +527,7 @@ class ReplicationCursor(_replicationCursor): else: raise psycopg2.ProgrammingError("slot name is required for logical replication") - command += "%s " % slot_type + command += "LOGICAL " elif slot_type == REPLICATION_PHYSICAL: if slot_name: @@ -570,7 +535,7 @@ class ReplicationCursor(_replicationCursor): # don't add "PHYSICAL", before 9.4 it was just START_REPLICATION XXX/XXX else: - raise psycopg2.ProgrammingError("unrecognized replication type: %s" % slot_type) + raise psycopg2.ProgrammingError("unrecognized replication type: %s" % repr(slot_type)) if type(start_lsn) is str: lsn = start_lsn.split('/') diff --git a/psycopg/psycopg.h b/psycopg/psycopg.h index 0c5bdcce..8134a83f 100644 --- a/psycopg/psycopg.h +++ b/psycopg/psycopg.h @@ -120,6 +120,7 @@ typedef struct connectionObject connectionObject; typedef struct replicationMessageObject replicationMessageObject; /* some utility functions */ +HIDDEN PyObject *parse_arg(int pos, char *name, PyObject *defval, PyObject *args, PyObject *kwargs); HIDDEN PyObject *psyco_parse_args(PyObject *self, PyObject *args, PyObject *kwargs); HIDDEN PyObject *psyco_parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs); HIDDEN PyObject *psyco_make_dsn(PyObject *self, PyObject *args, PyObject *kwargs); diff --git a/psycopg/psycopgmodule.c 
b/psycopg/psycopgmodule.c index eaa451d8..04f781f5 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -28,6 +28,7 @@ #include "psycopg/connection.h" #include "psycopg/cursor.h" +#include "psycopg/replication_connection.h" #include "psycopg/replication_cursor.h" #include "psycopg/replication_message.h" #include "psycopg/green.h" @@ -74,7 +75,7 @@ HIDDEN PyObject *psyco_DescriptionType = NULL; /* finds a keyword or positional arg (pops it from kwargs if found there) */ -static PyObject * +PyObject * parse_arg(int pos, char *name, PyObject *defval, PyObject *args, PyObject *kwargs) { Py_ssize_t nargs = PyTuple_GET_SIZE(args); @@ -1114,6 +1115,9 @@ INIT_MODULE(_psycopg)(void) Py_TYPE(&cursorType) = &PyType_Type; if (PyType_Ready(&cursorType) == -1) goto exit; + Py_TYPE(&replicationConnectionType) = &PyType_Type; + if (PyType_Ready(&replicationConnectionType) == -1) goto exit; + Py_TYPE(&replicationCursorType) = &PyType_Type; if (PyType_Ready(&replicationCursorType) == -1) goto exit; @@ -1237,6 +1241,8 @@ INIT_MODULE(_psycopg)(void) PyModule_AddStringConstant(module, "__version__", PSYCOPG_VERSION); PyModule_AddStringConstant(module, "__doc__", "psycopg PostgreSQL driver"); PyModule_AddIntConstant(module, "__libpq_version__", PG_VERSION_NUM); + PyModule_AddIntMacro(module, REPLICATION_PHYSICAL); + PyModule_AddIntMacro(module, REPLICATION_LOGICAL); PyModule_AddObject(module, "apilevel", Text_FromUTF8(APILEVEL)); PyModule_AddObject(module, "threadsafety", PyInt_FromLong(THREADSAFETY)); PyModule_AddObject(module, "paramstyle", Text_FromUTF8(PARAMSTYLE)); @@ -1244,6 +1250,7 @@ INIT_MODULE(_psycopg)(void) /* put new types in module dictionary */ PyModule_AddObject(module, "connection", (PyObject*)&connectionType); PyModule_AddObject(module, "cursor", (PyObject*)&cursorType); + PyModule_AddObject(module, "ReplicationConnection", (PyObject*)&replicationConnectionType); PyModule_AddObject(module, "ReplicationCursor", (PyObject*)&replicationCursorType); PyModule_AddObject(module, "ReplicationMessage", (PyObject*)&replicationMessageType); PyModule_AddObject(module, "ISQLQuote", (PyObject*)&isqlquoteType); @@ -1285,6 +1292,9 @@ INIT_MODULE(_psycopg)(void) if (0 != psyco_errors_init()) { goto exit; } psyco_errors_fill(dict); + replicationPhysicalConst = PyDict_GetItemString(dict, "REPLICATION_PHYSICAL"); + replicationLogicalConst = PyDict_GetItemString(dict, "REPLICATION_LOGICAL"); + Dprintf("initpsycopg: module initialization complete"); exit: diff --git a/psycopg/replication_connection.h b/psycopg/replication_connection.h new file mode 100644 index 00000000..9198f5de --- /dev/null +++ b/psycopg/replication_connection.h @@ -0,0 +1,53 @@ +/* replication_connection.h - definition for the psycopg replication connection type + * + * Copyright (C) 2015 Daniele Varrazzo + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. 
+ * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_REPLICATION_CONNECTION_H +#define PSYCOPG_REPLICATION_CONNECTION_H 1 + +#include "psycopg/connection.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject replicationConnectionType; + +typedef struct replicationConnectionObject { + connectionObject conn; + + long int type; +} replicationConnectionObject; + +#define REPLICATION_PHYSICAL 1 +#define REPLICATION_LOGICAL 2 + +extern HIDDEN PyObject *replicationPhysicalConst; +extern HIDDEN PyObject *replicationLogicalConst; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_REPLICATION_CONNECTION_H) */ diff --git a/psycopg/replication_connection_type.c b/psycopg/replication_connection_type.c new file mode 100644 index 00000000..16c52414 --- /dev/null +++ b/psycopg/replication_connection_type.c @@ -0,0 +1,210 @@ +/* replication_connection_type.c - python interface to replication connection objects + * + * Copyright (C) 2015 Daniele Varrazzo + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/replication_connection.h" +#include "psycopg/replication_message.h" +#include "psycopg/green.h" +#include "psycopg/pqpath.h" + +#include +#include + + +#define psyco_repl_conn_type_doc \ +"replication_type -- the replication connection type" + +static PyObject * +psyco_repl_conn_get_type(replicationConnectionObject *self) +{ + connectionObject *conn = &self->conn; + PyObject *res = NULL; + + EXC_IF_CONN_CLOSED(conn); + + if (self->type == REPLICATION_PHYSICAL) { + res = replicationPhysicalConst; + } else if (self->type == REPLICATION_LOGICAL) { + res = replicationLogicalConst; + } else { + PyErr_Format(PyExc_TypeError, "unknown replication type constant: %ld", self->type); + } + + Py_XINCREF(res); + return res; +} + +static int +replicationConnection_init(PyObject *obj, PyObject *args, PyObject *kwargs) +{ + replicationConnectionObject *self = (replicationConnectionObject *)obj; + PyObject *dsn = NULL; + PyObject *async = NULL; + PyObject *tmp = NULL; + const char *repl = NULL; + int ret = -1; + + Py_XINCREF(args); + Py_XINCREF(kwargs); + + /* dsn, async, replication_type */ + if (!(dsn = parse_arg(0, "dsn", Py_None, args, kwargs))) { goto exit; } + if (!(async = parse_arg(1, "async", Py_False, args, kwargs))) { goto exit; } + if (!(tmp = parse_arg(2, "replication_type", Py_None, args, kwargs))) { goto exit; } + + if (tmp == replicationPhysicalConst) { + self->type = REPLICATION_PHYSICAL; + repl = "true"; + } else if (tmp == replicationLogicalConst) { + self->type = REPLICATION_LOGICAL; + repl = "database"; + } else { + PyErr_SetString(PyExc_TypeError, + "replication_type must be either REPLICATION_PHYSICAL or REPLICATION_LOGICAL"); + goto exit; + } + Py_DECREF(tmp); + tmp = NULL; + + if (dsn != Py_None) { + if (kwargs && PyMapping_Size(kwargs) > 0) { + PyErr_SetString(PyExc_TypeError, "both dsn and parameters given"); + goto exit; + } else { + if (!(tmp = PyTuple_Pack(1, dsn))) { goto exit; } + + Py_XDECREF(kwargs); + if (!(kwargs = psyco_parse_dsn(NULL, tmp, NULL))) { goto exit; } + } + } else { + if (!(kwargs && PyMapping_Size(kwargs) > 0)) { + PyErr_SetString(PyExc_TypeError, "missing dsn and no parameters"); + goto exit; + } + } + + if (!PyMapping_HasKeyString(kwargs, "replication")) { + PyMapping_SetItemString(kwargs, "replication", Text_FromUTF8(repl)); + } + + Py_DECREF(dsn); + if (!(dsn = psyco_make_dsn(NULL, NULL, kwargs))) { goto exit; } + + Py_DECREF(args); + Py_DECREF(kwargs); + kwargs = NULL; + if (!(args = PyTuple_Pack(2, dsn, async))) { goto exit; } + + if ((ret = connectionType.tp_init(obj, args, NULL)) < 0) { goto exit; } + + self->conn.autocommit = 1; + self->conn.cursor_factory = (PyObject *)&replicationCursorType; + Py_INCREF(self->conn.cursor_factory); + +exit: + Py_XDECREF(tmp); + Py_XDECREF(dsn); + Py_XDECREF(async); + Py_XDECREF(args); + Py_XDECREF(kwargs); + + return ret; +} + +static PyObject * +replicationConnection_repr(replicationConnectionObject *self) +{ + return PyString_FromFormat( + "", + self, self->conn.dsn, self->conn.closed); +} + + +/* object calculated member list */ + +static struct PyGetSetDef replicationConnectionObject_getsets[] = { + /* override to prevent user tweaking these: */ + { "autocommit", NULL, NULL, NULL }, + { "isolation_level", NULL, NULL, NULL }, + { "set_session", NULL, NULL, NULL }, + { "set_isolation_level", NULL, NULL, NULL }, + { "reset", NULL, NULL, NULL }, + /* an actual getter */ + { "replication_type", + (getter)psyco_repl_conn_get_type, 
NULL, + psyco_repl_conn_type_doc, NULL }, + {NULL} +}; + +/* object type */ + +#define replicationConnectionType_doc \ +"A replication connection." + +PyTypeObject replicationConnectionType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.ReplicationConnection", + sizeof(replicationConnectionObject), 0, + 0, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)replicationConnection_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + (reprfunc)replicationConnection_repr, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + replicationConnectionType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + replicationConnectionObject_getsets, /*tp_getset*/ + &connectionType, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + replicationConnection_init, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ +}; + +PyObject *replicationPhysicalConst; +PyObject *replicationLogicalConst; diff --git a/psycopg2.cproj b/psycopg2.cproj index 75d96180..f6f85c72 100644 --- a/psycopg2.cproj +++ b/psycopg2.cproj @@ -92,6 +92,7 @@ + @@ -227,6 +228,7 @@ + diff --git a/setup.py b/setup.py index 18c47b7c..210ad831 100644 --- a/setup.py +++ b/setup.py @@ -466,7 +466,9 @@ sources = [ 'connection_int.c', 'connection_type.c', 'cursor_int.c', 'cursor_type.c', - 'replication_cursor_type.c', 'replication_message_type.c', + 'replication_connection_type.c', + 'replication_cursor_type.c', + 'replication_message_type.c', 'diagnostics_type.c', 'error_type.c', 'lobject_int.c', 'lobject_type.c', 'notify_type.c', 'xid_type.c', @@ -482,7 +484,9 @@ depends = [ # headers 'config.h', 'pgtypes.h', 'psycopg.h', 'python.h', 'connection.h', 'cursor.h', 'diagnostics.h', 'error.h', 'green.h', 'lobject.h', - 'replication_cursor.h', 'replication_message.h', + 'replication_connection.h', + 'replication_cursor.h', + 'replication_message.h', 'notify.h', 'pqpath.h', 'xid.h', 'libpq_support.h', 'win32_support.h', diff --git a/tests/testutils.py b/tests/testutils.py index 5f4493f2..70eb2cc9 100644 --- a/tests/testutils.py +++ b/tests/testutils.py @@ -129,9 +129,6 @@ class ConnectingTestCase(unittest.TestCase): conn = self.connect(**kwargs) except psycopg2.OperationalError, e: return self.skipTest("replication db not configured: %s" % e) - - if not conn.async: - conn.autocommit = True return conn def _get_conn(self): From a4cbb088fe2b1b3441b249a06e6498d17c3e56d9 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 30 Oct 2015 11:10:41 +0100 Subject: [PATCH 052/151] Add connection.get_dsn_parameters() --- doc/src/connection.rst | 18 ++++++++++++++++++ psycopg/connection_type.c | 33 +++++++++++++++++++++++++++++++++ psycopg/psycopg.h | 3 +++ psycopg/psycopgmodule.c | 22 +++------------------- psycopg/utils.c | 29 +++++++++++++++++++++++++++++ tests/test_connection.py | 6 ++++++ 6 files changed, 92 insertions(+), 19 deletions(-) diff --git a/doc/src/connection.rst b/doc/src/connection.rst index cceef1e5..3d38180a 100644 --- a/doc/src/connection.rst +++ b/doc/src/connection.rst @@ -568,6 +568,24 @@ The ``connection`` class .. versionadded:: 2.0.12 + .. 
method:: get_dsn_parameters() + + Get the effective dsn parameters for the connection as a dictionary. + + The *password* parameter is removed from the result. + + Example:: + + >>> conn.get_dsn_parameters() + {'dbname': 'test', 'user': 'postgres', 'port': '5432', 'sslmode': 'prefer'} + + Requires libpq >= 9.3. + + .. seealso:: libpq docs for `PQconninfo()`__ for details. + + .. __: http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNINFO + + .. index:: pair: Transaction; Status diff --git a/psycopg/connection_type.c b/psycopg/connection_type.c index 2c1dddf2..5c74c301 100644 --- a/psycopg/connection_type.c +++ b/psycopg/connection_type.c @@ -733,6 +733,37 @@ psyco_conn_get_parameter_status(connectionObject *self, PyObject *args) return conn_text_from_chars(self, val); } +/* get_dsn_parameters method - Get connection parameters */ + +#define psyco_conn_get_dsn_parameters_doc \ +"get_dsn_parameters() -- Get effective connection parameters.\n\n" + +static PyObject * +psyco_conn_get_dsn_parameters(connectionObject *self) +{ +#if PG_VERSION_NUM >= 90300 + PyObject *res = NULL; + PQconninfoOption *options = NULL; + + EXC_IF_CONN_CLOSED(self); + + if (!(options = PQconninfo(self->pgconn))) { + PyErr_NoMemory(); + goto exit; + } + + res = psycopg_dict_from_conninfo_options(options, /* include_password = */ 0); + +exit: + PQconninfoFree(options); + + return res; +#else + PyErr_SetString(NotSupportedError, "PQconninfo not available in libpq < 9.3"); + return NULL; +#endif +} + /* lobject method - allocate a new lobject */ @@ -977,6 +1008,8 @@ static struct PyMethodDef connectionObject_methods[] = { METH_NOARGS, psyco_conn_get_transaction_status_doc}, {"get_parameter_status", (PyCFunction)psyco_conn_get_parameter_status, METH_VARARGS, psyco_conn_get_parameter_status_doc}, + {"get_dsn_parameters", (PyCFunction)psyco_conn_get_dsn_parameters, + METH_NOARGS, psyco_conn_get_dsn_parameters_doc}, {"get_backend_pid", (PyCFunction)psyco_conn_get_backend_pid, METH_NOARGS, psyco_conn_get_backend_pid_doc}, {"lobject", (PyCFunction)psyco_conn_lobject, diff --git a/psycopg/psycopg.h b/psycopg/psycopg.h index eb406fd2..13326ccf 100644 --- a/psycopg/psycopg.h +++ b/psycopg/psycopg.h @@ -131,6 +131,9 @@ STEALS(1) HIDDEN PyObject * psycopg_ensure_bytes(PyObject *obj); STEALS(1) HIDDEN PyObject * psycopg_ensure_text(PyObject *obj); +HIDDEN PyObject *psycopg_dict_from_conninfo_options(PQconninfoOption *options, + int include_password); + /* Exceptions docstrings */ #define Error_doc \ "Base class for error exceptions." 
diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index cf70a4ad..38dd539b 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -118,8 +118,8 @@ static PyObject * psyco_parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs) { char *err = NULL; - PQconninfoOption *options = NULL, *o; - PyObject *dict = NULL, *res = NULL, *dsn; + PQconninfoOption *options = NULL; + PyObject *res = NULL, *dsn; static char *kwlist[] = {"dsn", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O", kwlist, &dsn)) { @@ -140,26 +140,10 @@ psyco_parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs) goto exit; } - if (!(dict = PyDict_New())) { goto exit; } - for (o = options; o->keyword != NULL; o++) { - if (o->val != NULL) { - PyObject *value; - if (!(value = Text_FromUTF8(o->val))) { goto exit; } - if (PyDict_SetItemString(dict, o->keyword, value) != 0) { - Py_DECREF(value); - goto exit; - } - Py_DECREF(value); - } - } - - /* success */ - res = dict; - dict = NULL; + res = psycopg_dict_from_conninfo_options(options, /* include_password = */ 1); exit: PQconninfoFree(options); /* safe on null */ - Py_XDECREF(dict); Py_XDECREF(dsn); return res; diff --git a/psycopg/utils.c b/psycopg/utils.c index ec8e47c8..1b10c4aa 100644 --- a/psycopg/utils.c +++ b/psycopg/utils.c @@ -247,3 +247,32 @@ psycopg_is_text_file(PyObject *f) } } +/* Make a dict out of PQconninfoOption array */ +PyObject * +psycopg_dict_from_conninfo_options(PQconninfoOption *options, int include_password) +{ + PyObject *dict, *res = NULL; + PQconninfoOption *o; + + if (!(dict = PyDict_New())) { goto exit; } + for (o = options; o->keyword != NULL; o++) { + if (o->val != NULL && + (include_password || strcmp(o->keyword, "password") != 0)) { + PyObject *value; + if (!(value = Text_FromUTF8(o->val))) { goto exit; } + if (PyDict_SetItemString(dict, o->keyword, value) != 0) { + Py_DECREF(value); + goto exit; + } + Py_DECREF(value); + } + } + + res = dict; + dict = NULL; + +exit: + Py_XDECREF(dict); + + return res; +} diff --git a/tests/test_connection.py b/tests/test_connection.py index 68bb6f05..7e183a82 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -381,6 +381,12 @@ class ParseDsnTestCase(ConnectingTestCase): self.assertRaises(TypeError, parse_dsn, None) self.assertRaises(TypeError, parse_dsn, 42) + def test_get_dsn_paramaters(self): + conn = self.connect() + d = conn.get_dsn_parameters() + self.assertEqual(d['dbname'], dbname) # the only param we can check reliably + self.assertNotIn('password', d) + class IsolationLevelsTestCase(ConnectingTestCase): From 602fefcae33f52544e2fd4fd7883929999b1b5a0 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 30 Oct 2015 11:38:28 +0100 Subject: [PATCH 053/151] Fix typo in a new test name --- tests/test_connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_connection.py b/tests/test_connection.py index 7e183a82..eddb0536 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -381,7 +381,7 @@ class ParseDsnTestCase(ConnectingTestCase): self.assertRaises(TypeError, parse_dsn, None) self.assertRaises(TypeError, parse_dsn, 42) - def test_get_dsn_paramaters(self): + def test_get_dsn_parameters(self): conn = self.connect() d = conn.get_dsn_parameters() self.assertEqual(d['dbname'], dbname) # the only param we can check reliably From e61db578cfc6b8ae18ffac41f2719c05cb04bb00 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 30 Oct 2015 13:00:55 +0100 Subject: [PATCH 054/151] Add 
dbname=replication for physical replication type. --- psycopg/replication_connection_type.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/psycopg/replication_connection_type.c b/psycopg/replication_connection_type.c index 16c52414..154a0ddd 100644 --- a/psycopg/replication_connection_type.c +++ b/psycopg/replication_connection_type.c @@ -110,6 +110,10 @@ replicationConnection_init(PyObject *obj, PyObject *args, PyObject *kwargs) if (!PyMapping_HasKeyString(kwargs, "replication")) { PyMapping_SetItemString(kwargs, "replication", Text_FromUTF8(repl)); } + /* with physical specify dbname=replication for .pgpass lookup */ + if (self->type == REPLICATION_PHYSICAL) { + PyMapping_SetItemString(kwargs, "dbname", Text_FromUTF8("replication")); + } Py_DECREF(dsn); if (!(dsn = psyco_make_dsn(NULL, NULL, kwargs))) { goto exit; } From 051e6d13646d2cafe17e647ce79e69b32e6397b3 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Fri, 30 Oct 2015 13:02:45 +0100 Subject: [PATCH 055/151] Add skip_before_libpq for test_get_dsn_parameters --- tests/test_connection.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_connection.py b/tests/test_connection.py index eddb0536..6f7ab88b 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -381,6 +381,7 @@ class ParseDsnTestCase(ConnectingTestCase): self.assertRaises(TypeError, parse_dsn, None) self.assertRaises(TypeError, parse_dsn, 42) + @skip_before_libpq(9, 3) def test_get_dsn_parameters(self): conn = self.connect() d = conn.get_dsn_parameters() From cf83470891f233156657ac4c9d20bd16c85690a3 Mon Sep 17 00:00:00 2001 From: Udi Oron Date: Tue, 10 Nov 2015 00:35:02 +0200 Subject: [PATCH 056/151] Suggest installing psycopg2 in windows using pip pip is becoming the standard method for installing python packages, and now binary wheels are a better and easier option for users: https://github.com/psycopg/psycopg2/issues/368 --- doc/src/install.rst | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/doc/src/install.rst b/doc/src/install.rst index ec1eeea8..a7a973c8 100644 --- a/doc/src/install.rst +++ b/doc/src/install.rst @@ -95,7 +95,15 @@ Install from a package pair: Install; Windows **Microsoft Windows** - Jason Erickson maintains a packaged `Windows port of Psycopg`__ with + There are two options to install a precompiled `psycopg2` package under windows: + + **Option 1:** Using `pip`__ (Included in python 2.7.9+ and python 3.4+) and a binary wheel package. Launch windows' command prompt (`cmd.exe`) and execute the following command:: + + pip install psycopg2 + + .. __: https://pip.pypa.io/en/stable/installing/ + + **Option 2:** Jason Erickson maintains a packaged `Windows port of Psycopg`__ with installation executable. Download. Double click. Done. .. __: http://www.stickpeople.com/projects/python/win-psycopg/ From b3def740028cd6c8756a6de35d20770d0971a212 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= Date: Tue, 10 Nov 2015 17:02:59 +0100 Subject: [PATCH 057/151] Update psycopg1.py --- lib/psycopg1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/psycopg1.py b/lib/psycopg1.py index 7a24c5f2..95b36bff 100644 --- a/lib/psycopg1.py +++ b/lib/psycopg1.py @@ -28,7 +28,7 @@ old code while porting to psycopg 2. Import it as follows:: # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # License for more details. 
-import _psycopg as _2psycopg +import psycopg2._psycopg as _2psycopg from psycopg2.extensions import cursor as _2cursor from psycopg2.extensions import connection as _2connection From 5fd0f6c4eefecb0d6150179c32c43d16c11b173d Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Wed, 16 Dec 2015 12:00:52 +0000 Subject: [PATCH 058/151] Fixed race condition on import in errorcodes.lookup Fixes #382. --- NEWS | 1 + lib/errorcodes.py | 10 +++++-- tests/__init__.py | 2 ++ tests/test_errcodes.py | 65 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 76 insertions(+), 2 deletions(-) create mode 100755 tests/test_errcodes.py diff --git a/NEWS b/NEWS index 5200c4dd..c1e4152f 100644 --- a/NEWS +++ b/NEWS @@ -27,6 +27,7 @@ What's new in psycopg 2.6.2 - Raise `!NotSupportedError` on unhandled server response status (:ticket:`#352`). - Fixed `!PersistentConnectionPool` on Python 3 (:ticket:`#348`). +- Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`). What's new in psycopg 2.6.1 diff --git a/lib/errorcodes.py b/lib/errorcodes.py index 12c300f6..aa5a723c 100644 --- a/lib/errorcodes.py +++ b/lib/errorcodes.py @@ -38,11 +38,17 @@ def lookup(code, _cache={}): return _cache[code] # Generate the lookup map at first usage. + tmp = {} for k, v in globals().iteritems(): if isinstance(v, str) and len(v) in (2, 5): - _cache[v] = k + tmp[v] = k - return lookup(code) + assert tmp + + # Atomic update, to avoid race condition on import (bug #382) + _cache.update(tmp) + + return _cache[code] # autogenerated data: do not edit below this point. diff --git a/tests/__init__.py b/tests/__init__.py index 3e677d85..3e0db779 100755 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -34,6 +34,7 @@ import test_connection import test_copy import test_cursor import test_dates +import test_errcodes import test_extras_dictcursor import test_green import test_lobject @@ -71,6 +72,7 @@ def test_suite(): suite.addTest(test_copy.test_suite()) suite.addTest(test_cursor.test_suite()) suite.addTest(test_dates.test_suite()) + suite.addTest(test_errcodes.test_suite()) suite.addTest(test_extras_dictcursor.test_suite()) suite.addTest(test_green.test_suite()) suite.addTest(test_lobject.test_suite()) diff --git a/tests/test_errcodes.py b/tests/test_errcodes.py new file mode 100755 index 00000000..6cf5ddba --- /dev/null +++ b/tests/test_errcodes.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# test_errcodes.py - unit test for psycopg2.errcodes module +# +# Copyright (C) 2015 Daniele Varrazzo +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +from testutils import unittest, ConnectingTestCase + +try: + reload +except NameError: + from imp import reload + +from threading import Thread +from psycopg2 import errorcodes + +class ErrocodeTests(ConnectingTestCase): + def test_lookup_threadsafe(self): + + # Increase if it does not fail with KeyError + MAX_CYCLES = 2000 + + errs = [] + def f(pg_code='40001'): + try: + errorcodes.lookup(pg_code) + except Exception, e: + errs.append(e) + + for __ in xrange(MAX_CYCLES): + reload(errorcodes) + (t1, t2) = (Thread(target=f), Thread(target=f)) + (t1.start(), t2.start()) + (t1.join(), t2.join()) + + if errs: + self.fail( + "raised %s errors in %s cycles (first is %s %s)" % ( + len(errs), MAX_CYCLES, + errs[0].__class__.__name__, errs[0])) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + +if __name__ == "__main__": + unittest.main() From 09a4bb70a168799a91f63f1c2039f456c485960f Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 5 Jan 2016 12:31:57 +0100 Subject: [PATCH 059/151] Allow retrying start_replication after syntax or data error. --- psycopg/pqpath.c | 7 +++++-- psycopg/replication_cursor.h | 20 +------------------- psycopg/replication_cursor_type.c | 25 +++++++++++++------------ tests/test_replication.py | 13 +++++++++++++ 4 files changed, 32 insertions(+), 33 deletions(-) diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 760fc977..6d6728ca 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1870,8 +1870,11 @@ pq_fetch(cursorObject *curs, int no_result) Dprintf("pq_fetch: data from a streaming replication slot (no tuples)"); curs->rowcount = -1; ex = 0; - /* nothing to do here: pq_copy_both will be called separately */ - CLEARPGRES(curs->pgres); + /* Nothing to do here: pq_copy_both will be called separately. + + Also don't clear the result status: it's checked in + consume_stream. 
*/ + /*CLEARPGRES(curs->pgres);*/ break; case PGRES_TUPLES_OK: diff --git a/psycopg/replication_cursor.h b/psycopg/replication_cursor.h index 36ced138..71c6e190 100644 --- a/psycopg/replication_cursor.h +++ b/psycopg/replication_cursor.h @@ -38,11 +38,10 @@ extern HIDDEN PyTypeObject replicationCursorType; typedef struct replicationCursorObject { cursorObject cur; - int started:1; /* if replication is started */ int consuming:1; /* if running the consume loop */ int decode:1; /* if we should use character decoding on the messages */ - struct timeval last_io ; /* timestamp of the last exchange with the server */ + struct timeval last_io; /* timestamp of the last exchange with the server */ struct timeval keepalive_interval; /* interval for keepalive messages in replication mode */ XLogRecPtr write_lsn; /* LSNs for replication feedback messages */ @@ -53,23 +52,6 @@ typedef struct replicationCursorObject { RAISES_NEG int psyco_repl_curs_datetime_init(void); -/* exception-raising macros */ -#define EXC_IF_REPLICATING(self, cmd) \ -do \ - if ((self)->started) { \ - PyErr_SetString(ProgrammingError, \ - #cmd " cannot be used when replication is already in progress"); \ - return NULL; } \ -while (0) - -#define EXC_IF_NOT_REPLICATING(self, cmd) \ -do \ - if (!(self)->started) { \ - PyErr_SetString(ProgrammingError, \ - #cmd " cannot be used when replication is not in progress"); \ - return NULL; } \ -while (0) - #ifdef __cplusplus } #endif diff --git a/psycopg/replication_cursor_type.c b/psycopg/replication_cursor_type.c index f652984e..204ff20a 100644 --- a/psycopg/replication_cursor_type.c +++ b/psycopg/replication_cursor_type.c @@ -59,7 +59,6 @@ psyco_repl_curs_start_replication_expert(replicationCursorObject *self, EXC_IF_CURS_CLOSED(curs); EXC_IF_GREEN(start_replication_expert); EXC_IF_TPC_PREPARED(conn, start_replication_expert); - EXC_IF_REPLICATING(self, start_replication_expert); Dprintf("psyco_repl_curs_start_replication_expert: '%s'; decode: %d", command, decode); @@ -67,7 +66,6 @@ psyco_repl_curs_start_replication_expert(replicationCursorObject *self, res = Py_None; Py_INCREF(res); - self->started = 1; self->decode = decode; gettimeofday(&self->last_io, NULL); } @@ -96,13 +94,6 @@ psyco_repl_curs_consume_stream(replicationCursorObject *self, EXC_IF_CURS_ASYNC(curs, consume_stream); EXC_IF_GREEN(consume_stream); EXC_IF_TPC_PREPARED(self->cur.conn, consume_stream); - EXC_IF_NOT_REPLICATING(self, consume_stream); - - if (self->consuming) { - PyErr_SetString(ProgrammingError, - "consume_stream cannot be used when already in the consume loop"); - return NULL; - } Dprintf("psyco_repl_curs_consume_stream"); @@ -111,6 +102,19 @@ psyco_repl_curs_consume_stream(replicationCursorObject *self, return NULL; } + if (self->consuming) { + PyErr_SetString(ProgrammingError, + "consume_stream cannot be used when already in the consume loop"); + return NULL; + } + + if (curs->pgres == NULL || PQresultStatus(curs->pgres) != PGRES_COPY_BOTH) { + PyErr_SetString(ProgrammingError, + "consume_stream: not replicating, call start_replication first"); + return NULL; + } + CLEARPGRES(curs->pgres); + self->consuming = 1; if (pq_copy_both(self, consume, keepalive_interval) >= 0) { @@ -135,7 +139,6 @@ psyco_repl_curs_read_message(replicationCursorObject *self) EXC_IF_CURS_CLOSED(curs); EXC_IF_GREEN(read_message); EXC_IF_TPC_PREPARED(self->cur.conn, read_message); - EXC_IF_NOT_REPLICATING(self, read_message); if (pq_read_replication_message(self, &msg) < 0) { return NULL; @@ -160,7 +163,6 @@ 
psyco_repl_curs_send_feedback(replicationCursorObject *self, static char* kwlist[] = {"write_lsn", "flush_lsn", "apply_lsn", "reply", NULL}; EXC_IF_CURS_CLOSED(curs); - EXC_IF_NOT_REPLICATING(self, send_feedback); if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|KKKi", kwlist, &write_lsn, &flush_lsn, &apply_lsn, &reply)) { @@ -246,7 +248,6 @@ static struct PyGetSetDef replicationCursorObject_getsets[] = { static int replicationCursor_setup(replicationCursorObject* self) { - self->started = 0; self->consuming = 0; self->decode = 0; diff --git a/tests/test_replication.py b/tests/test_replication.py index 4441a266..a316135f 100644 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -118,6 +118,18 @@ class ReplicationTest(ReplicationTestCase): self.create_replication_slot(cur) cur.start_replication(self.slot) + @skip_before_postgres(9, 4) # slots require 9.4 + def test_start_and_recover_from_error(self): + conn = self.repl_connect(connection_factory=LogicalReplicationConnection) + if conn is None: return + cur = conn.cursor() + + self.create_replication_slot(cur, output_plugin='test_decoding') + + self.assertRaises(psycopg2.DataError, cur.start_replication, + slot_name=self.slot, options=dict(invalid_param='value')) + cur.start_replication(slot_name=self.slot) + @skip_before_postgres(9, 4) # slots require 9.4 def test_stop_replication(self): conn = self.repl_connect(connection_factory=LogicalReplicationConnection) @@ -162,6 +174,7 @@ class AsyncReplicationTest(ReplicationTestCase): cur.send_feedback(flush_lsn=msg.data_start) + # cannot be used in asynchronous mode self.assertRaises(psycopg2.ProgrammingError, cur.consume_stream, consume) def process_stream(): From 5d33b39829d9733aa322246e12e2078a18d283b5 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 21 Jan 2016 15:56:27 +0100 Subject: [PATCH 060/151] Fix error test for invalid START_REPLICATION command. --- tests/test_replication.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/test_replication.py b/tests/test_replication.py index a316135f..f527edd2 100644 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -126,8 +126,14 @@ class ReplicationTest(ReplicationTestCase): self.create_replication_slot(cur, output_plugin='test_decoding') - self.assertRaises(psycopg2.DataError, cur.start_replication, - slot_name=self.slot, options=dict(invalid_param='value')) + # try with invalid options + cur.start_replication(slot_name=self.slot, options={'invalid_param': 'value'}) + def consume(msg): + pass + # we don't see the error from the server before we try to read the data + self.assertRaises(psycopg2.DataError, cur.consume_stream, consume) + + # try with correct command cur.start_replication(slot_name=self.slot) @skip_before_postgres(9, 4) # slots require 9.4 From 3a54e8373733ef574a72b5c592cc6a5033de5c4a Mon Sep 17 00:00:00 2001 From: "Karl O. Pinc" Date: Tue, 2 Feb 2016 12:48:16 -0600 Subject: [PATCH 061/151] Improve sentence. --- doc/src/advanced.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/advanced.rst b/doc/src/advanced.rst index 82754ee0..e63fcff1 100644 --- a/doc/src/advanced.rst +++ b/doc/src/advanced.rst @@ -270,7 +270,7 @@ wasting resources. A simple application could poll the connection from time to time to check if something new has arrived. 
A better strategy is to use some I/O completion -function such as :py:func:`~select.select` to sleep until awaken from the kernel when there is +function such as :py:func:`~select.select` to sleep until awakened by the kernel when there is some data to read on the connection, thereby using no CPU unless there is something to read:: From 01856333c4783e9053b0cfcb00de8a71146cdc57 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 03:20:11 +0000 Subject: [PATCH 062/151] Some order in the extensions doc Classes, coroutine functions and extra functions grouped under separate headings. --- doc/src/extensions.rst | 117 ++++++++++++++++++++++++++--------------- 1 file changed, 74 insertions(+), 43 deletions(-) diff --git a/doc/src/extensions.rst b/doc/src/extensions.rst index d96cca4f..68619e5f 100644 --- a/doc/src/extensions.rst +++ b/doc/src/extensions.rst @@ -12,17 +12,12 @@ The module contains a few objects and function extending the minimum set of functionalities defined by the |DBAPI|_. -.. function:: parse_dsn(dsn) +Classes definitions +------------------- - Parse connection string into a dictionary of keywords and values. - - Uses libpq's ``PQconninfoParse`` to parse the string according to - accepted format(s) and check for supported keywords. - - Example:: - - >>> psycopg2.extensions.parse_dsn('dbname=test user=postgres password=secret') - {'password': 'secret', 'user': 'postgres', 'dbname': 'test'} +Instances of these classes are usually returned by factory functions or +attributes. Their definitions are exposed here to allow subclassing, +introspection etc. .. class:: connection(dsn, async=False) @@ -34,6 +29,7 @@ functionalities defined by the |DBAPI|_. For a complete description of the class, see `connection`. + .. class:: cursor(conn, name=None) It is the class usually returned by the `connection.cursor()` @@ -44,6 +40,7 @@ functionalities defined by the |DBAPI|_. For a complete description of the class, see `cursor`. + .. class:: lobject(conn [, oid [, mode [, new_oid [, new_file ]]]]) Wrapper for a PostgreSQL large object. See :ref:`large-objects` for an @@ -200,39 +197,6 @@ functionalities defined by the |DBAPI|_. server versions. -.. autofunction:: set_wait_callback(f) - - .. versionadded:: 2.2.0 - -.. autofunction:: get_wait_callback() - - .. versionadded:: 2.2.0 - -.. function:: libpq_version() - - Return the version number of the ``libpq`` dynamic library loaded as an - integer, in the same format of `~connection.server_version`. - - Raise `~psycopg2.NotSupportedError` if the ``psycopg2`` module was - compiled with a ``libpq`` version lesser than 9.1 (which can be detected - by the `~psycopg2.__libpq_version__` constant). - - .. seealso:: libpq docs for `PQlibVersion()`__. - - .. __: http://www.postgresql.org/docs/current/static/libpq-misc.html#LIBPQ-PQLIBVERSION - -.. function:: quote_ident(str, scope) - - Return quoted identifier according to PostgreSQL quoting rules. - - The *scope* must be a `connection` or a `cursor`, the underlying - connection encoding is used for any necessary character conversion. - - Requires libpq >= 9.0. - - .. seealso:: libpq docs for `PQescapeIdentifier()`__ - - .. __: http://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQESCAPEIDENTIFIER .. _sql-adaptation-objects: @@ -492,6 +456,73 @@ The module exports a few exceptions in addition to the :ref:`standard ones +.. 
_coroutines-functions: + +Coroutines support functions +---------------------------- + +These functions are used to set and retrieve the callback function for +:ref:`cooperation with coroutine libraries `. + +.. versionadded:: 2.2.0 + +.. autofunction:: set_wait_callback(f) + +.. autofunction:: get_wait_callback() + + + +Other functions +--------------- + +.. function:: libpq_version() + + Return the version number of the ``libpq`` dynamic library loaded as an + integer, in the same format of `~connection.server_version`. + + Raise `~psycopg2.NotSupportedError` if the ``psycopg2`` module was + compiled with a ``libpq`` version lesser than 9.1 (which can be detected + by the `~psycopg2.__libpq_version__` constant). + + .. versionadded:: 2.7 + + .. seealso:: libpq docs for `PQlibVersion()`__. + + .. __: http://www.postgresql.org/docs/current/static/libpq-misc.html#LIBPQ-PQLIBVERSION + + +.. function:: parse_dsn(dsn) + + Parse connection string into a dictionary of keywords and values. + + Uses libpq's ``PQconninfoParse`` to parse the string according to + accepted format(s) and check for supported keywords. + + Example:: + + >>> psycopg2.extensions.parse_dsn('dbname=test user=postgres password=secret') + {'password': 'secret', 'user': 'postgres', 'dbname': 'test'} + + .. versionadded:: 2.7 + + +.. function:: quote_ident(str, scope) + + Return quoted identifier according to PostgreSQL quoting rules. + + The *scope* must be a `connection` or a `cursor`, the underlying + connection encoding is used for any necessary character conversion. + + Requires libpq >= 9.0. + + .. versionadded:: 2.7 + + .. seealso:: libpq docs for `PQescapeIdentifier()`__ + + .. __: http://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQESCAPEIDENTIFIER + + + .. index:: pair: Isolation level; Constants From d40f81865f0735a03f80eaa8fbb2db5c036da680 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 03:25:47 +0000 Subject: [PATCH 063/151] Added parse_dsn() docstring --- psycopg/psycopgmodule.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index cf70a4ad..24dd5f75 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -112,7 +112,8 @@ psyco_connect(PyObject *self, PyObject *args, PyObject *keywds) return conn; } -#define psyco_parse_dsn_doc "parse_dsn(dsn) -> dict" +#define psyco_parse_dsn_doc \ +"parse_dsn(dsn) -> dict -- parse a connection string into parameters" static PyObject * psyco_parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs) From 1c4523f0ac685632381a0f4371e93031928326b1 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 04:33:59 +0000 Subject: [PATCH 064/151] Implementation of make_dsn in Python This is equivalent to what proposed in #363, but with a much simpler implementation. 
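
A short sketch of the intended behaviour, following the cases covered by the
accompanying tests (keyword order in the returned string is not guaranteed;
the parameter values are taken from the tests)::

    >>> from psycopg2.extensions import make_dsn
    >>> make_dsn('dbname=foo', user='postgres')
    'dbname=foo user=postgres'
    >>> make_dsn('dbname=foo', database='bar')   # keywords override the dsn
    'dbname=bar'
    >>> make_dsn('nosuchparam=whatevs')          # no keywords: dsn untouched
    'nosuchparam=whatevs'
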
--- lib/__init__.py | 53 +++++--------------------------------------- lib/extensions.py | 45 ++++++++++++++++++++++++++++++++++--- tests/test_module.py | 38 ++++++++++++++++--------------- 3 files changed, 68 insertions(+), 68 deletions(-) diff --git a/lib/__init__.py b/lib/__init__.py index 994b15a8..608b5d14 100644 --- a/lib/__init__.py +++ b/lib/__init__.py @@ -80,28 +80,9 @@ else: _ext.register_adapter(Decimal, Adapter) del Decimal, Adapter -import re -def _param_escape(s, - re_escape=re.compile(r"([\\'])"), - re_space=re.compile(r'\s')): - """ - Apply the escaping rule required by PQconnectdb - """ - if not s: return "''" - - s = re_escape.sub(r'\\\1', s) - if re_space.search(s): - s = "'" + s + "'" - - return s - -del re - - -def connect(dsn=None, - database=None, user=None, password=None, host=None, port=None, - connection_factory=None, cursor_factory=None, async=False, **kwargs): +def connect(dsn=None, connection_factory=None, cursor_factory=None, + async=False, **kwargs): """ Create a new database connection. @@ -115,7 +96,7 @@ def connect(dsn=None, The basic connection parameters are: - - *dbname*: the database name (only in dsn string) + - *dbname*: the database name - *database*: the database name (only as keyword argument) - *user*: user name used to authenticate - *password*: password used to authenticate @@ -135,32 +116,10 @@ def connect(dsn=None, library: the list of supported parameters depends on the library version. """ - items = [] - if database is not None: - items.append(('dbname', database)) - if user is not None: - items.append(('user', user)) - if password is not None: - items.append(('password', password)) - if host is not None: - items.append(('host', host)) - if port is not None: - items.append(('port', port)) - - items.extend([(k, v) for (k, v) in kwargs.iteritems() if v is not None]) - - if dsn is not None and items: - raise TypeError( - "'%s' is an invalid keyword argument when the dsn is specified" - % items[0][0]) - - if dsn is None: - if not items: - raise TypeError('missing dsn and no parameters') - else: - dsn = " ".join(["%s=%s" % (k, _param_escape(str(v))) - for (k, v) in items]) + if dsn is None and not kwargs: + raise TypeError('missing dsn and no parameters') + dsn = _ext.make_dsn(dsn, **kwargs) conn = _connect(dsn, connection_factory=connection_factory, async=async) if cursor_factory is not None: conn.cursor_factory = cursor_factory diff --git a/lib/extensions.py b/lib/extensions.py index b40e28b8..3024b2fd 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -7,7 +7,7 @@ This module holds all the extensions to the DBAPI-2.0 provided by psycopg. - `lobject` -- the new-type inheritable large object class - `adapt()` -- exposes the PEP-246_ compatible adapting mechanism used by psycopg to adapt Python types to PostgreSQL ones - + .. _PEP-246: http://www.python.org/peps/pep-0246.html """ # psycopg/extensions.py - DBAPI-2.0 extensions specific to psycopg @@ -32,6 +32,9 @@ This module holds all the extensions to the DBAPI-2.0 provided by psycopg. # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # License for more details. 
+import re as _re +import sys as _sys + from psycopg2._psycopg import UNICODE, INTEGER, LONGINTEGER, BOOLEAN, FLOAT from psycopg2._psycopg import TIME, DATE, INTERVAL, DECIMAL from psycopg2._psycopg import BINARYARRAY, BOOLEANARRAY, DATEARRAY, DATETIMEARRAY @@ -56,7 +59,8 @@ try: except ImportError: pass -from psycopg2._psycopg import adapt, adapters, encodings, connection, cursor, lobject, Xid, libpq_version, parse_dsn, quote_ident +from psycopg2._psycopg import adapt, adapters, encodings, connection, cursor +from psycopg2._psycopg import lobject, Xid, libpq_version, parse_dsn, quote_ident from psycopg2._psycopg import string_types, binary_types, new_type, new_array_type, register_type from psycopg2._psycopg import ISQLQuote, Notify, Diagnostics, Column @@ -98,7 +102,6 @@ TRANSACTION_STATUS_INTRANS = 2 TRANSACTION_STATUS_INERROR = 3 TRANSACTION_STATUS_UNKNOWN = 4 -import sys as _sys # Return bytes from a string if _sys.version_info[0] < 3: @@ -108,6 +111,7 @@ else: def b(s): return s.encode('utf8') + def register_adapter(typ, callable): """Register 'callable' as an ISQLQuote adapter for type 'typ'.""" adapters[(typ, ISQLQuote)] = callable @@ -151,6 +155,41 @@ class NoneAdapter(object): return _null +def make_dsn(dsn=None, **kwargs): + """Convert a set of keywords into a connection strings.""" + # Override the dsn with the parameters + if 'database' in kwargs: + if 'dbname' in kwargs: + raise TypeError( + "you can't specify both 'database' and 'dbname' arguments") + kwargs['dbname'] = kwargs.pop('database') + + if dsn is not None: + tmp = parse_dsn(dsn) + tmp.update(kwargs) + kwargs = tmp + + dsn = " ".join(["%s=%s" % (k, _param_escape(str(v))) + for (k, v) in kwargs.iteritems()]) + return dsn + + +def _param_escape(s, + re_escape=_re.compile(r"([\\'])"), + re_space=_re.compile(r'\s')): + """ + Apply the escaping rule required by PQconnectdb + """ + if not s: + return "''" + + s = re_escape.sub(r'\\\1', s) + if re_space.search(s): + s = "'" + s + "'" + + return s + + # Create default json typecasters for PostgreSQL 9.2 oids from psycopg2._json import register_default_json, register_default_jsonb diff --git a/tests/test_module.py b/tests/test_module.py index 62b85ee2..c0e4bf87 100755 --- a/tests/test_module.py +++ b/tests/test_module.py @@ -43,6 +43,9 @@ class ConnectTestCase(unittest.TestCase): def tearDown(self): psycopg2._connect = self._connect_orig + def assertDsnEqual(self, dsn1, dsn2): + self.assertEqual(set(dsn1.split()), set(dsn2.split())) + def test_there_has_to_be_something(self): self.assertRaises(TypeError, psycopg2.connect) self.assertRaises(TypeError, psycopg2.connect, @@ -57,8 +60,8 @@ class ConnectTestCase(unittest.TestCase): self.assertEqual(self.args[2], False) def test_dsn(self): - psycopg2.connect('dbname=blah x=y') - self.assertEqual(self.args[0], 'dbname=blah x=y') + psycopg2.connect('dbname=blah application_name=y') + self.assertDsnEqual(self.args[0], 'dbname=blah application_name=y') self.assertEqual(self.args[1], None) self.assertEqual(self.args[2], False) @@ -90,30 +93,30 @@ class ConnectTestCase(unittest.TestCase): def f(dsn, async=False): pass - psycopg2.connect(database='foo', bar='baz', connection_factory=f) - self.assertEqual(self.args[0], 'dbname=foo bar=baz') + psycopg2.connect(database='foo', application_name='baz', connection_factory=f) + self.assertDsnEqual(self.args[0], 'dbname=foo application_name=baz') self.assertEqual(self.args[1], f) self.assertEqual(self.args[2], False) - psycopg2.connect("dbname=foo bar=baz", connection_factory=f) - 
self.assertEqual(self.args[0], 'dbname=foo bar=baz') + psycopg2.connect("dbname=foo application_name=baz", connection_factory=f) + self.assertDsnEqual(self.args[0], 'dbname=foo application_name=baz') self.assertEqual(self.args[1], f) self.assertEqual(self.args[2], False) def test_async(self): - psycopg2.connect(database='foo', bar='baz', async=1) - self.assertEqual(self.args[0], 'dbname=foo bar=baz') + psycopg2.connect(database='foo', application_name='baz', async=1) + self.assertDsnEqual(self.args[0], 'dbname=foo application_name=baz') self.assertEqual(self.args[1], None) self.assert_(self.args[2]) - psycopg2.connect("dbname=foo bar=baz", async=True) - self.assertEqual(self.args[0], 'dbname=foo bar=baz') + psycopg2.connect("dbname=foo application_name=baz", async=True) + self.assertDsnEqual(self.args[0], 'dbname=foo application_name=baz') self.assertEqual(self.args[1], None) self.assert_(self.args[2]) def test_empty_param(self): psycopg2.connect(database='sony', password='') - self.assertEqual(self.args[0], "dbname=sony password=''") + self.assertDsnEqual(self.args[0], "dbname=sony password=''") def test_escape(self): psycopg2.connect(database='hello world') @@ -131,13 +134,12 @@ class ConnectTestCase(unittest.TestCase): psycopg2.connect(database=r"\every thing'") self.assertEqual(self.args[0], r"dbname='\\every thing\''") - def test_no_kwargs_swallow(self): - self.assertRaises(TypeError, - psycopg2.connect, 'dbname=foo', database='foo') - self.assertRaises(TypeError, - psycopg2.connect, 'dbname=foo', user='postgres') - self.assertRaises(TypeError, - psycopg2.connect, 'dbname=foo', no_such_param='meh') + def test_params_merging(self): + psycopg2.connect('dbname=foo', database='bar') + self.assertEqual(self.args[0], 'dbname=bar') + + psycopg2.connect('dbname=foo', user='postgres') + self.assertDsnEqual(self.args[0], 'dbname=foo user=postgres') class ExceptionsTestCase(ConnectingTestCase): From 2c55a1bd5394ef82a49984fdce3c17ce956a9c9e Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 15:07:38 +0000 Subject: [PATCH 065/151] Verify that the dsn is not manipulated by make_dsn if not necessary --- lib/__init__.py | 3 --- lib/extensions.py | 7 +++++++ tests/test_module.py | 37 +++++++++++++++++++++++++------------ 3 files changed, 32 insertions(+), 15 deletions(-) diff --git a/lib/__init__.py b/lib/__init__.py index 608b5d14..4a288197 100644 --- a/lib/__init__.py +++ b/lib/__init__.py @@ -116,9 +116,6 @@ def connect(dsn=None, connection_factory=None, cursor_factory=None, library: the list of supported parameters depends on the library version. 
""" - if dsn is None and not kwargs: - raise TypeError('missing dsn and no parameters') - dsn = _ext.make_dsn(dsn, **kwargs) conn = _connect(dsn, connection_factory=connection_factory, async=async) if cursor_factory is not None: diff --git a/lib/extensions.py b/lib/extensions.py index 3024b2fd..469f1932 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -157,6 +157,13 @@ class NoneAdapter(object): def make_dsn(dsn=None, **kwargs): """Convert a set of keywords into a connection strings.""" + if dsn is None and not kwargs: + raise TypeError('missing dsn and no parameters') + + # If no kwarg is specified don't mung the dsn + if not kwargs: + return dsn + # Override the dsn with the parameters if 'database' in kwargs: if 'dbname' in kwargs: diff --git a/tests/test_module.py b/tests/test_module.py index c0e4bf87..9f0adcc9 100755 --- a/tests/test_module.py +++ b/tests/test_module.py @@ -31,9 +31,11 @@ from testutils import ConnectingTestCase, skip_copy_if_green, script_to_py3 import psycopg2 + class ConnectTestCase(unittest.TestCase): def setUp(self): self.args = None + def conect_stub(dsn, connection_factory=None, async=False): self.args = (dsn, connection_factory, async) @@ -60,8 +62,8 @@ class ConnectTestCase(unittest.TestCase): self.assertEqual(self.args[2], False) def test_dsn(self): - psycopg2.connect('dbname=blah application_name=y') - self.assertDsnEqual(self.args[0], 'dbname=blah application_name=y') + psycopg2.connect('dbname=blah x=y') + self.assertEqual(self.args[0], 'dbname=blah x=y') self.assertEqual(self.args[1], None) self.assertEqual(self.args[2], False) @@ -93,24 +95,24 @@ class ConnectTestCase(unittest.TestCase): def f(dsn, async=False): pass - psycopg2.connect(database='foo', application_name='baz', connection_factory=f) - self.assertDsnEqual(self.args[0], 'dbname=foo application_name=baz') + psycopg2.connect(database='foo', bar='baz', connection_factory=f) + self.assertDsnEqual(self.args[0], 'dbname=foo bar=baz') self.assertEqual(self.args[1], f) self.assertEqual(self.args[2], False) - psycopg2.connect("dbname=foo application_name=baz", connection_factory=f) - self.assertDsnEqual(self.args[0], 'dbname=foo application_name=baz') + psycopg2.connect("dbname=foo bar=baz", connection_factory=f) + self.assertDsnEqual(self.args[0], 'dbname=foo bar=baz') self.assertEqual(self.args[1], f) self.assertEqual(self.args[2], False) def test_async(self): - psycopg2.connect(database='foo', application_name='baz', async=1) - self.assertDsnEqual(self.args[0], 'dbname=foo application_name=baz') + psycopg2.connect(database='foo', bar='baz', async=1) + self.assertDsnEqual(self.args[0], 'dbname=foo bar=baz') self.assertEqual(self.args[1], None) self.assert_(self.args[2]) - psycopg2.connect("dbname=foo application_name=baz", async=True) - self.assertDsnEqual(self.args[0], 'dbname=foo application_name=baz') + psycopg2.connect("dbname=foo bar=baz", async=True) + self.assertDsnEqual(self.args[0], 'dbname=foo bar=baz') self.assertEqual(self.args[1], None) self.assert_(self.args[2]) @@ -141,6 +143,16 @@ class ConnectTestCase(unittest.TestCase): psycopg2.connect('dbname=foo', user='postgres') self.assertDsnEqual(self.args[0], 'dbname=foo user=postgres') + def test_no_dsn_munging(self): + psycopg2.connect('nosuchparam=whatevs') + self.assertEqual(self.args[0], 'nosuchparam=whatevs') + + psycopg2.connect(nosuchparam='whatevs') + self.assertEqual(self.args[0], 'nosuchparam=whatevs') + + self.assertRaises(psycopg2.ProgrammingError, + psycopg2.connect, 'nosuchparam=whatevs', andthis='either') + class 
ExceptionsTestCase(ConnectingTestCase): def test_attributes(self): @@ -205,7 +217,8 @@ class ExceptionsTestCase(ConnectingTestCase): self.assertEqual(diag.sqlstate, '42P01') del diag - gc.collect(); gc.collect() + gc.collect() + gc.collect() assert(w() is None) @skip_copy_if_green @@ -327,7 +340,7 @@ class TestVersionDiscovery(unittest.TestCase): self.assertTrue(type(psycopg2.__libpq_version__) is int) try: self.assertTrue(type(psycopg2.extensions.libpq_version()) is int) - except NotSupportedError: + except psycopg2.NotSupportedError: self.assertTrue(psycopg2.__libpq_version__ < 90100) From 52087a79d99009e41129601639e022d100d6491b Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 15:31:37 +0000 Subject: [PATCH 066/151] Added test suite specific for make_dsn --- tests/test_connection.py | 117 +++++++++++++++++++++++++++------------ 1 file changed, 82 insertions(+), 35 deletions(-) diff --git a/tests/test_connection.py b/tests/test_connection.py index 68bb6f05..5b296949 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -32,6 +32,7 @@ from StringIO import StringIO import psycopg2 import psycopg2.errorcodes import psycopg2.extensions +ext = psycopg2.extensions from testutils import unittest, decorate_all_tests, skip_if_no_superuser from testutils import skip_before_postgres, skip_after_postgres, skip_before_libpq @@ -125,7 +126,7 @@ class ConnectionTests(ConnectingTestCase): if self.conn.server_version >= 90300: cur.execute("set client_min_messages=debug1") for i in range(0, 100, 10): - sql = " ".join(["create temp table table%d (id serial);" % j for j in range(i, i+10)]) + sql = " ".join(["create temp table table%d (id serial);" % j for j in range(i, i + 10)]) cur.execute(sql) self.assertEqual(50, len(conn.notices)) @@ -151,7 +152,7 @@ class ConnectionTests(ConnectingTestCase): # not limited, but no error for i in range(0, 100, 10): - sql = " ".join(["create temp table table2_%d (id serial);" % j for j in range(i, i+10)]) + sql = " ".join(["create temp table table2_%d (id serial);" % j for j in range(i, i + 10)]) cur.execute(sql) self.assertEqual(len([n for n in conn.notices if 'CREATE TABLE' in n]), @@ -172,7 +173,7 @@ class ConnectionTests(ConnectingTestCase): self.assert_(self.conn.server_version) def test_protocol_version(self): - self.assert_(self.conn.protocol_version in (2,3), + self.assert_(self.conn.protocol_version in (2, 3), self.conn.protocol_version) def test_tpc_unsupported(self): @@ -252,7 +253,7 @@ class ConnectionTests(ConnectingTestCase): t1.start() i = 1 for i in range(1000): - cur.execute("select %s;",(i,)) + cur.execute("select %s;", (i,)) conn.commit() while conn.notices: notices.append((1, conn.notices.pop())) @@ -313,16 +314,15 @@ class ConnectionTests(ConnectingTestCase): class ParseDsnTestCase(ConnectingTestCase): def test_parse_dsn(self): from psycopg2 import ProgrammingError - from psycopg2.extensions import parse_dsn - self.assertEqual(parse_dsn('dbname=test user=tester password=secret'), + self.assertEqual(ext.parse_dsn('dbname=test user=tester password=secret'), dict(user='tester', password='secret', dbname='test'), "simple DSN parsed") - self.assertRaises(ProgrammingError, parse_dsn, + self.assertRaises(ProgrammingError, ext.parse_dsn, "dbname=test 2 user=tester password=secret") - self.assertEqual(parse_dsn("dbname='test 2' user=tester password=secret"), + self.assertEqual(ext.parse_dsn("dbname='test 2' user=tester password=secret"), dict(user='tester', password='secret', dbname='test 2'), "DSN with quoting parsed") 
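
For reference, a minimal sketch of the ``parse_dsn()`` behaviour these tests
exercise (assuming psycopg2 2.7 or later, where the function is exposed in
``psycopg2.extensions``)::

    from psycopg2 import extensions as ext

    # quoting in the connection string is resolved by libpq's PQconninfoParse
    params = ext.parse_dsn("dbname='test 2' user=tester password=secret")
    assert params == {'dbname': 'test 2', 'user': 'tester', 'password': 'secret'}
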
@@ -332,7 +332,7 @@ class ParseDsnTestCase(ConnectingTestCase): raised = False try: # unterminated quote after dbname: - parse_dsn("dbname='test 2 user=tester password=secret") + ext.parse_dsn("dbname='test 2 user=tester password=secret") except ProgrammingError, e: raised = True self.assertTrue(str(e).find('secret') < 0, @@ -343,16 +343,14 @@ class ParseDsnTestCase(ConnectingTestCase): @skip_before_libpq(9, 2) def test_parse_dsn_uri(self): - from psycopg2.extensions import parse_dsn - - self.assertEqual(parse_dsn('postgresql://tester:secret@/test'), + self.assertEqual(ext.parse_dsn('postgresql://tester:secret@/test'), dict(user='tester', password='secret', dbname='test'), "valid URI dsn parsed") raised = False try: # extra '=' after port value - parse_dsn(dsn='postgresql://tester:secret@/test?port=1111=x') + ext.parse_dsn(dsn='postgresql://tester:secret@/test?port=1111=x') except psycopg2.ProgrammingError, e: raised = True self.assertTrue(str(e).find('secret') < 0, @@ -362,24 +360,76 @@ class ParseDsnTestCase(ConnectingTestCase): self.assertTrue(raised, "ProgrammingError raised due to invalid URI") def test_unicode_value(self): - from psycopg2.extensions import parse_dsn snowman = u"\u2603" - d = parse_dsn('dbname=' + snowman) + d = ext.parse_dsn('dbname=' + snowman) if sys.version_info[0] < 3: self.assertEqual(d['dbname'], snowman.encode('utf8')) else: self.assertEqual(d['dbname'], snowman) def test_unicode_key(self): - from psycopg2.extensions import parse_dsn snowman = u"\u2603" - self.assertRaises(psycopg2.ProgrammingError, parse_dsn, + self.assertRaises(psycopg2.ProgrammingError, ext.parse_dsn, snowman + '=' + snowman) def test_bad_param(self): - from psycopg2.extensions import parse_dsn - self.assertRaises(TypeError, parse_dsn, None) - self.assertRaises(TypeError, parse_dsn, 42) + self.assertRaises(TypeError, ext.parse_dsn, None) + self.assertRaises(TypeError, ext.parse_dsn, 42) + + +class MakeDsnTestCase(ConnectingTestCase): + def assertDsnEqual(self, dsn1, dsn2): + self.assertEqual(set(dsn1.split()), set(dsn2.split())) + + def test_there_has_to_be_something(self): + self.assertRaises(TypeError, ext.make_dsn) + + def test_empty_param(self): + dsn = ext.make_dsn(database='sony', password='') + self.assertDsnEqual(dsn, "dbname=sony password=''") + + def test_escape(self): + dsn = ext.make_dsn(database='hello world') + self.assertEqual(dsn, "dbname='hello world'") + + dsn = ext.make_dsn(database=r'back\slash') + self.assertEqual(dsn, r"dbname=back\\slash") + + dsn = ext.make_dsn(database="quo'te") + self.assertEqual(dsn, r"dbname=quo\'te") + + dsn = ext.make_dsn(database="with\ttab") + self.assertEqual(dsn, "dbname='with\ttab'") + + dsn = ext.make_dsn(database=r"\every thing'") + self.assertEqual(dsn, r"dbname='\\every thing\''") + + def test_params_merging(self): + dsn = ext.make_dsn('dbname=foo', database='bar') + self.assertEqual(dsn, 'dbname=bar') + + dsn = ext.make_dsn('dbname=foo', user='postgres') + self.assertDsnEqual(dsn, 'dbname=foo user=postgres') + + def test_no_dsn_munging(self): + dsn = ext.make_dsn('nosuchparam=whatevs') + self.assertEqual(dsn, 'nosuchparam=whatevs') + + dsn = ext.make_dsn(nosuchparam='whatevs') + self.assertEqual(dsn, 'nosuchparam=whatevs') + + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, 'nosuchparam=whatevs', andthis='either') + + @skip_before_libpq(9, 2) + def test_url_is_cool(self): + dsn = ext.make_dsn('postgresql://tester:secret@/test') + self.assertEqual(dsn, 'postgresql://tester:secret@/test') + + dsn = 
ext.make_dsn('postgresql://tester:secret@/test', + application_name='woot') + self.assertDsnEqual(dsn, + 'dbname=test user=tester password=secret application_name=woot') class IsolationLevelsTestCase(ConnectingTestCase): @@ -587,7 +637,7 @@ class ConnectionTwoPhaseTests(ConnectingTestCase): cnn.close() return - gids = [ r[0] for r in cur ] + gids = [r[0] for r in cur] for gid in gids: cur.execute("rollback prepared %s;", (gid,)) cnn.close() @@ -761,13 +811,13 @@ class ConnectionTwoPhaseTests(ConnectingTestCase): def test_status_after_recover(self): cnn = self.connect() self.assertEqual(psycopg2.extensions.STATUS_READY, cnn.status) - xns = cnn.tpc_recover() + cnn.tpc_recover() self.assertEqual(psycopg2.extensions.STATUS_READY, cnn.status) cur = cnn.cursor() cur.execute("select 1") self.assertEqual(psycopg2.extensions.STATUS_BEGIN, cnn.status) - xns = cnn.tpc_recover() + cnn.tpc_recover() self.assertEqual(psycopg2.extensions.STATUS_BEGIN, cnn.status) def test_recovered_xids(self): @@ -789,12 +839,12 @@ class ConnectionTwoPhaseTests(ConnectingTestCase): cnn = self.connect() xids = cnn.tpc_recover() - xids = [ xid for xid in xids if xid.database == dbname ] + xids = [xid for xid in xids if xid.database == dbname] xids.sort(key=attrgetter('gtrid')) # check the values returned self.assertEqual(len(okvals), len(xids)) - for (xid, (gid, prepared, owner, database)) in zip (xids, okvals): + for (xid, (gid, prepared, owner, database)) in zip(xids, okvals): self.assertEqual(xid.gtrid, gid) self.assertEqual(xid.prepared, prepared) self.assertEqual(xid.owner, owner) @@ -825,8 +875,7 @@ class ConnectionTwoPhaseTests(ConnectingTestCase): cnn.close() cnn = self.connect() - xids = [ xid for xid in cnn.tpc_recover() - if xid.database == dbname ] + xids = [x for x in cnn.tpc_recover() if x.database == dbname] self.assertEqual(1, len(xids)) xid = xids[0] self.assertEqual(xid.format_id, fid) @@ -847,8 +896,7 @@ class ConnectionTwoPhaseTests(ConnectingTestCase): cnn.close() cnn = self.connect() - xids = [ xid for xid in cnn.tpc_recover() - if xid.database == dbname ] + xids = [x for x in cnn.tpc_recover() if x.database == dbname] self.assertEqual(1, len(xids)) xid = xids[0] self.assertEqual(xid.format_id, None) @@ -893,8 +941,7 @@ class ConnectionTwoPhaseTests(ConnectingTestCase): cnn.tpc_begin(x1) cnn.tpc_prepare() cnn.reset() - xid = [ xid for xid in cnn.tpc_recover() - if xid.database == dbname ][0] + xid = [x for x in cnn.tpc_recover() if x.database == dbname][0] self.assertEqual(10, xid.format_id) self.assertEqual('uni', xid.gtrid) self.assertEqual('code', xid.bqual) @@ -909,8 +956,7 @@ class ConnectionTwoPhaseTests(ConnectingTestCase): cnn.tpc_prepare() cnn.reset() - xid = [ xid for xid in cnn.tpc_recover() - if xid.database == dbname ][0] + xid = [x for x in cnn.tpc_recover() if x.database == dbname][0] self.assertEqual(None, xid.format_id) self.assertEqual('transaction-id', xid.gtrid) self.assertEqual(None, xid.bqual) @@ -929,7 +975,7 @@ class ConnectionTwoPhaseTests(ConnectingTestCase): cnn.reset() xids = cnn.tpc_recover() - xid = [ xid for xid in xids if xid.database == dbname ][0] + xid = [x for x in xids if x.database == dbname][0] self.assertEqual(None, xid.format_id) self.assertEqual('dict-connection', xid.gtrid) self.assertEqual(None, xid.bqual) @@ -1182,7 +1228,8 @@ class ReplicationTest(ConnectingTestCase): @skip_before_postgres(9, 0) def test_replication_not_supported(self): conn = self.repl_connect() - if conn is None: return + if conn is None: + return cur = conn.cursor() f = StringIO() 
self.assertRaises(psycopg2.NotSupportedError, From 6893295a8794f332cb9c2a667001ba7dcefb880b Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 16:08:09 +0000 Subject: [PATCH 067/151] Added docs about make_dsn connect() docs updated to document the arguments merging. --- doc/src/extensions.rst | 24 +++++++++++++++++++++++- doc/src/module.rst | 20 +++++++++----------- lib/__init__.py | 4 ++-- 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/doc/src/extensions.rst b/doc/src/extensions.rst index 68619e5f..594ffc03 100644 --- a/doc/src/extensions.rst +++ b/doc/src/extensions.rst @@ -491,6 +491,27 @@ Other functions .. __: http://www.postgresql.org/docs/current/static/libpq-misc.html#LIBPQ-PQLIBVERSION +.. function:: make_dsn(dsn=None, \*\*kwargs) + + Create a connection string from arguments. + + Put together the arguments in *kwargs* into a connection string. If *dsn* + is specified too, merge the arguments coming from both the sources. If the + same argument is specified in both the sources, the *kwargs* version + overrides the *dsn* version + + At least one param is required (either *dsn* or any keyword). Note that + the empty string is a valid connection string. + + Example:: + + >>> from psycopg2.extensions import make_dsn + >>> make_dsn('dbname=foo host=example.com', password="s3cr3t") + 'host=example.com password=s3cr3t dbname=foo' + + .. versionadded:: 2.7 + + .. function:: parse_dsn(dsn) Parse connection string into a dictionary of keywords and values. @@ -500,7 +521,8 @@ Other functions Example:: - >>> psycopg2.extensions.parse_dsn('dbname=test user=postgres password=secret') + >>> from psycopg2.extensions import parse_dsn + >>> parse_dsn('dbname=test user=postgres password=secret') {'password': 'secret', 'user': 'postgres', 'dbname': 'test'} .. versionadded:: 2.7 diff --git a/doc/src/module.rst b/doc/src/module.rst index 6950b703..25a9ba27 100644 --- a/doc/src/module.rst +++ b/doc/src/module.rst @@ -17,30 +17,25 @@ The module interface respects the standard defined in the |DBAPI|_. single: DSN (Database Source Name) .. function:: - connect(dsn, connection_factory=None, cursor_factory=None, async=False) - connect(\*\*kwargs, connection_factory=None, cursor_factory=None, async=False) + connect(dsn, connection_factory=None, cursor_factory=None, async=False, \*\*kwargs) Create a new database session and return a new `connection` object. - The connection parameters can be specified either as a `libpq connection + The connection parameters can be specified as a `libpq connection string`__ using the *dsn* parameter:: conn = psycopg2.connect("dbname=test user=postgres password=secret") or using a set of keyword arguments:: - conn = psycopg2.connect(database="test", user="postgres", password="secret") + conn = psycopg2.connect(dbname"test", user="postgres", password="secret") - The two call styles are mutually exclusive: you cannot specify connection - parameters as keyword arguments together with a connection string; only - the parameters not needed for the database connection (*i.e.* - *connection_factory*, *cursor_factory*, and *async*) are supported - together with the *dsn* argument. + or using a mix of both: if the same parameter name is specified in both + sources the *kwargs* value will have precedence over the *dsn* value. 
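
A small, hedged sketch of the merge rule described above, using ``make_dsn()``
so that no live connection is needed (the parameter values are examples only;
the keyword order of the result is not guaranteed, hence the comparison via
``parse_dsn()``)::

    from psycopg2.extensions import make_dsn, parse_dsn

    # the keyword argument takes precedence over the value in the dsn string
    dsn = make_dsn("dbname=test user=postgres", user="tester")
    assert parse_dsn(dsn) == {'dbname': 'test', 'user': 'tester'}
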
The basic connection parameters are: - - `!dbname` -- the database name (only in the *dsn* string) - - `!database` -- the database name (only as keyword argument) + - `!dbname` -- the database name (`!database` is a deprecated alias) - `!user` -- user name used to authenticate - `!password` -- password used to authenticate - `!host` -- database host address (defaults to UNIX socket if not provided) @@ -76,6 +71,9 @@ The module interface respects the standard defined in the |DBAPI|_. .. versionchanged:: 2.5 added the *cursor_factory* parameter. + .. versionchanged:: 2.7 + both *dsn* and keyword arguments can be specified. + .. seealso:: - `~psycopg2.extensions.parse_dsn` diff --git a/lib/__init__.py b/lib/__init__.py index 4a288197..698f50d6 100644 --- a/lib/__init__.py +++ b/lib/__init__.py @@ -86,7 +86,7 @@ def connect(dsn=None, connection_factory=None, cursor_factory=None, """ Create a new database connection. - The connection parameters can be specified either as a string: + The connection parameters can be specified as a string: conn = psycopg2.connect("dbname=test user=postgres password=secret") @@ -94,7 +94,7 @@ def connect(dsn=None, connection_factory=None, cursor_factory=None, conn = psycopg2.connect(database="test", user="postgres", password="secret") - The basic connection parameters are: + Or as a mix of both. The basic connection parameters are: - *dbname*: the database name - *database*: the database name (only as keyword argument) From 7155d06cdcc15064d28f8667e72d8f61b9b31e90 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 16:09:33 +0000 Subject: [PATCH 068/151] Test that the empty dsn is a valid make_dsn input --- tests/test_connection.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_connection.py b/tests/test_connection.py index 5b296949..f2ea3d67 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -384,6 +384,10 @@ class MakeDsnTestCase(ConnectingTestCase): def test_there_has_to_be_something(self): self.assertRaises(TypeError, ext.make_dsn) + def test_empty_string(self): + dsn = ext.make_dsn('') + self.assertEqual(dsn, '') + def test_empty_param(self): dsn = ext.make_dsn(database='sony', password='') self.assertDsnEqual(dsn, "dbname=sony password=''") From 7aab934ae5950c6fa1bcd25ed053857538624310 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 16:52:53 +0000 Subject: [PATCH 069/151] Validate output result from make_dsn() The output is not necessarily munged anyway: if no keyword is passed, validate the input but return it untouched. --- doc/src/extensions.rst | 26 +++++++++++++++----- lib/extensions.py | 7 +++++- psycopg/psycopgmodule.c | 2 +- tests/test_connection.py | 51 ++++++++++++++++++++++++---------------- tests/test_module.py | 34 ++++++++++----------------- 5 files changed, 70 insertions(+), 50 deletions(-) diff --git a/doc/src/extensions.rst b/doc/src/extensions.rst index 594ffc03..fa69d628 100644 --- a/doc/src/extensions.rst +++ b/doc/src/extensions.rst @@ -493,15 +493,19 @@ Other functions .. function:: make_dsn(dsn=None, \*\*kwargs) - Create a connection string from arguments. + Create a valid connection string from arguments. Put together the arguments in *kwargs* into a connection string. If *dsn* is specified too, merge the arguments coming from both the sources. If the same argument is specified in both the sources, the *kwargs* version - overrides the *dsn* version + overrides the *dsn* version. - At least one param is required (either *dsn* or any keyword). 
Note that - the empty string is a valid connection string. + At least one parameter is required (either *dsn* or any keyword). Note + that the empty string is a valid connection string. + + The input arguments are validated: the output should always be a valid + connection string (as far as `parse_dsn()` is concerned). If not raise + `~psycopg2.ProgrammingError`. Example:: @@ -516,17 +520,27 @@ Other functions Parse connection string into a dictionary of keywords and values. - Uses libpq's ``PQconninfoParse`` to parse the string according to - accepted format(s) and check for supported keywords. + Parsing is delegated to the libpq: different versions of the client + library may support different formats or parameters (for example, + `connection URIs`__ are only supported from libpq 9.2). Raise + `~psycopg2.ProgrammingError` if the *dsn* is not valid. + + .. __: http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING Example:: >>> from psycopg2.extensions import parse_dsn >>> parse_dsn('dbname=test user=postgres password=secret') {'password': 'secret', 'user': 'postgres', 'dbname': 'test'} + >>> parse_dsn("postgresql://someone@example.com/somedb?connect_timeout=10") + {'host': 'example.com', 'user': 'someone', 'dbname': 'somedb', 'connect_timeout': '10'} .. versionadded:: 2.7 + .. seealso:: libpq docs for `PQconninfoParse()`__. + + .. __: http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNINFOPARSE + .. function:: quote_ident(str, scope) diff --git a/lib/extensions.py b/lib/extensions.py index 469f1932..39cc4de6 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -160,8 +160,9 @@ def make_dsn(dsn=None, **kwargs): if dsn is None and not kwargs: raise TypeError('missing dsn and no parameters') - # If no kwarg is specified don't mung the dsn + # If no kwarg is specified don't mung the dsn, but verify it if not kwargs: + parse_dsn(dsn) return dsn # Override the dsn with the parameters @@ -178,6 +179,10 @@ def make_dsn(dsn=None, **kwargs): dsn = " ".join(["%s=%s" % (k, _param_escape(str(v))) for (k, v) in kwargs.iteritems()]) + + # verify that the returned dsn is valid + parse_dsn(dsn) + return dsn diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index 24dd5f75..55f8ed95 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -133,7 +133,7 @@ psyco_parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs) options = PQconninfoParse(Bytes_AS_STRING(dsn), &err); if (options == NULL) { if (err != NULL) { - PyErr_Format(ProgrammingError, "error parsing the dsn: %s", err); + PyErr_Format(ProgrammingError, "invalid dsn: %s", err); PQfreemem(err); } else { PyErr_SetString(OperationalError, "PQconninfoParse() failed"); diff --git a/tests/test_connection.py b/tests/test_connection.py index f2ea3d67..0158f5cc 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -388,53 +388,64 @@ class MakeDsnTestCase(ConnectingTestCase): dsn = ext.make_dsn('') self.assertEqual(dsn, '') + def test_params_validation(self): + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, 'dbnamo=a') + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, dbnamo='a') + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, 'dbname=a', nosuchparam='b') + def test_empty_param(self): - dsn = ext.make_dsn(database='sony', password='') + dsn = ext.make_dsn(dbname='sony', password='') self.assertDsnEqual(dsn, "dbname=sony password=''") def test_escape(self): - dsn = ext.make_dsn(database='hello world') + dsn = 
ext.make_dsn(dbname='hello world') self.assertEqual(dsn, "dbname='hello world'") - dsn = ext.make_dsn(database=r'back\slash') + dsn = ext.make_dsn(dbname=r'back\slash') self.assertEqual(dsn, r"dbname=back\\slash") - dsn = ext.make_dsn(database="quo'te") + dsn = ext.make_dsn(dbname="quo'te") self.assertEqual(dsn, r"dbname=quo\'te") - dsn = ext.make_dsn(database="with\ttab") + dsn = ext.make_dsn(dbname="with\ttab") self.assertEqual(dsn, "dbname='with\ttab'") - dsn = ext.make_dsn(database=r"\every thing'") + dsn = ext.make_dsn(dbname=r"\every thing'") self.assertEqual(dsn, r"dbname='\\every thing\''") + def test_database_is_a_keyword(self): + self.assertEqual(ext.make_dsn(database='sigh'), "dbname=sigh") + def test_params_merging(self): - dsn = ext.make_dsn('dbname=foo', database='bar') - self.assertEqual(dsn, 'dbname=bar') + dsn = ext.make_dsn('dbname=foo host=bar', host='baz') + self.assertDsnEqual(dsn, 'dbname=foo host=baz') dsn = ext.make_dsn('dbname=foo', user='postgres') self.assertDsnEqual(dsn, 'dbname=foo user=postgres') def test_no_dsn_munging(self): - dsn = ext.make_dsn('nosuchparam=whatevs') - self.assertEqual(dsn, 'nosuchparam=whatevs') - - dsn = ext.make_dsn(nosuchparam='whatevs') - self.assertEqual(dsn, 'nosuchparam=whatevs') - - self.assertRaises(psycopg2.ProgrammingError, - ext.make_dsn, 'nosuchparam=whatevs', andthis='either') + dsnin = 'dbname=a host=b user=c password=d' + dsn = ext.make_dsn(dsnin) + self.assertEqual(dsn, dsnin) @skip_before_libpq(9, 2) def test_url_is_cool(self): - dsn = ext.make_dsn('postgresql://tester:secret@/test') - self.assertEqual(dsn, 'postgresql://tester:secret@/test') + url = 'postgresql://tester:secret@/test?application_name=wat' + dsn = ext.make_dsn(url) + self.assertEqual(dsn, url) - dsn = ext.make_dsn('postgresql://tester:secret@/test', - application_name='woot') + dsn = ext.make_dsn(url, application_name='woot') self.assertDsnEqual(dsn, 'dbname=test user=tester password=secret application_name=woot') + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, 'postgresql://tester:secret@/test?nosuch=param') + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, url, nosuch="param") + class IsolationLevelsTestCase(ConnectingTestCase): diff --git a/tests/test_module.py b/tests/test_module.py index 9f0adcc9..a6918cb0 100755 --- a/tests/test_module.py +++ b/tests/test_module.py @@ -62,8 +62,8 @@ class ConnectTestCase(unittest.TestCase): self.assertEqual(self.args[2], False) def test_dsn(self): - psycopg2.connect('dbname=blah x=y') - self.assertEqual(self.args[0], 'dbname=blah x=y') + psycopg2.connect('dbname=blah host=y') + self.assertEqual(self.args[0], 'dbname=blah host=y') self.assertEqual(self.args[1], None) self.assertEqual(self.args[2], False) @@ -88,31 +88,31 @@ class ConnectTestCase(unittest.TestCase): self.assertEqual(len(self.args[0].split()), 4) def test_generic_keywords(self): - psycopg2.connect(foo='bar') - self.assertEqual(self.args[0], 'foo=bar') + psycopg2.connect(options='stuff') + self.assertEqual(self.args[0], 'options=stuff') def test_factory(self): def f(dsn, async=False): pass - psycopg2.connect(database='foo', bar='baz', connection_factory=f) - self.assertDsnEqual(self.args[0], 'dbname=foo bar=baz') + psycopg2.connect(database='foo', host='baz', connection_factory=f) + self.assertDsnEqual(self.args[0], 'dbname=foo host=baz') self.assertEqual(self.args[1], f) self.assertEqual(self.args[2], False) - psycopg2.connect("dbname=foo bar=baz", connection_factory=f) - self.assertDsnEqual(self.args[0], 'dbname=foo 
bar=baz') + psycopg2.connect("dbname=foo host=baz", connection_factory=f) + self.assertDsnEqual(self.args[0], 'dbname=foo host=baz') self.assertEqual(self.args[1], f) self.assertEqual(self.args[2], False) def test_async(self): - psycopg2.connect(database='foo', bar='baz', async=1) - self.assertDsnEqual(self.args[0], 'dbname=foo bar=baz') + psycopg2.connect(database='foo', host='baz', async=1) + self.assertDsnEqual(self.args[0], 'dbname=foo host=baz') self.assertEqual(self.args[1], None) self.assert_(self.args[2]) - psycopg2.connect("dbname=foo bar=baz", async=True) - self.assertDsnEqual(self.args[0], 'dbname=foo bar=baz') + psycopg2.connect("dbname=foo host=baz", async=True) + self.assertDsnEqual(self.args[0], 'dbname=foo host=baz') self.assertEqual(self.args[1], None) self.assert_(self.args[2]) @@ -143,16 +143,6 @@ class ConnectTestCase(unittest.TestCase): psycopg2.connect('dbname=foo', user='postgres') self.assertDsnEqual(self.args[0], 'dbname=foo user=postgres') - def test_no_dsn_munging(self): - psycopg2.connect('nosuchparam=whatevs') - self.assertEqual(self.args[0], 'nosuchparam=whatevs') - - psycopg2.connect(nosuchparam='whatevs') - self.assertEqual(self.args[0], 'nosuchparam=whatevs') - - self.assertRaises(psycopg2.ProgrammingError, - psycopg2.connect, 'nosuchparam=whatevs', andthis='either') - class ExceptionsTestCase(ConnectingTestCase): def test_attributes(self): From c9fd828f8adeeed108a326892394c47e3747b558 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 16:58:24 +0000 Subject: [PATCH 070/151] Allow make_dsn to take no parameter The behaviour of connect() is unchanged: either dsn or params must be specified. --- doc/src/extensions.rst | 7 ++----- lib/__init__.py | 3 +++ lib/extensions.py | 2 +- tests/test_connection.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/src/extensions.rst b/doc/src/extensions.rst index fa69d628..b661895d 100644 --- a/doc/src/extensions.rst +++ b/doc/src/extensions.rst @@ -497,11 +497,8 @@ Other functions Put together the arguments in *kwargs* into a connection string. If *dsn* is specified too, merge the arguments coming from both the sources. If the - same argument is specified in both the sources, the *kwargs* version - overrides the *dsn* version. - - At least one parameter is required (either *dsn* or any keyword). Note - that the empty string is a valid connection string. + same argument name is specified in both the sources, the *kwargs* value + overrides the *dsn* value. The input arguments are validated: the output should always be a valid connection string (as far as `parse_dsn()` is concerned). If not raise diff --git a/lib/__init__.py b/lib/__init__.py index 698f50d6..829e29eb 100644 --- a/lib/__init__.py +++ b/lib/__init__.py @@ -116,6 +116,9 @@ def connect(dsn=None, connection_factory=None, cursor_factory=None, library: the list of supported parameters depends on the library version. 
""" + if dsn is None and not kwargs: + raise TypeError('missing dsn and no parameters') + dsn = _ext.make_dsn(dsn, **kwargs) conn = _connect(dsn, connection_factory=connection_factory, async=async) if cursor_factory is not None: diff --git a/lib/extensions.py b/lib/extensions.py index 39cc4de6..21300985 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -158,7 +158,7 @@ class NoneAdapter(object): def make_dsn(dsn=None, **kwargs): """Convert a set of keywords into a connection strings.""" if dsn is None and not kwargs: - raise TypeError('missing dsn and no parameters') + return '' # If no kwarg is specified don't mung the dsn, but verify it if not kwargs: diff --git a/tests/test_connection.py b/tests/test_connection.py index 0158f5cc..ddec8cfc 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -381,8 +381,8 @@ class MakeDsnTestCase(ConnectingTestCase): def assertDsnEqual(self, dsn1, dsn2): self.assertEqual(set(dsn1.split()), set(dsn2.split())) - def test_there_has_to_be_something(self): - self.assertRaises(TypeError, ext.make_dsn) + def test_empty_arguments(self): + self.assertEqual(ext.make_dsn(), '') def test_empty_string(self): dsn = ext.make_dsn('') From e33073576cc7d238a8a01e6674b1232b928cdd27 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 17:02:47 +0000 Subject: [PATCH 071/151] Brag about make_dsn in the NEWS file --- NEWS | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index c1e4152f..9467fea7 100644 --- a/NEWS +++ b/NEWS @@ -6,7 +6,10 @@ What's new in psycopg 2.7 New features: -- Added `~psycopg2.extensions.parse_dsn()` function (:ticket:`#321`). +- Added `~psycopg2.extensions.parse_dsn()` and + `~psycopg2.extensions.make_dsn()` functions (:tickets:`#321, #363`). + `~psycopg2.connect()` now can take both *dsn* and keyword arguments, merging + them together. - Added `~psycopg2.__libpq_version__` and `~psycopg2.extensions.libpq_version()` to inspect the version of the ``libpq`` library the module was compiled/loaded with From ab5d8f419069bec1c35329fd67b9fe76fbbce4c8 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 3 Mar 2016 17:28:56 +0000 Subject: [PATCH 072/151] Style the dsn arg in connect() as a normal optional parameter Plus some more connect() docs wordsmithing. --- doc/src/module.rst | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/doc/src/module.rst b/doc/src/module.rst index 25a9ba27..97fbdf19 100644 --- a/doc/src/module.rst +++ b/doc/src/module.rst @@ -17,7 +17,7 @@ The module interface respects the standard defined in the |DBAPI|_. single: DSN (Database Source Name) .. function:: - connect(dsn, connection_factory=None, cursor_factory=None, async=False, \*\*kwargs) + connect(dsn=None, connection_factory=None, cursor_factory=None, async=False, \*\*kwargs) Create a new database session and return a new `connection` object. @@ -31,7 +31,9 @@ The module interface respects the standard defined in the |DBAPI|_. conn = psycopg2.connect(dbname"test", user="postgres", password="secret") or using a mix of both: if the same parameter name is specified in both - sources the *kwargs* value will have precedence over the *dsn* value. + sources, the *kwargs* value will have precedence over the *dsn* value. + Note that either the *dsn* or at least one connection-related keyword + argument is required. The basic connection parameters are: @@ -42,7 +44,7 @@ The module interface respects the standard defined in the |DBAPI|_. 
- `!port` -- connection port number (defaults to 5432 if not provided) Any other connection parameter supported by the client library/server can - be passed either in the connection string or as keywords. The PostgreSQL + be passed either in the connection string or as a keyword. The PostgreSQL documentation contains the complete list of the `supported parameters`__. Also note that the same parameters can be passed to the client library using `environment variables`__. @@ -87,8 +89,8 @@ The module interface respects the standard defined in the |DBAPI|_. .. extension:: - The parameters *connection_factory* and *async* are Psycopg extensions - to the |DBAPI|. + The non-connection-related keyword parameters are Psycopg extensions + to the |DBAPI|_. .. data:: apilevel From da6e061ee8c6e3a5cad4386daf5aa814cdb7be80 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 8 Mar 2016 15:44:29 +0100 Subject: [PATCH 073/151] Use python-defined make_dsn() for ReplicationConnection class --- lib/__init__.py | 2 +- lib/extras.py | 15 +- psycopg/cursor.h | 2 - psycopg/psycopg.h | 5 - psycopg/psycopgmodule.c | 209 +------------------------- psycopg/replication_connection.h | 6 +- psycopg/replication_connection_type.c | 109 +++++++------- tests/test_module.py | 2 +- 8 files changed, 73 insertions(+), 277 deletions(-) diff --git a/lib/__init__.py b/lib/__init__.py index 2cc0acb2..829e29eb 100644 --- a/lib/__init__.py +++ b/lib/__init__.py @@ -56,7 +56,7 @@ from psycopg2._psycopg import Error, Warning, DataError, DatabaseError, Programm from psycopg2._psycopg import IntegrityError, InterfaceError, InternalError from psycopg2._psycopg import NotSupportedError, OperationalError -from psycopg2._psycopg import _connect, parse_args, apilevel, threadsafety, paramstyle +from psycopg2._psycopg import _connect, apilevel, threadsafety, paramstyle from psycopg2._psycopg import __version__, __libpq_version__ from psycopg2 import tz diff --git a/lib/extras.py b/lib/extras.py index 6e815d69..78452239 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -441,25 +441,14 @@ class MinTimeLoggingCursor(LoggingCursor): return LoggingCursor.callproc(self, procname, vars) -class ReplicationConnectionBase(_replicationConnection): - """ - Base class for Logical and Physical replication connection - classes. Uses `ReplicationCursor` automatically. 
- """ - - def __init__(self, *args, **kwargs): - super(ReplicationConnectionBase, self).__init__(*args, **kwargs) - self.cursor_factory = ReplicationCursor - - -class LogicalReplicationConnection(ReplicationConnectionBase): +class LogicalReplicationConnection(_replicationConnection): def __init__(self, *args, **kwargs): kwargs['replication_type'] = REPLICATION_LOGICAL super(LogicalReplicationConnection, self).__init__(*args, **kwargs) -class PhysicalReplicationConnection(ReplicationConnectionBase): +class PhysicalReplicationConnection(_replicationConnection): def __init__(self, *args, **kwargs): kwargs['replication_type'] = REPLICATION_PHYSICAL diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 44d8a47a..5170900f 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -72,8 +72,6 @@ struct cursorObject { #define DEFAULT_COPYSIZE 16384 #define DEFAULT_COPYBUFF 8192 - /* replication cursor attrs */ - PyObject *tuple_factory; /* factory for result tuples */ PyObject *tzinfo_factory; /* factory for tzinfo objects */ diff --git a/psycopg/psycopg.h b/psycopg/psycopg.h index 8134a83f..7834cf67 100644 --- a/psycopg/psycopg.h +++ b/psycopg/psycopg.h @@ -120,11 +120,6 @@ typedef struct connectionObject connectionObject; typedef struct replicationMessageObject replicationMessageObject; /* some utility functions */ -HIDDEN PyObject *parse_arg(int pos, char *name, PyObject *defval, PyObject *args, PyObject *kwargs); -HIDDEN PyObject *psyco_parse_args(PyObject *self, PyObject *args, PyObject *kwargs); -HIDDEN PyObject *psyco_parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs); -HIDDEN PyObject *psyco_make_dsn(PyObject *self, PyObject *args, PyObject *kwargs); - RAISES HIDDEN PyObject *psyco_set_error(PyObject *exc, cursorObject *curs, const char *msg); HIDDEN char *psycopg_escape_string(connectionObject *conn, diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index fc9a8ebd..5e8eb5b7 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -74,103 +74,23 @@ HIDDEN PyObject *psyco_null = NULL; HIDDEN PyObject *psyco_DescriptionType = NULL; -/* finds a keyword or positional arg (pops it from kwargs if found there) */ -PyObject * -parse_arg(int pos, char *name, PyObject *defval, PyObject *args, PyObject *kwargs) -{ - Py_ssize_t nargs = PyTuple_GET_SIZE(args); - PyObject *val = NULL; - - if (kwargs && PyMapping_HasKeyString(kwargs, name)) { - val = PyMapping_GetItemString(kwargs, name); - Py_XINCREF(val); - PyMapping_DelItemString(kwargs, name); /* pop from the kwargs dict! */ - } - if (nargs > pos) { - if (!val) { - val = PyTuple_GET_ITEM(args, pos); - Py_XINCREF(val); - } else { - PyErr_Format(PyExc_TypeError, - "parse_args() got multiple values for keyword argument '%s'", name); - return NULL; - } - } - if (!val) { - val = defval; - Py_XINCREF(val); - } - - return val; -} - - -#define psyco_parse_args_doc \ -"parse_args(...) 
-- parse connection parameters.\n\n" \ -"Return a tuple of (dsn, connection_factory, async)" - -PyObject * -psyco_parse_args(PyObject *self, PyObject *args, PyObject *kwargs) -{ - Py_ssize_t nargs = PyTuple_GET_SIZE(args); - PyObject *dsn = NULL; - PyObject *factory = NULL; - PyObject *async = NULL; - PyObject *res = NULL; - - if (nargs > 3) { - PyErr_Format(PyExc_TypeError, - "parse_args() takes at most 3 arguments (%d given)", (int)nargs); - goto exit; - } - /* parse and remove all keywords we know, so they are not interpreted as part of DSN */ - if (!(dsn = parse_arg(0, "dsn", Py_None, args, kwargs))) { goto exit; } - if (!(factory = parse_arg(1, "connection_factory", Py_None, - args, kwargs))) { goto exit; } - if (!(async = parse_arg(2, "async", Py_False, args, kwargs))) { goto exit; } - - if (kwargs && PyMapping_Size(kwargs) > 0) { - if (dsn == Py_None) { - Py_DECREF(dsn); - if (!(dsn = psyco_make_dsn(NULL, NULL, kwargs))) { goto exit; } - } else { - PyErr_SetString(PyExc_TypeError, "both dsn and parameters given"); - goto exit; - } - } else { - if (dsn == Py_None) { - PyErr_SetString(PyExc_TypeError, "missing dsn and no parameters"); - goto exit; - } - } - - res = PyTuple_Pack(3, dsn, factory, async); - -exit: - Py_XDECREF(dsn); - Py_XDECREF(factory); - Py_XDECREF(async); - - return res; -} - - /** connect module-level function **/ #define psyco_connect_doc \ -"_connect(dsn, [connection_factory], [async], **kwargs) -- New database connection.\n\n" +"_connect(dsn, [connection_factory], [async]) -- New database connection.\n\n" static PyObject * psyco_connect(PyObject *self, PyObject *args, PyObject *keywds) { PyObject *conn = NULL; - PyObject *tuple = NULL; PyObject *factory = NULL; const char *dsn = NULL; int async = 0; - if (!(tuple = psyco_parse_args(self, args, keywds))) { goto exit; } - - if (!PyArg_ParseTuple(tuple, "s|Oi", &dsn, &factory, &async)) { goto exit; } + static char *kwlist[] = {"dsn", "connection_factory", "async", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, keywds, "s|Oi", kwlist, + &dsn, &factory, &async)) { + return NULL; + } Dprintf("psyco_connect: dsn = '%s', async = %d", dsn, async); @@ -192,9 +112,6 @@ psyco_connect(PyObject *self, PyObject *args, PyObject *keywds) conn = PyObject_CallFunction(factory, "si", dsn, async); } -exit: - Py_XDECREF(tuple); - return conn; } @@ -202,7 +119,7 @@ exit: #define psyco_parse_dsn_doc \ "parse_dsn(dsn) -> dict -- parse a connection string into parameters" -PyObject * +static PyObject * psyco_parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs) { char *err = NULL; @@ -254,114 +171,6 @@ exit: } -#define psyco_make_dsn_doc "make_dsn(**kwargs) -> str" - -PyObject * -psyco_make_dsn(PyObject *self, PyObject *args, PyObject *kwargs) -{ - Py_ssize_t len, pos; - PyObject *res = NULL; - PyObject *key = NULL, *value = NULL; - PyObject *newkey, *newval; - PyObject *dict = NULL; - char *str = NULL, *p, *q; - - if (args && (len = PyTuple_Size(args)) > 0) { - PyErr_Format(PyExc_TypeError, "make_dsn() takes no arguments (%d given)", (int)len); - goto exit; - } - if (kwargs == NULL) { - return Text_FromUTF8(""); - } - - /* iterate through kwargs, calculating the total resulting string - length and saving prepared key/values to a temp. 
dict */ - if (!(dict = PyDict_New())) { goto exit; } - - len = 0; - pos = 0; - while (PyDict_Next(kwargs, &pos, &key, &value)) { - if (value == NULL || value == Py_None) { continue; } - - Py_INCREF(key); /* for ensure_bytes */ - if (!(newkey = psycopg_ensure_bytes(key))) { goto exit; } - - /* special handling of 'database' keyword */ - if (strcmp(Bytes_AsString(newkey), "database") == 0) { - key = Bytes_FromString("dbname"); - Py_DECREF(newkey); - } else { - key = newkey; - } - - /* now transform the value */ - if (Bytes_CheckExact(value)) { - Py_INCREF(value); - } else if (PyUnicode_CheckExact(value)) { - if (!(value = PyUnicode_AsUTF8String(value))) { goto exit; } - } else { - /* this could be port=5432, so we need to get the text representation */ - if (!(value = PyObject_Str(value))) { goto exit; } - /* and still ensure it's bytes() (but no need to incref here) */ - if (!(value = psycopg_ensure_bytes(value))) { goto exit; } - } - - /* passing NULL for plen checks for NIL bytes in content and errors out */ - if (Bytes_AsStringAndSize(value, &str, NULL) < 0) { goto exit; } - /* escape any special chars */ - if (!(str = psycopg_escape_conninfo(str, 0))) { goto exit; } - if (!(newval = Bytes_FromString(str))) { - goto exit; - } - PyMem_Free(str); - str = NULL; - Py_DECREF(value); - value = newval; - - /* finally put into the temp. dict */ - if (PyDict_SetItem(dict, key, value) < 0) { goto exit; } - - len += Bytes_GET_SIZE(key) + Bytes_GET_SIZE(value) + 2; /* =, space or NIL */ - - Py_DECREF(key); - Py_DECREF(value); - } - key = NULL; - value = NULL; - - if (!(str = PyMem_Malloc(len))) { - PyErr_NoMemory(); - goto exit; - } - - p = str; - pos = 0; - while (PyDict_Next(dict, &pos, &newkey, &newval)) { - if (p != str) { - *(p++) = ' '; - } - if (Bytes_AsStringAndSize(newkey, &q, &len) < 0) { goto exit; } - strncpy(p, q, len); - p += len; - *(p++) = '='; - if (Bytes_AsStringAndSize(newval, &q, &len) < 0) { goto exit; } - strncpy(p, q, len); - p += len; - } - *p = '\0'; - - res = Text_FromUTF8AndSize(str, p - str); - -exit: - PyMem_Free(str); - Py_XDECREF(key); - Py_XDECREF(value); - Py_XDECREF(dict); - - return res; -} - - #define psyco_quote_ident_doc \ "quote_ident(str, conn_or_curs) -> str -- wrapper around PQescapeIdentifier\n\n" \ ":Parameters:\n" \ @@ -1016,12 +825,8 @@ error: static PyMethodDef psycopgMethods[] = { {"_connect", (PyCFunction)psyco_connect, METH_VARARGS|METH_KEYWORDS, psyco_connect_doc}, - {"parse_args", (PyCFunction)psyco_parse_args, - METH_VARARGS|METH_KEYWORDS, psyco_parse_args_doc}, {"parse_dsn", (PyCFunction)psyco_parse_dsn, METH_VARARGS|METH_KEYWORDS, psyco_parse_dsn_doc}, - {"make_dsn", (PyCFunction)psyco_make_dsn, - METH_VARARGS|METH_KEYWORDS, psyco_make_dsn_doc}, {"quote_ident", (PyCFunction)psyco_quote_ident, METH_VARARGS|METH_KEYWORDS, psyco_quote_ident_doc}, {"adapt", (PyCFunction)psyco_microprotocols_adapt, diff --git a/psycopg/replication_connection.h b/psycopg/replication_connection.h index 9198f5de..e693038a 100644 --- a/psycopg/replication_connection.h +++ b/psycopg/replication_connection.h @@ -40,8 +40,10 @@ typedef struct replicationConnectionObject { long int type; } replicationConnectionObject; -#define REPLICATION_PHYSICAL 1 -#define REPLICATION_LOGICAL 2 +/* The funny constant values should help to avoid mixups with some + commonly used numbers like 1 and 2. 
*/ +#define REPLICATION_PHYSICAL 12345678 +#define REPLICATION_LOGICAL 87654321 extern HIDDEN PyObject *replicationPhysicalConst; extern HIDDEN PyObject *replicationLogicalConst; diff --git a/psycopg/replication_connection_type.c b/psycopg/replication_connection_type.c index 154a0ddd..5e5d2229 100644 --- a/psycopg/replication_connection_type.c +++ b/psycopg/replication_connection_type.c @@ -58,81 +58,88 @@ psyco_repl_conn_get_type(replicationConnectionObject *self) return res; } + static int replicationConnection_init(PyObject *obj, PyObject *args, PyObject *kwargs) { replicationConnectionObject *self = (replicationConnectionObject *)obj; - PyObject *dsn = NULL; - PyObject *async = NULL; - PyObject *tmp = NULL; - const char *repl = NULL; + PyObject *dsn = NULL, *replication_type = NULL, + *item = NULL, *ext = NULL, *make_dsn = NULL, + *extras = NULL, *cursor = NULL; + int async = 0; int ret = -1; - Py_XINCREF(args); - Py_XINCREF(kwargs); + /* 'replication_type' is not actually optional, but there's no + good way to put it before 'async' in the list */ + static char *kwlist[] = {"dsn", "async", "replication_type", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|iO", kwlist, + &dsn, &async, &replication_type)) { return ret; } - /* dsn, async, replication_type */ - if (!(dsn = parse_arg(0, "dsn", Py_None, args, kwargs))) { goto exit; } - if (!(async = parse_arg(1, "async", Py_False, args, kwargs))) { goto exit; } - if (!(tmp = parse_arg(2, "replication_type", Py_None, args, kwargs))) { goto exit; } + /* + We have to call make_dsn() to add replication-specific + connection parameters, because the DSN might be an URI (if there + were no keyword arguments to connect() it is passed unchanged). + */ + /* we reuse args and kwargs to call make_dsn() and parent type's tp_init() */ + if (!(kwargs = PyDict_New())) { return ret; } + Py_INCREF(args); - if (tmp == replicationPhysicalConst) { + /* we also reuse the dsn to hold the result of the make_dsn() call */ + Py_INCREF(dsn); + + if (!(ext = PyImport_ImportModule("psycopg2.extensions"))) { goto exit; } + if (!(make_dsn = PyObject_GetAttrString(ext, "make_dsn"))) { goto exit; } + + /* all the nice stuff is located in python-level ReplicationCursor class */ + if (!(extras = PyImport_ImportModule("psycopg2.extras"))) { goto exit; } + if (!(cursor = PyObject_GetAttrString(extras, "ReplicationCursor"))) { goto exit; } + + /* checking the object reference helps to avoid recognizing + unrelated integer constants as valid input values */ + if (replication_type == replicationPhysicalConst) { self->type = REPLICATION_PHYSICAL; - repl = "true"; - } else if (tmp == replicationLogicalConst) { + +#define SET_ITEM(k, v) \ + if (!(item = Text_FromUTF8(#v))) { goto exit; } \ + if (PyDict_SetItemString(kwargs, #k, item) != 0) { goto exit; } \ + Py_DECREF(item); \ + item = NULL; + + SET_ITEM(replication, true); + SET_ITEM(dbname, replication); /* required for .pgpass lookup */ + } else if (replication_type == replicationLogicalConst) { self->type = REPLICATION_LOGICAL; - repl = "database"; + + SET_ITEM(replication, database); +#undef SET_ITEM } else { PyErr_SetString(PyExc_TypeError, "replication_type must be either REPLICATION_PHYSICAL or REPLICATION_LOGICAL"); goto exit; } - Py_DECREF(tmp); - tmp = NULL; - - if (dsn != Py_None) { - if (kwargs && PyMapping_Size(kwargs) > 0) { - PyErr_SetString(PyExc_TypeError, "both dsn and parameters given"); - goto exit; - } else { - if (!(tmp = PyTuple_Pack(1, dsn))) { goto exit; } - - Py_XDECREF(kwargs); - if (!(kwargs = 
psyco_parse_dsn(NULL, tmp, NULL))) { goto exit; } - } - } else { - if (!(kwargs && PyMapping_Size(kwargs) > 0)) { - PyErr_SetString(PyExc_TypeError, "missing dsn and no parameters"); - goto exit; - } - } - - if (!PyMapping_HasKeyString(kwargs, "replication")) { - PyMapping_SetItemString(kwargs, "replication", Text_FromUTF8(repl)); - } - /* with physical specify dbname=replication for .pgpass lookup */ - if (self->type == REPLICATION_PHYSICAL) { - PyMapping_SetItemString(kwargs, "dbname", Text_FromUTF8("replication")); - } - - Py_DECREF(dsn); - if (!(dsn = psyco_make_dsn(NULL, NULL, kwargs))) { goto exit; } Py_DECREF(args); - Py_DECREF(kwargs); - kwargs = NULL; - if (!(args = PyTuple_Pack(2, dsn, async))) { goto exit; } + if (!(args = PyTuple_Pack(1, dsn))) { goto exit; } + Py_DECREF(dsn); + if (!(dsn = PyObject_Call(make_dsn, args, kwargs))) { goto exit; } + + Py_DECREF(args); + if (!(args = Py_BuildValue("(Oi)", dsn, async))) { goto exit; } + + /* only attempt the connection once we've handled all possible errors */ if ((ret = connectionType.tp_init(obj, args, NULL)) < 0) { goto exit; } self->conn.autocommit = 1; - self->conn.cursor_factory = (PyObject *)&replicationCursorType; - Py_INCREF(self->conn.cursor_factory); + Py_INCREF(self->conn.cursor_factory = cursor); exit: - Py_XDECREF(tmp); + Py_XDECREF(item); + Py_XDECREF(ext); + Py_XDECREF(make_dsn); + Py_XDECREF(extras); + Py_XDECREF(cursor); Py_XDECREF(dsn); - Py_XDECREF(async); Py_XDECREF(args); Py_XDECREF(kwargs); diff --git a/tests/test_module.py b/tests/test_module.py index 7d4ae9a3..1a9a19d4 100755 --- a/tests/test_module.py +++ b/tests/test_module.py @@ -40,7 +40,7 @@ class ConnectTestCase(unittest.TestCase): self.args = (dsn, connection_factory, async) self._connect_orig = psycopg2._connect - psycopg2._connect = connect_stub + psycopg2._connect = conect_stub def tearDown(self): psycopg2._connect = self._connect_orig From 1d52f34e6045648397709c74b3c2538404dc679a Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 8 Mar 2016 18:23:32 +0100 Subject: [PATCH 074/151] We don't need to expose cursor_init(), call tp_init() on the type instead. 
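
Taken together, the replication connection changes above boil down to the
following minimal usage sketch (the dsn is an example; the factory adds
``replication=database`` to it through ``make_dsn()`` and installs
``ReplicationCursor`` as the cursor factory)::

    import psycopg2
    from psycopg2.extras import LogicalReplicationConnection

    conn = psycopg2.connect("dbname=test",
                            connection_factory=LogicalReplicationConnection)
    cur = conn.cursor()   # a ReplicationCursor, per the factory above
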
--- psycopg/cursor.h | 3 --- psycopg/cursor_type.c | 2 +- psycopg/replication_cursor_type.c | 13 ++++--------- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/psycopg/cursor.h b/psycopg/cursor.h index 5170900f..e291d45f 100644 --- a/psycopg/cursor.h +++ b/psycopg/cursor.h @@ -90,14 +90,11 @@ struct cursorObject { /* C-callable functions in cursor_int.c and cursor_type.c */ -HIDDEN int cursor_init(PyObject *obj, PyObject *args, PyObject *kwargs); - BORROWED HIDDEN PyObject *curs_get_cast(cursorObject *self, PyObject *oid); HIDDEN void curs_reset(cursorObject *self); HIDDEN int psyco_curs_withhold_set(cursorObject *self, PyObject *pyvalue); HIDDEN int psyco_curs_scrollable_set(cursorObject *self, PyObject *pyvalue); - /* exception-raising macros */ #define EXC_IF_CURS_CLOSED(self) \ do { \ diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index 63bd5a10..cd8d5ca3 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -1901,7 +1901,7 @@ cursor_dealloc(PyObject* obj) Py_TYPE(obj)->tp_free(obj); } -int +static int cursor_init(PyObject *obj, PyObject *args, PyObject *kwargs) { PyObject *conn; diff --git a/psycopg/replication_cursor_type.c b/psycopg/replication_cursor_type.c index f652984e..8d96c0e1 100644 --- a/psycopg/replication_cursor_type.c +++ b/psycopg/replication_cursor_type.c @@ -244,8 +244,10 @@ static struct PyGetSetDef replicationCursorObject_getsets[] = { }; static int -replicationCursor_setup(replicationCursorObject* self) +replicationCursor_init(PyObject *obj, PyObject *args, PyObject *kwargs) { + replicationCursorObject *self = (replicationCursorObject *)obj; + self->started = 0; self->consuming = 0; self->decode = 0; @@ -254,14 +256,7 @@ replicationCursor_setup(replicationCursorObject* self) self->flush_lsn = 0; self->apply_lsn = 0; - return 0; -} - -static int -replicationCursor_init(PyObject *obj, PyObject *args, PyObject *kwargs) -{ - replicationCursor_setup((replicationCursorObject *)obj); - return cursor_init(obj, args, kwargs); + return cursorType.tp_init(obj, args, kwargs); } static PyObject * From 2de2ed7c6354fb640b43ce24cf45bdb5183fd408 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 8 Mar 2016 18:35:55 +0100 Subject: [PATCH 075/151] Remove some dead code --- psycopg/psycopg.h | 2 -- psycopg/psycopgmodule.c | 2 +- psycopg/utils.c | 44 ----------------------------------------- 3 files changed, 1 insertion(+), 47 deletions(-) diff --git a/psycopg/psycopg.h b/psycopg/psycopg.h index 7834cf67..adda12d9 100644 --- a/psycopg/psycopg.h +++ b/psycopg/psycopg.h @@ -125,8 +125,6 @@ RAISES HIDDEN PyObject *psyco_set_error(PyObject *exc, cursorObject *curs, const HIDDEN char *psycopg_escape_string(connectionObject *conn, const char *from, Py_ssize_t len, char *to, Py_ssize_t *tolen); HIDDEN char *psycopg_escape_identifier_easy(const char *from, Py_ssize_t len); -HIDDEN char *psycopg_escape_conninfo(const char *from, Py_ssize_t len); - HIDDEN int psycopg_strdup(char **to, const char *from, Py_ssize_t len); HIDDEN int psycopg_is_text_file(PyObject *f); diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index 5e8eb5b7..c08cd70e 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -73,7 +73,6 @@ HIDDEN PyObject *psyco_null = NULL; /* The type of the cursor.description items */ HIDDEN PyObject *psyco_DescriptionType = NULL; - /** connect module-level function **/ #define psyco_connect_doc \ "_connect(dsn, [connection_factory], [async]) -- New database connection.\n\n" @@ -87,6 +86,7 @@ psyco_connect(PyObject 
*self, PyObject *args, PyObject *keywds) int async = 0; static char *kwlist[] = {"dsn", "connection_factory", "async", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, keywds, "s|Oi", kwlist, &dsn, &factory, &async)) { return NULL; diff --git a/psycopg/utils.c b/psycopg/utils.c index e9dc3ba6..ec8e47c8 100644 --- a/psycopg/utils.c +++ b/psycopg/utils.c @@ -124,50 +124,6 @@ psycopg_escape_identifier_easy(const char *from, Py_ssize_t len) return rv; } -char * -psycopg_escape_conninfo(const char *from, Py_ssize_t len) -{ - char *rv = NULL; - const char *src; - const char *end; - char *dst; - int space = 0; - - if (!len) { len = strlen(from); } - end = from + len; - - if (!(rv = PyMem_Malloc(3 + 2 * len))) { - PyErr_NoMemory(); - return NULL; - } - - /* check for any whitespace or empty string */ - if (from < end && *from) { - for (src = from; src < end && *src; ++src) { - if (isspace(*src)) { - space = 1; - break; - } - } - } else { - /* empty string: we should produce '' */ - space = 1; - } - - dst = rv; - if (space) { *(dst++) = '\''; } - /* scan and copy */ - for (src = from; src < end && *src; ++src, ++dst) { - if (*src == '\'' || *src == '\\') - *(dst++) = '\\'; - *dst = *src; - } - if (space) { *(dst++) = '\''; } - *dst = '\0'; - - return rv; -} - /* Duplicate a string. * * Allocate a new buffer on the Python heap containing the new string. From b21c8f7a4e4c64795e8a10c30f68531ad1d9580f Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 8 Mar 2016 18:34:22 +0100 Subject: [PATCH 076/151] Move replication-related imports to extras.py --- lib/extensions.py | 2 -- lib/extras.py | 8 ++++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/lib/extensions.py b/lib/extensions.py index 65792fc7..21300985 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -65,8 +65,6 @@ from psycopg2._psycopg import string_types, binary_types, new_type, new_array_ty from psycopg2._psycopg import ISQLQuote, Notify, Diagnostics, Column from psycopg2._psycopg import QueryCanceledError, TransactionRollbackError -from psycopg2._psycopg import REPLICATION_PHYSICAL, REPLICATION_LOGICAL -from psycopg2._psycopg import ReplicationConnection, ReplicationCursor, ReplicationMessage try: from psycopg2._psycopg import set_wait_callback, get_wait_callback diff --git a/lib/extras.py b/lib/extras.py index 78452239..6ae98517 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -39,12 +39,12 @@ import psycopg2 from psycopg2 import extensions as _ext from psycopg2.extensions import cursor as _cursor from psycopg2.extensions import connection as _connection -from psycopg2.extensions import REPLICATION_PHYSICAL, REPLICATION_LOGICAL -from psycopg2.extensions import ReplicationConnection as _replicationConnection -from psycopg2.extensions import ReplicationCursor as _replicationCursor -from psycopg2.extensions import ReplicationMessage from psycopg2.extensions import adapt as _A, quote_ident from psycopg2.extensions import b +from psycopg2._psycopg import REPLICATION_PHYSICAL, REPLICATION_LOGICAL +from psycopg2._psycopg import ReplicationConnection as _replicationConnection +from psycopg2._psycopg import ReplicationCursor as _replicationCursor +from psycopg2._psycopg import ReplicationMessage class DictCursorBase(_cursor): From 3f10b4dd315e6d86813e302a6ed7d0143b7484ec Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Tue, 8 Mar 2016 18:27:57 +0100 Subject: [PATCH 077/151] Remove duplicated doc for make_dsn() --- doc/src/extensions.rst | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git 
a/doc/src/extensions.rst b/doc/src/extensions.rst index 1a0e154e..b661895d 100644 --- a/doc/src/extensions.rst +++ b/doc/src/extensions.rst @@ -19,28 +19,6 @@ Instances of these classes are usually returned by factory functions or attributes. Their definitions are exposed here to allow subclassing, introspection etc. -.. function:: make_dsn(**kwargs) - - Wrap keyword parameters into a connection string, applying necessary - quoting and escaping any special characters (namely, single quote and - backslash). - - Example (note the order of parameters in the resulting string is - arbitrary):: - - >>> psycopg2.extensions.make_dsn(dbname='test', user='postgres', password='secret') - 'user=postgres dbname=test password=secret' - - As a special case, the *database* keyword is translated to *dbname*:: - - >>> psycopg2.extensions.make_dsn(database='test') - 'dbname=test' - - An example of quoting (using `print()` for clarity):: - - >>> print(psycopg2.extensions.make_dsn(database='test', password="some\\thing ''special")) - password='some\\thing \'\'special' dbname=test - .. class:: connection(dsn, async=False) Is the class usually returned by the `~psycopg2.connect()` function. From d829a75f2ef1b784a4304c65eb2b21c1a3b2aaf1 Mon Sep 17 00:00:00 2001 From: "Bernhard M. Wiedemann" Date: Wed, 9 Mar 2016 16:32:38 +0100 Subject: [PATCH 078/151] dont claim copyright for future years otherwise, when building from unchanged source in 2018, it would claim Copyright 2018 which is not true Being able to reproduce identical output from identical input is important to Linux distributions --- doc/src/conf.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/doc/src/conf.py b/doc/src/conf.py index 18b81e07..94ffa349 100644 --- a/doc/src/conf.py +++ b/doc/src/conf.py @@ -42,9 +42,7 @@ master_doc = 'index' # General information about the project. project = u'Psycopg' -from datetime import date -year = date.today().year -copyright = u'2001-%s, Federico Di Gregorio, Daniele Varrazzo' % year +copyright = u'2001-2016, Federico Di Gregorio, Daniele Varrazzo' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the From 88d3d7fc7ef761da15dbe7e4f38b4d2c7415ccfb Mon Sep 17 00:00:00 2001 From: Gabriel Linder Date: Wed, 9 Mar 2016 21:51:02 +0100 Subject: [PATCH 079/151] Typo. --- doc/src/advanced.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/advanced.rst b/doc/src/advanced.rst index e63fcff1..f2e279f8 100644 --- a/doc/src/advanced.rst +++ b/doc/src/advanced.rst @@ -47,7 +47,7 @@ it is the class where query building, execution and result type-casting into Python variables happens. The `~psycopg2.extras` module contains several examples of :ref:`connection -and cursor sublcasses `. +and cursor subclasses `. .. note:: From 2cdc8d61a2da9f02c5b61daca6c83b61aca386f3 Mon Sep 17 00:00:00 2001 From: Jason Erickson Date: Mon, 8 Jun 2015 11:37:23 -0600 Subject: [PATCH 080/151] Fix Windows 64bit lobject support for very (>2GB) large objects The type 'long' with Windows Visual C is 32bits in size for both 32bit and 64bit platforms. Changed type of variables that could be > 2GB from long to Py_ssize_t. 
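
From the Python side, the fix above concerns calls like the following hedged
sketch, which needs a 9.3+ server and a libpq built with the 64-bit large
object API (the dsn is an example)::

    import psycopg2

    conn = psycopg2.connect("dbname=test")
    lo = conn.lobject(0, "wb")       # create a new large object
    lo.seek(3 * 1024 ** 3)           # a position past 2GB now fits in Py_ssize_t
    assert lo.tell() == 3 * 1024 ** 3
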
--- psycopg/lobject.h | 4 ++-- psycopg/lobject_int.c | 22 +++++++++++----------- psycopg/lobject_type.c | 20 +++++++++++--------- 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/psycopg/lobject.h b/psycopg/lobject.h index b9c8c3d8..73cf6192 100644 --- a/psycopg/lobject.h +++ b/psycopg/lobject.h @@ -60,8 +60,8 @@ RAISES_NEG HIDDEN int lobject_export(lobjectObject *self, const char *filename); RAISES_NEG HIDDEN Py_ssize_t lobject_read(lobjectObject *self, char *buf, size_t len); RAISES_NEG HIDDEN Py_ssize_t lobject_write(lobjectObject *self, const char *buf, size_t len); -RAISES_NEG HIDDEN long lobject_seek(lobjectObject *self, long pos, int whence); -RAISES_NEG HIDDEN long lobject_tell(lobjectObject *self); +RAISES_NEG HIDDEN Py_ssize_t lobject_seek(lobjectObject *self, Py_ssize_t pos, int whence); +RAISES_NEG HIDDEN Py_ssize_t lobject_tell(lobjectObject *self); RAISES_NEG HIDDEN int lobject_truncate(lobjectObject *self, size_t len); RAISES_NEG HIDDEN int lobject_close(lobjectObject *self); diff --git a/psycopg/lobject_int.c b/psycopg/lobject_int.c index 8788c100..279ef1e2 100644 --- a/psycopg/lobject_int.c +++ b/psycopg/lobject_int.c @@ -376,12 +376,12 @@ lobject_read(lobjectObject *self, char *buf, size_t len) /* lobject_seek - move the current position in the lo */ -RAISES_NEG long -lobject_seek(lobjectObject *self, long pos, int whence) +RAISES_NEG Py_ssize_t +lobject_seek(lobjectObject *self, Py_ssize_t pos, int whence) { PGresult *pgres = NULL; char *error = NULL; - long where; + Py_ssize_t where; Dprintf("lobject_seek: fd = %d, pos = %ld, whence = %d", self->fd, pos, whence); @@ -391,12 +391,12 @@ lobject_seek(lobjectObject *self, long pos, int whence) #ifdef HAVE_LO64 if (self->conn->server_version < 90300) { - where = (long)lo_lseek(self->conn->pgconn, self->fd, (int)pos, whence); + where = (Py_ssize_t)lo_lseek(self->conn->pgconn, self->fd, (int)pos, whence); } else { - where = lo_lseek64(self->conn->pgconn, self->fd, pos, whence); + where = (Py_ssize_t)lo_lseek64(self->conn->pgconn, self->fd, pos, whence); } #else - where = (long)lo_lseek(self->conn->pgconn, self->fd, (int)pos, whence); + where = (Py_ssize_t)lo_lseek(self->conn->pgconn, self->fd, (int)pos, whence); #endif Dprintf("lobject_seek: where = %ld", where); if (where < 0) @@ -412,12 +412,12 @@ lobject_seek(lobjectObject *self, long pos, int whence) /* lobject_tell - tell the current position in the lo */ -RAISES_NEG long +RAISES_NEG Py_ssize_t lobject_tell(lobjectObject *self) { PGresult *pgres = NULL; char *error = NULL; - long where; + Py_ssize_t where; Dprintf("lobject_tell: fd = %d", self->fd); @@ -426,12 +426,12 @@ lobject_tell(lobjectObject *self) #ifdef HAVE_LO64 if (self->conn->server_version < 90300) { - where = (long)lo_tell(self->conn->pgconn, self->fd); + where = (Py_ssize_t)lo_tell(self->conn->pgconn, self->fd); } else { - where = lo_tell64(self->conn->pgconn, self->fd); + where = (Py_ssize_t)lo_tell64(self->conn->pgconn, self->fd); } #else - where = (long)lo_tell(self->conn->pgconn, self->fd); + where = (Py_ssize_t)lo_tell(self->conn->pgconn, self->fd); #endif Dprintf("lobject_tell: where = %ld", where); if (where < 0) diff --git a/psycopg/lobject_type.c b/psycopg/lobject_type.c index a43325d4..634e76ca 100644 --- a/psycopg/lobject_type.c +++ b/psycopg/lobject_type.c @@ -105,7 +105,7 @@ psyco_lobj_write(lobjectObject *self, PyObject *args) goto exit; } - rv = PyInt_FromLong((long)res); + rv = PyInt_FromSsize_t((Py_ssize_t)res); exit: Py_XDECREF(data); @@ -121,7 +121,7 @@ static PyObject * 
psyco_lobj_read(lobjectObject *self, PyObject *args) { PyObject *res; - long where, end; + Py_ssize_t where, end; Py_ssize_t size = -1; char *buffer; @@ -165,10 +165,10 @@ psyco_lobj_read(lobjectObject *self, PyObject *args) static PyObject * psyco_lobj_seek(lobjectObject *self, PyObject *args) { - long offset, pos=0; + Py_ssize_t offset, pos=0; int whence=0; - if (!PyArg_ParseTuple(args, "l|i", &offset, &whence)) + if (!PyArg_ParseTuple(args, "n|i", &offset, &whence)) return NULL; EXC_IF_LOBJ_CLOSED(self); @@ -197,7 +197,7 @@ psyco_lobj_seek(lobjectObject *self, PyObject *args) if ((pos = lobject_seek(self, offset, whence)) < 0) return NULL; - return PyLong_FromLong(pos); + return PyLong_FromSsize_t(pos); } /* tell method - tell current position in the lobject */ @@ -208,7 +208,7 @@ psyco_lobj_seek(lobjectObject *self, PyObject *args) static PyObject * psyco_lobj_tell(lobjectObject *self, PyObject *args) { - long pos; + Py_ssize_t pos; EXC_IF_LOBJ_CLOSED(self); EXC_IF_LOBJ_LEVEL0(self); @@ -217,7 +217,7 @@ psyco_lobj_tell(lobjectObject *self, PyObject *args) if ((pos = lobject_tell(self)) < 0) return NULL; - return PyLong_FromLong(pos); + return PyLong_FromSsize_t(pos); } /* unlink method - unlink (destroy) the lobject */ @@ -274,10 +274,12 @@ psyco_lobj_get_closed(lobjectObject *self, void *closure) static PyObject * psyco_lobj_truncate(lobjectObject *self, PyObject *args) { - long len = 0; + Py_ssize_t len = 0; - if (!PyArg_ParseTuple(args, "|l", &len)) + Dprintf("psyco_lobj_truncate: Enter lobject object at %p", self); + if (!PyArg_ParseTuple(args, "|n", &len)) return NULL; + Dprintf("psyco_lobj_truncate: Parsed Successfully"); EXC_IF_LOBJ_CLOSED(self); EXC_IF_LOBJ_LEVEL0(self); From d0309333b77fa589760d44ad112a580535196a51 Mon Sep 17 00:00:00 2001 From: Jason Erickson Date: Mon, 8 Jun 2015 14:05:05 -0600 Subject: [PATCH 081/151] Removed added Dprintf statements Removed extra Dprintf statements added to trouble large objects --- psycopg/lobject_type.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/psycopg/lobject_type.c b/psycopg/lobject_type.c index 634e76ca..c4009f7a 100644 --- a/psycopg/lobject_type.c +++ b/psycopg/lobject_type.c @@ -276,10 +276,8 @@ psyco_lobj_truncate(lobjectObject *self, PyObject *args) { Py_ssize_t len = 0; - Dprintf("psyco_lobj_truncate: Enter lobject object at %p", self); if (!PyArg_ParseTuple(args, "|n", &len)) return NULL; - Dprintf("psyco_lobj_truncate: Parsed Successfully"); EXC_IF_LOBJ_CLOSED(self); EXC_IF_LOBJ_LEVEL0(self); From c13956dc10e66dc60269674ca414269e8b34bec5 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 14 Jun 2015 18:33:22 +0100 Subject: [PATCH 082/151] Fixed compiler warnings about Py_ssize_t printf format --- psycopg/lobject_type.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/psycopg/lobject_type.c b/psycopg/lobject_type.c index c4009f7a..926c109c 100644 --- a/psycopg/lobject_type.c +++ b/psycopg/lobject_type.c @@ -187,8 +187,8 @@ psyco_lobj_seek(lobjectObject *self, PyObject *args) #else if (offset < INT_MIN || offset > INT_MAX) { PyErr_Format(InterfaceError, - "offset out of range (%ld): this psycopg version was not built " - "with lobject 64 API support", + "offset out of range (" FORMAT_CODE_PY_SSIZE_T "): " + "this psycopg version was not built with lobject 64 API support", offset); return NULL; } @@ -286,16 +286,16 @@ psyco_lobj_truncate(lobjectObject *self, PyObject *args) #ifdef HAVE_LO64 if (len > INT_MAX && self->conn->server_version < 90300) { PyErr_Format(NotSupportedError, - 
"len out of range (%ld): server version %d " - "does not support the lobject 64 API", + "len out of range (" FORMAT_CODE_PY_SSIZE_T "): " + "server version %d does not support the lobject 64 API", len, self->conn->server_version); return NULL; } #else if (len > INT_MAX) { PyErr_Format(InterfaceError, - "len out of range (%ld): this psycopg version was not built " - "with lobject 64 API support", + "len out of range (" FORMAT_CODE_PY_SSIZE_T "): " + "this psycopg version was not built with lobject 64 API support", len); return NULL; } From eb687103b4332da3fe1080d3586a18889b472994 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 14 Jun 2015 18:43:58 +0100 Subject: [PATCH 083/151] Skip null array test on Postgres versions not supporting it --- tests/test_types_basic.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_types_basic.py b/tests/test_types_basic.py index 199dc1b6..4923d820 100755 --- a/tests/test_types_basic.py +++ b/tests/test_types_basic.py @@ -192,6 +192,7 @@ class TypesBasicTests(ConnectingTestCase): self.assertRaises(psycopg2.DataError, psycopg2.extensions.STRINGARRAY, b(s), curs) + @testutils.skip_before_postgres(8, 2) def testArrayOfNulls(self): curs = self.conn.cursor() curs.execute(""" From 244f233e1cb5b8890d5de04b22a4078efdfc5dbd Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Mon, 15 Jun 2015 03:43:11 +0100 Subject: [PATCH 084/151] Fixed manifest trying to include Makefiles from build env --- MANIFEST.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 00e4fc32..66fc2656 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,8 +2,8 @@ recursive-include psycopg *.c *.h *.manifest recursive-include lib *.py recursive-include tests *.py recursive-include examples *.py somehackers.jpg whereareyou.jpg -recursive-include doc README SUCCESS COPYING.LESSER pep-0249.txt -recursive-include doc Makefile requirements.txt +include doc/README.rst doc/SUCCESS doc/COPYING.LESSER doc/pep-0249.txt +include doc/Makefile doc/requirements.txt recursive-include doc/src *.rst *.py *.css Makefile recursive-include scripts *.py *.sh include scripts/maketypes.sh scripts/buildtypes.py From 8611d91b356dff668089394ba6c8b81bc27261e6 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Mon, 15 Jun 2015 10:31:14 +0100 Subject: [PATCH 085/151] Fixed build on Python 2.5 --- psycopg/lobject_type.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psycopg/lobject_type.c b/psycopg/lobject_type.c index 926c109c..d15eb20e 100644 --- a/psycopg/lobject_type.c +++ b/psycopg/lobject_type.c @@ -197,7 +197,7 @@ psyco_lobj_seek(lobjectObject *self, PyObject *args) if ((pos = lobject_seek(self, offset, whence)) < 0) return NULL; - return PyLong_FromSsize_t(pos); + return PyInt_FromSsize_t(pos); } /* tell method - tell current position in the lobject */ @@ -217,7 +217,7 @@ psyco_lobj_tell(lobjectObject *self, PyObject *args) if ((pos = lobject_tell(self)) < 0) return NULL; - return PyLong_FromSsize_t(pos); + return PyInt_FromSsize_t(pos); } /* unlink method - unlink (destroy) the lobject */ From 22fe6e7aad5b55923a8fdbf154a5fe740491257b Mon Sep 17 00:00:00 2001 From: Jason Erickson Date: Fri, 6 Nov 2015 15:01:32 -0700 Subject: [PATCH 086/151] Modify setup.py to support setuptools/wheel To support creation of whl files for PyPI, setuptools need to be imported instead of distutils. Created try/except case to fall back to integrated distutils if setuptools is not installed. 
--- setup.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 2de8c5ef..570c5def 100644 --- a/setup.py +++ b/setup.py @@ -57,7 +57,10 @@ import os import sys import re import subprocess -from distutils.core import setup, Extension +try: + from setuptools import setup, Extension +except ImportError: + from distutils.core import setup, Extension from distutils.command.build_ext import build_ext from distutils.sysconfig import get_python_inc from distutils.ccompiler import get_default_compiler From 2d91864977be1fd590305e895402ebd3b655196e Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Mon, 7 Mar 2016 10:38:40 +0000 Subject: [PATCH 087/151] setuptools in the news --- NEWS | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS b/NEWS index 9467fea7..8f085599 100644 --- a/NEWS +++ b/NEWS @@ -30,6 +30,7 @@ What's new in psycopg 2.6.2 - Raise `!NotSupportedError` on unhandled server response status (:ticket:`#352`). - Fixed `!PersistentConnectionPool` on Python 3 (:ticket:`#348`). +- Added support for setuptools/wheel (:ticket:`#370`). - Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`). From 006693421d6bc9ef37be506798034433b538fe6e Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Tue, 8 Mar 2016 04:34:12 +0000 Subject: [PATCH 088/151] Fixed 'make sdist' to work with setuptools --- MANIFEST.in | 2 +- Makefile | 9 ++------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 66fc2656..0d34fd3d 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -8,4 +8,4 @@ recursive-include doc/src *.rst *.py *.css Makefile recursive-include scripts *.py *.sh include scripts/maketypes.sh scripts/buildtypes.py include AUTHORS README.rst INSTALL LICENSE NEWS -include PKG-INFO MANIFEST.in MANIFEST setup.py setup.cfg Makefile +include MANIFEST.in setup.py setup.cfg Makefile diff --git a/Makefile b/Makefile index 232f0d0b..a8f491e4 100644 --- a/Makefile +++ b/Makefile @@ -92,14 +92,9 @@ $(PACKAGE)/tests/%.py: tests/%.py $(PYTHON) setup.py build_py $(BUILD_OPT) touch $@ -$(SDIST): MANIFEST $(SOURCE) +$(SDIST): $(SOURCE) $(PYTHON) setup.py sdist $(SDIST_OPT) -MANIFEST: MANIFEST.in $(SOURCE) - # Run twice as MANIFEST.in includes MANIFEST - $(PYTHON) setup.py sdist --manifest-only - $(PYTHON) setup.py sdist --manifest-only - # docs depend on the build as it partly use introspection. doc/html/genindex.html: $(PLATLIB) $(PURELIB) $(SOURCE_DOC) $(MAKE) -C doc html @@ -111,5 +106,5 @@ doc/docs.zip: doc/html/genindex.html (cd doc/html && zip -r ../docs.zip *) clean: - rm -rf build MANIFEST + rm -rf build $(MAKE) -C doc clean From 654eeec24cc1ad63f84fcbf22a84994b9796a82e Mon Sep 17 00:00:00 2001 From: Christian Ullrich Date: Wed, 6 Jan 2016 15:04:33 +0100 Subject: [PATCH 089/151] Work around late initialization in distutils._msvccompiler. --- setup.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/setup.py b/setup.py index 570c5def..45e76bf0 100644 --- a/setup.py +++ b/setup.py @@ -304,6 +304,10 @@ class psycopg_build_ext(build_ext): except AttributeError: ext_path = os.path.join(self.build_lib, 'psycopg2', '_psycopg.pyd') + # Make sure spawn() will work if compile() was never + # called. 
https://github.com/psycopg/psycopg2/issues/380 + if not self.compiler.initialized: + self.compiler.initialize() self.compiler.spawn( ['mt.exe', '-nologo', '-manifest', os.path.join('psycopg', manifest), From 48260c64063ba6d1d6045d57822c22d3378a832e Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Tue, 8 Mar 2016 00:25:19 +0000 Subject: [PATCH 090/151] Py 3.5 MSVC 2015 build fixed noted in news Close issue #380. --- NEWS | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS b/NEWS index 8f085599..8a1b477b 100644 --- a/NEWS +++ b/NEWS @@ -31,6 +31,7 @@ What's new in psycopg 2.6.2 (:ticket:`#352`). - Fixed `!PersistentConnectionPool` on Python 3 (:ticket:`#348`). - Added support for setuptools/wheel (:ticket:`#370`). +- Fix build on Windows with Python 3.5, VS 2015 (:ticket:`#380`). - Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`). From 65ec7e8bcb5f082bdbf6eddadd7ef6e601098fbd Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Tue, 8 Mar 2016 05:12:06 +0000 Subject: [PATCH 091/151] Fixed read() exception propagation in copy_from Close issue #412. --- NEWS | 1 + psycopg/pqpath.c | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 8a1b477b..52307ed2 100644 --- a/NEWS +++ b/NEWS @@ -33,6 +33,7 @@ What's new in psycopg 2.6.2 - Added support for setuptools/wheel (:ticket:`#370`). - Fix build on Windows with Python 3.5, VS 2015 (:ticket:`#380`). - Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`). +- Fixed `!read()` exception propagation in copy_from (:ticket:`#412`). What's new in psycopg 2.6.1 diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index b643512d..99dd40be 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1393,7 +1393,11 @@ _pq_copy_in_v3(cursorObject *curs) Py_DECREF(str); } } - PyErr_Restore(t, ex, tb); + /* Clear the Py exception: it will be re-raised from the libpq */ + Py_XDECREF(t); + Py_XDECREF(ex); + Py_XDECREF(tb); + PyErr_Clear(); } res = PQputCopyEnd(curs->conn->pgconn, buf); } From 4fb236e68822f69f89c5ea29a7fcdddcaa819e4c Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Tue, 8 Mar 2016 05:13:57 +0000 Subject: [PATCH 092/151] Start advertising Py 3.5 support --- doc/src/install.rst | 2 +- setup.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/src/install.rst b/doc/src/install.rst index ec1eeea8..0c9bbdc0 100644 --- a/doc/src/install.rst +++ b/doc/src/install.rst @@ -18,7 +18,7 @@ The current `!psycopg2` implementation supports: NOTE: keep consistent with setup.py and the /features/ page. - Python 2 versions from 2.5 to 2.7 -- Python 3 versions from 3.1 to 3.4 +- Python 3 versions from 3.1 to 3.5 - PostgreSQL versions from 7.4 to 9.4 .. _PostgreSQL: http://www.postgresql.org/ diff --git a/setup.py b/setup.py index 45e76bf0..d8908fc0 100644 --- a/setup.py +++ b/setup.py @@ -41,6 +41,7 @@ Programming Language :: Python :: 3.1 Programming Language :: Python :: 3.2 Programming Language :: Python :: 3.3 Programming Language :: Python :: 3.4 +Programming Language :: Python :: 3.5 Programming Language :: C Programming Language :: SQL Topic :: Database From 5ce00f8e5baa79548122b438c8f4d9f92eec682a Mon Sep 17 00:00:00 2001 From: Gabriel Kihlman Date: Mon, 18 Apr 2016 17:28:50 +0200 Subject: [PATCH 093/151] Avoid a possible null deref, tz might be NULL. Found by clang static analyzer. 
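For context, the one-line fix below relies on the documented difference between the two reference-counting macros: ``Py_DECREF()`` assumes a non-NULL object, while ``Py_XDECREF()`` checks for NULL first and does nothing in that case. A minimal stand-alone sketch, an illustration only (not code taken from this patch), assuming a small program embedding CPython::

    /* Illustration only, not part of the patch: Py_DECREF(NULL) dereferences
     * a NULL pointer and crashes, while Py_XDECREF(NULL) is a safe no-op. */
    #include <Python.h>

    int main(void)
    {
        PyObject *tz = NULL;   /* stands in for a lookup that failed, leaving tz NULL */

        Py_Initialize();

        /* Py_DECREF(tz);     -- would crash here on the NULL pointer */
        Py_XDECREF(tz);        /* NULL-safe: no-op when tz is NULL */

        Py_Finalize();
        return 0;
    }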
--- psycopg/adapter_datetime.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psycopg/adapter_datetime.c b/psycopg/adapter_datetime.c index 0571837d..9d04df40 100644 --- a/psycopg/adapter_datetime.c +++ b/psycopg/adapter_datetime.c @@ -451,7 +451,7 @@ psyco_TimestampFromTicks(PyObject *self, PyObject *args) tz); exit: - Py_DECREF(tz); + Py_XDECREF(tz); Py_XDECREF(m); return res; } From d5443c65fde6cae87a1dcd901f31b6cdca7a1811 Mon Sep 17 00:00:00 2001 From: Oleksandr Shulgin Date: Thu, 21 Apr 2016 15:32:05 +0200 Subject: [PATCH 094/151] Fix TODOs in ReplicationMessage inline docs --- psycopg/replication_message_type.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index f607d2ba..358d1497 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -125,15 +125,15 @@ psyco_replmsg_get_send_time(replicationMessageObject *self) static struct PyMemberDef replicationMessageObject_members[] = { {"cursor", T_OBJECT, OFFSETOF(cursor), READONLY, - "TODO"}, + "Related ReplicationCursor object."}, {"payload", T_OBJECT, OFFSETOF(payload), READONLY, - "TODO"}, + "The actual message data."}, {"data_size", T_INT, OFFSETOF(data_size), READONLY, - "TODO"}, + "Raw size of the message data in bytes."}, {"data_start", T_ULONGLONG, OFFSETOF(data_start), READONLY, - "TODO"}, + "LSN position of the start of this message."}, {"wal_end", T_ULONGLONG, OFFSETOF(wal_end), READONLY, - "TODO"}, + "LSN position of the current end of WAL on the server."}, {NULL} }; From 3ed2c54790cd377759da5bc5a8b596fe2195685a Mon Sep 17 00:00:00 2001 From: Greg Ward Date: Tue, 28 Jun 2016 18:14:57 -0400 Subject: [PATCH 095/151] Fix scattered grammar/spelling errors in comments, debug output, etc.
--- psycopg/cursor_type.c | 2 +- psycopg/pqpath.c | 8 ++++---- tests/testutils.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c index cd8d5ca3..fe79bbf9 100644 --- a/psycopg/cursor_type.c +++ b/psycopg/cursor_type.c @@ -335,7 +335,7 @@ _psyco_curs_merge_query_args(cursorObject *self, PyErr_Fetch(&err, &arg, &trace); if (err && PyErr_GivenExceptionMatches(err, PyExc_TypeError)) { - Dprintf("psyco_curs_execute: TypeError exception catched"); + Dprintf("psyco_curs_execute: TypeError exception caught"); PyErr_NormalizeException(&err, &arg, &trace); if (PyObject_HasAttrString(arg, "args")) { diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 99dd40be..eb862d3d 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -161,11 +161,11 @@ pq_raise(connectionObject *conn, cursorObject *curs, PGresult **pgres) if (conn == NULL) { PyErr_SetString(DatabaseError, - "psycopg went psycotic and raised a null error"); + "psycopg went psychotic and raised a null error"); return; } - /* if the connection has somehow beed broken, we mark the connection + /* if the connection has somehow been broken, we mark the connection object as closed but requiring cleanup */ if (conn->pgconn != NULL && PQstatus(conn->pgconn) == CONNECTION_BAD) conn->closed = 2; @@ -907,7 +907,7 @@ pq_execute(cursorObject *curs, const char *query, int async, int no_result, int PyErr_SetString(OperationalError, PQerrorMessage(curs->conn->pgconn)); return -1; } - Dprintf("curs_execute: pg connection at %p OK", curs->conn->pgconn); + Dprintf("pq_execute: pg connection at %p OK", curs->conn->pgconn); Py_BEGIN_ALLOW_THREADS; pthread_mutex_lock(&(curs->conn->lock)); @@ -932,7 +932,7 @@ pq_execute(cursorObject *curs, const char *query, int async, int no_result, int Py_UNBLOCK_THREADS; } - /* dont let pgres = NULL go to pq_fetch() */ + /* don't let pgres = NULL go to pq_fetch() */ if (curs->pgres == NULL) { pthread_mutex_unlock(&(curs->conn->lock)); Py_BLOCK_THREADS; diff --git a/tests/testutils.py b/tests/testutils.py index 76671d99..fc2b59d7 100644 --- a/tests/testutils.py +++ b/tests/testutils.py @@ -100,7 +100,7 @@ class ConnectingTestCase(unittest.TestCase): self._conns except AttributeError, e: raise AttributeError( - "%s (did you remember calling ConnectingTestCase.setUp()?)" + "%s (did you forget to call ConnectingTestCase.setUp()?)" % e) if 'dsn' in kwargs: From 52753b23e89b6bb83e724dd6c59fd342427dd54a Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 01:18:27 +0100 Subject: [PATCH 096/151] Document that the libpq must be available at runtime Fix issue #408. --- doc/src/install.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/src/install.rst b/doc/src/install.rst index 4d82c620..a3f7ae4a 100644 --- a/doc/src/install.rst +++ b/doc/src/install.rst @@ -51,6 +51,16 @@ extension packages, *above all if you are a Windows or a Mac OS user*, please use a pre-compiled package and go straight to the :ref:`module usage ` avoid bothering with the gory details. +.. note:: + + Regardless of the way `!psycopg2` is installed, at runtime it will need to + use the libpq_ library. 
`!psycopg2` relies on the host OS to find the + library file (usually ``libpq.so`` or ``libpq.dll``): if the library is + installed in a standard location there is usually no problem; if the + library is in a non-standard location you will have to tell somehow + psycopg how to find it, which is OS-dependent (for instance setting a + suitable :envvar:`LD_LIBRARY_PATH` on Linux). + .. _install-from-package: From b7330283bc13ee6b2072f333acbf1b84aff0206a Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 01:39:10 +0100 Subject: [PATCH 097/151] Wordsmithing on COPY commands Address somehow issue #397. --- doc/src/cursor.rst | 5 ++++- doc/src/usage.rst | 10 +++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/doc/src/cursor.rst b/doc/src/cursor.rst index 73bb5375..45e62781 100644 --- a/doc/src/cursor.rst +++ b/doc/src/cursor.rst @@ -494,6 +494,9 @@ The ``cursor`` class .. rubric:: COPY-related methods + Efficiently copy data from file-like objects to the database and back. See + :ref:`copy` for an overview. + .. extension:: The :sql:`COPY` command is a PostgreSQL extension to the SQL standard. @@ -502,7 +505,7 @@ The ``cursor`` class .. method:: copy_from(file, table, sep='\\t', null='\\\\N', size=8192, columns=None) Read data *from* the file-like object *file* appending them to - the table named *table*. See :ref:`copy` for an overview. + the table named *table*. :param file: file-like object to read data from. It must have both `!read()` and `!readline()` methods. diff --git a/doc/src/usage.rst b/doc/src/usage.rst index 9dd31df2..3b42aeb9 100644 --- a/doc/src/usage.rst +++ b/doc/src/usage.rst @@ -864,11 +864,19 @@ Using COPY TO and COPY FROM Psycopg `cursor` objects provide an interface to the efficient PostgreSQL |COPY|__ command to move data from files to tables and back. + +Currently no adaptation is provided between Python and PostgreSQL types on +|COPY|: the file can be any Python file-like object but its format must be in +the format accepted by `PostgreSQL COPY command`__ (data fromat, escaped +characters, etc). + +.. __: COPY_ + The methods exposed are: `~cursor.copy_from()` Reads data *from* a file-like object appending them to a database table - (:sql:`COPY table FROM file` syntax). The source file must have both + (:sql:`COPY table FROM file` syntax). The source file must provide both `!read()` and `!readline()` method. `~cursor.copy_to()` From 7aedc61d410a551f74cf9b721b794c1540377887 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 02:09:56 +0100 Subject: [PATCH 098/151] Fixed segfault on repr() for uninitialized connections Close #361. --- NEWS | 5 +++-- psycopg/connection_type.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 52307ed2..9056b0f0 100644 --- a/NEWS +++ b/NEWS @@ -24,12 +24,13 @@ What's new in psycopg 2.6.2 ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Report the server response status on errors (such as :ticket:`#281`). +- Raise `!NotSupportedError` on unhandled server response status + (:ticket:`#352`). - The `~psycopg2.extras.wait_select` callback allows interrupting a long-running query in an interactive shell using :kbd:`Ctrl-C` (:ticket:`#333`). -- Raise `!NotSupportedError` on unhandled server response status - (:ticket:`#352`). - Fixed `!PersistentConnectionPool` on Python 3 (:ticket:`#348`). +- Fixed segfault on `repr()` of an unitialized connection (:ticket:`#361`). - Added support for setuptools/wheel (:ticket:`#370`). 
- Fix build on Windows with Python 3.5, VS 2015 (:ticket:`#380`). - Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`). diff --git a/psycopg/connection_type.c b/psycopg/connection_type.c index 2c1dddf2..e1966b37 100644 --- a/psycopg/connection_type.c +++ b/psycopg/connection_type.c @@ -1171,7 +1171,7 @@ connection_repr(connectionObject *self) { return PyString_FromFormat( "", - self, self->dsn, self->closed); + self, (self->dsn ? self->dsn : ""), self->closed); } static int From c29b5cd46a24fd81cff8b3affd9c78d18d53aa69 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 02:23:59 +0100 Subject: [PATCH 099/151] Fixed build on win32 Fix #422. --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index d8908fc0..65d5fa69 100644 --- a/setup.py +++ b/setup.py @@ -351,7 +351,8 @@ class psycopg_build_ext(build_ext): self.libraries.append("advapi32") if self.compiler_is_msvc(): # MSVC requires an explicit "libpq" - self.libraries.remove("pq") + if "pq" in self.libraries: + self.libraries.remove("pq") self.libraries.append("secur32") self.libraries.append("libpq") self.libraries.append("shfolder") From bada1f1f8e390c7d0ef8eea88cdb16e6f78c1eec Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 11:17:55 +0100 Subject: [PATCH 100/151] Work in progress on writable encoding Would help using adapt(unicode) to quote strings without a connection, see ticket #331. Currently in heisenbug state: if test_connection_wins_anyway and test_encoding_default run (in this order), the latter fail because the returned value is "'\xe8 '", with an extra space. Skipping the first test, the second succeed. The bad value is returned by the libpq: ql = PQescapeString(to+eq+1, from, len); just returns len = 2 and an extra space in the string... meh. --- psycopg/adapter_qstring.c | 73 +++++++++++++++++++++++++++++---------- psycopg/adapter_qstring.h | 3 ++ tests/test_types_basic.py | 52 +++++++++++++++++++++++----- 3 files changed, 101 insertions(+), 27 deletions(-) diff --git a/psycopg/adapter_qstring.c b/psycopg/adapter_qstring.c index 2e3ab0ae..1e256cf0 100644 --- a/psycopg/adapter_qstring.c +++ b/psycopg/adapter_qstring.c @@ -36,28 +36,43 @@ static const char *default_encoding = "latin1"; /* qstring_quote - do the quote process on plain and unicode strings */ +const char * +_qstring_get_encoding(qstringObject *self) +{ + /* if the wrapped object is an unicode object we can encode it to match + conn->encoding but if the encoding is not specified we don't know what + to do and we raise an exception */ + if (self->conn) { + return self->conn->codec; + } + else { + return self->encoding ? 
self->encoding : default_encoding; + } +} + static PyObject * qstring_quote(qstringObject *self) { PyObject *str = NULL; char *s, *buffer = NULL; Py_ssize_t len, qlen; - const char *encoding = default_encoding; + const char *encoding; PyObject *rv = NULL; - /* if the wrapped object is an unicode object we can encode it to match - conn->encoding but if the encoding is not specified we don't know what - to do and we raise an exception */ - if (self->conn) { - encoding = self->conn->codec; - } - + encoding = _qstring_get_encoding(self); Dprintf("qstring_quote: encoding to %s", encoding); - if (PyUnicode_Check(self->wrapped) && encoding) { - str = PyUnicode_AsEncodedString(self->wrapped, encoding, NULL); - Dprintf("qstring_quote: got encoded object at %p", str); - if (str == NULL) goto exit; + if (PyUnicode_Check(self->wrapped)) { + if (encoding) { + str = PyUnicode_AsEncodedString(self->wrapped, encoding, NULL); + Dprintf("qstring_quote: got encoded object at %p", str); + if (str == NULL) goto exit; + } + else { + PyErr_SetString(PyExc_TypeError, + "missing encoding to encode unicode object"); + goto exit; + } } #if PY_MAJOR_VERSION < 3 @@ -150,15 +165,34 @@ qstring_conform(qstringObject *self, PyObject *args) static PyObject * qstring_get_encoding(qstringObject *self) { - const char *encoding = default_encoding; - - if (self->conn) { - encoding = self->conn->codec; - } - + const char *encoding; + encoding = _qstring_get_encoding(self); return Text_FromUTF8(encoding); } +static int +qstring_set_encoding(qstringObject *self, PyObject *pyenc) +{ + int rv = -1; + const char *tmp; + char *cenc; + + /* get a C copy of the encoding (which may come from unicode) */ + Py_INCREF(pyenc); + if (!(pyenc = psycopg_ensure_bytes(pyenc))) { goto exit; } + if (!(tmp = Bytes_AsString(pyenc))) { goto exit; } + if (0 > psycopg_strdup(&cenc, tmp, 0)) { goto exit; } + + Dprintf("qstring_set_encoding: encoding set to %s", cenc); + PyMem_Free((void *)self->encoding); + self->encoding = cenc; + rv = 0; + +exit: + Py_XDECREF(pyenc); + return rv; +} + /** the QuotedString object **/ /* object member list */ @@ -183,7 +217,7 @@ static PyMethodDef qstringObject_methods[] = { static PyGetSetDef qstringObject_getsets[] = { { "encoding", (getter)qstring_get_encoding, - (setter)NULL, + (setter)qstring_set_encoding, "current encoding of the adapter" }, {NULL} }; @@ -216,6 +250,7 @@ qstring_dealloc(PyObject* obj) Py_CLEAR(self->wrapped); Py_CLEAR(self->buffer); Py_CLEAR(self->conn); + PyMem_Free((void *)self->encoding); Dprintf("qstring_dealloc: deleted qstring object at %p, refcnt = " FORMAT_CODE_PY_SSIZE_T, diff --git a/psycopg/adapter_qstring.h b/psycopg/adapter_qstring.h index b7b086f3..8abdc5f2 100644 --- a/psycopg/adapter_qstring.h +++ b/psycopg/adapter_qstring.h @@ -39,6 +39,9 @@ typedef struct { PyObject *buffer; connectionObject *conn; + + const char *encoding; + } qstringObject; #ifdef __cplusplus diff --git a/tests/test_types_basic.py b/tests/test_types_basic.py index 4923d820..baa80c01 100755 --- a/tests/test_types_basic.py +++ b/tests/test_types_basic.py @@ -95,11 +95,11 @@ class TypesBasicTests(ConnectingTestCase): except ValueError: return self.skipTest("inf not available on this platform") s = self.execute("SELECT %s AS foo", (float("inf"),)) - self.failUnless(str(s) == "inf", "wrong float quoting: " + str(s)) + self.failUnless(str(s) == "inf", "wrong float quoting: " + str(s)) self.failUnless(type(s) == float, "wrong float conversion: " + repr(s)) s = self.execute("SELECT %s AS foo", (float("-inf"),)) - 
self.failUnless(str(s) == "-inf", "wrong float quoting: " + str(s)) + self.failUnless(str(s) == "-inf", "wrong float quoting: " + str(s)) def testBinary(self): if sys.version_info[0] < 3: @@ -344,6 +344,43 @@ class TypesBasicTests(ConnectingTestCase): self.assertEqual(a, [2,4,'nada']) +class TestStringAdapter(ConnectingTestCase): + def test_encoding_default(self): + from psycopg2.extensions import adapt + a = adapt("hello") + self.assertEqual(a.encoding, 'latin1') + self.assertEqual(a.getquoted(), "'hello'") + + egrave = u'\xe8' + self.assertEqual(adapt(egrave).getquoted(), "'\xe8'") + + def test_encoding_error(self): + from psycopg2.extensions import adapt + snowman = u"\u2603" + a = adapt(snowman) + self.assertRaises(UnicodeEncodeError, a.getquoted) + + def test_set_encoding(self): + from psycopg2.extensions import adapt + snowman = u"\u2603" + a = adapt(snowman) + a.encoding = 'utf8' + self.assertEqual(a.encoding, 'utf8') + self.assertEqual(a.getquoted(), "'\xe2\x98\x83'") + + def test_connection_wins_anyway(self): + from psycopg2.extensions import adapt + snowman = u"\u2603" + a = adapt(snowman) + a.encoding = 'latin9' + + self.conn.set_client_encoding('utf8') + a.prepare(self.conn) + + self.assertEqual(a.encoding, 'utf_8') + self.assertEqual(a.getquoted(), "'\xe2\x98\x83'") + + class AdaptSubclassTest(unittest.TestCase): def test_adapt_subtype(self): from psycopg2.extensions import adapt @@ -364,8 +401,8 @@ class AdaptSubclassTest(unittest.TestCase): try: self.assertEqual(b('b'), adapt(C()).getquoted()) finally: - del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote] - del psycopg2.extensions.adapters[B, psycopg2.extensions.ISQLQuote] + del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote] + del psycopg2.extensions.adapters[B, psycopg2.extensions.ISQLQuote] @testutils.skip_from_python(3) def test_no_mro_no_joy(self): @@ -378,8 +415,7 @@ class AdaptSubclassTest(unittest.TestCase): try: self.assertRaises(psycopg2.ProgrammingError, adapt, B()) finally: - del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote] - + del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote] @testutils.skip_before_python(3) def test_adapt_subtype_3(self): @@ -392,7 +428,7 @@ class AdaptSubclassTest(unittest.TestCase): try: self.assertEqual(b("a"), adapt(B()).getquoted()) finally: - del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote] + del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote] class ByteaParserTest(unittest.TestCase): @@ -480,6 +516,7 @@ class ByteaParserTest(unittest.TestCase): self.assertEqual(rv, tgt) + def skip_if_cant_cast(f): @wraps(f) def skip_if_cant_cast_(self, *args, **kwargs): @@ -499,4 +536,3 @@ def test_suite(): if __name__ == "__main__": unittest.main() - From 2e8e61b8d41144cbb65dfce786335ff7c625b4f7 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 16:57:25 +0100 Subject: [PATCH 101/151] Test moved to the right module, cleanup, but same problem --- psycopg/adapter_qstring.c | 3 +-- tests/test_quote.py | 47 ++++++++++++++++++++++++++++++++++----- tests/test_types_basic.py | 37 ------------------------------ tests/testconfig.py | 2 -- 4 files changed, 43 insertions(+), 46 deletions(-) diff --git a/psycopg/adapter_qstring.c b/psycopg/adapter_qstring.c index 1e256cf0..110093e5 100644 --- a/psycopg/adapter_qstring.c +++ b/psycopg/adapter_qstring.c @@ -87,8 +87,7 @@ qstring_quote(qstringObject *self) /* if the wrapped object is not a string, this is an error */ else { - 
PyErr_SetString(PyExc_TypeError, - "can't quote non-string object (or missing encoding)"); + PyErr_SetString(PyExc_TypeError, "can't quote non-string object"); goto exit; } diff --git a/tests/test_quote.py b/tests/test_quote.py index 6e945624..9d00c539 100755 --- a/tests/test_quote.py +++ b/tests/test_quote.py @@ -29,6 +29,7 @@ import psycopg2 import psycopg2.extensions from psycopg2.extensions import b + class QuotingTestCase(ConnectingTestCase): r"""Checks the correct quoting of strings and binary objects. @@ -51,7 +52,7 @@ class QuotingTestCase(ConnectingTestCase): data = """some data with \t chars to escape into, 'quotes' and \\ a backslash too. """ - data += "".join(map(chr, range(1,127))) + data += "".join(map(chr, range(1, 127))) curs = self.conn.cursor() curs.execute("SELECT %s;", (data,)) @@ -90,13 +91,13 @@ class QuotingTestCase(ConnectingTestCase): if server_encoding != "UTF8": return self.skipTest( "Unicode test skipped since server encoding is %s" - % server_encoding) + % server_encoding) data = u"""some data with \t chars to escape into, 'quotes', \u20ac euro sign and \\ a backslash too. """ - data += u"".join(map(unichr, [ u for u in range(1,65536) - if not 0xD800 <= u <= 0xDFFF ])) # surrogate area + data += u"".join(map(unichr, [u for u in range(1, 65536) + if not 0xD800 <= u <= 0xDFFF])) # surrogate area self.conn.set_client_encoding('UNICODE') psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, self.conn) @@ -183,9 +184,45 @@ class TestQuotedIdentifier(ConnectingTestCase): self.assertEqual(quote_ident(snowman, self.conn), quoted) +class TestStringAdapter(ConnectingTestCase): + def test_encoding_default(self): + from psycopg2.extensions import adapt + a = adapt("hello") + self.assertEqual(a.encoding, 'latin1') + self.assertEqual(a.getquoted(), "'hello'") + + egrave = u'\xe8' + self.assertEqual(adapt(egrave).getquoted(), "'\xe8'") + + def test_encoding_error(self): + from psycopg2.extensions import adapt + snowman = u"\u2603" + a = adapt(snowman) + self.assertRaises(UnicodeEncodeError, a.getquoted) + + def test_set_encoding(self): + from psycopg2.extensions import adapt + snowman = u"\u2603" + a = adapt(snowman) + a.encoding = 'utf8' + self.assertEqual(a.encoding, 'utf8') + self.assertEqual(a.getquoted(), "'\xe2\x98\x83'") + + def test_connection_wins_anyway(self): + from psycopg2.extensions import adapt + snowman = u"\u2603" + a = adapt(snowman) + a.encoding = 'latin9' + + self.conn.set_client_encoding('utf8') + a.prepare(self.conn) + + self.assertEqual(a.encoding, 'utf_8') + self.assertEqual(a.getquoted(), "'\xe2\x98\x83'") + + def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ == "__main__": unittest.main() - diff --git a/tests/test_types_basic.py b/tests/test_types_basic.py index baa80c01..248712b0 100755 --- a/tests/test_types_basic.py +++ b/tests/test_types_basic.py @@ -344,43 +344,6 @@ class TypesBasicTests(ConnectingTestCase): self.assertEqual(a, [2,4,'nada']) -class TestStringAdapter(ConnectingTestCase): - def test_encoding_default(self): - from psycopg2.extensions import adapt - a = adapt("hello") - self.assertEqual(a.encoding, 'latin1') - self.assertEqual(a.getquoted(), "'hello'") - - egrave = u'\xe8' - self.assertEqual(adapt(egrave).getquoted(), "'\xe8'") - - def test_encoding_error(self): - from psycopg2.extensions import adapt - snowman = u"\u2603" - a = adapt(snowman) - self.assertRaises(UnicodeEncodeError, a.getquoted) - - def test_set_encoding(self): - from psycopg2.extensions import adapt - snowman = 
u"\u2603" - a = adapt(snowman) - a.encoding = 'utf8' - self.assertEqual(a.encoding, 'utf8') - self.assertEqual(a.getquoted(), "'\xe2\x98\x83'") - - def test_connection_wins_anyway(self): - from psycopg2.extensions import adapt - snowman = u"\u2603" - a = adapt(snowman) - a.encoding = 'latin9' - - self.conn.set_client_encoding('utf8') - a.prepare(self.conn) - - self.assertEqual(a.encoding, 'utf_8') - self.assertEqual(a.getquoted(), "'\xe2\x98\x83'") - - class AdaptSubclassTest(unittest.TestCase): def test_adapt_subtype(self): from psycopg2.extensions import adapt diff --git a/tests/testconfig.py b/tests/testconfig.py index 0f995fbf..72c533ec 100644 --- a/tests/testconfig.py +++ b/tests/testconfig.py @@ -34,5 +34,3 @@ if dbuser is not None: dsn += ' user=%s' % dbuser if dbpass is not None: dsn += ' password=%s' % dbpass - - From 4a450b63c418bf7e6e62f7b444fd2edd9db246da Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 17:33:12 +0100 Subject: [PATCH 102/151] Don't hope to encode stuff in an arbitrary encoding libpq's PQescapeString will use the same encoding it has seen before in a connection (static_client_encoding). So I think I'll leave this feature here for people who know what is doing, but won't really document it as a feature: it can't really work in a generic way (unless adding some disgusting hack like creating a fake connection with the encoding we want to call PQescapeStringConn instead of PQescapeString). --- NEWS | 1 + tests/test_quote.py | 12 +++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 9056b0f0..4571f673 100644 --- a/NEWS +++ b/NEWS @@ -26,6 +26,7 @@ What's new in psycopg 2.6.2 - Report the server response status on errors (such as :ticket:`#281`). - Raise `!NotSupportedError` on unhandled server response status (:ticket:`#352`). +- Allow overriding string adapter encoding with no connection (:ticket:`#331`). - The `~psycopg2.extras.wait_select` callback allows interrupting a long-running query in an interactive shell using :kbd:`Ctrl-C` (:ticket:`#333`). diff --git a/tests/test_quote.py b/tests/test_quote.py index 9d00c539..0a204c83 100755 --- a/tests/test_quote.py +++ b/tests/test_quote.py @@ -157,7 +157,7 @@ class QuotingTestCase(ConnectingTestCase): class TestQuotedString(ConnectingTestCase): - def test_encoding(self): + def test_encoding_from_conn(self): q = psycopg2.extensions.QuotedString('hi') self.assertEqual(q.encoding, 'latin1') @@ -191,8 +191,11 @@ class TestStringAdapter(ConnectingTestCase): self.assertEqual(a.encoding, 'latin1') self.assertEqual(a.getquoted(), "'hello'") - egrave = u'\xe8' - self.assertEqual(adapt(egrave).getquoted(), "'\xe8'") + # NOTE: we can't really test an encoding different from utf8, because + # when encoding without connection the libpq will use parameters from + # a previous one, so what would happens depends jn the tests run order. + # egrave = u'\xe8' + # self.assertEqual(adapt(egrave).getquoted(), "'\xe8'") def test_encoding_error(self): from psycopg2.extensions import adapt @@ -201,6 +204,9 @@ class TestStringAdapter(ConnectingTestCase): self.assertRaises(UnicodeEncodeError, a.getquoted) def test_set_encoding(self): + # Note: this works-ish mostly in case when the standard db connection + # we test with is utf8, otherwise the encoding chosen by PQescapeString + # may give bad results. 
from psycopg2.extensions import adapt snowman = u"\u2603" a = adapt(snowman) From 9c156d41bbd9afe202c74cddb5c95d5ebd8aeb67 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 17:56:29 +0100 Subject: [PATCH 103/151] Docs wrapping --- doc/src/install.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/src/install.rst b/doc/src/install.rst index a3f7ae4a..3a95adc4 100644 --- a/doc/src/install.rst +++ b/doc/src/install.rst @@ -106,13 +106,15 @@ Install from a package **Microsoft Windows** There are two options to install a precompiled `psycopg2` package under windows: - - **Option 1:** Using `pip`__ (Included in python 2.7.9+ and python 3.4+) and a binary wheel package. Launch windows' command prompt (`cmd.exe`) and execute the following command:: + + **Option 1:** Using `pip`__ (Included in python 2.7.9+ and python 3.4+) + and a binary wheel package. Launch windows' command prompt (`cmd.exe`) + and execute the following command:: pip install psycopg2 - + .. __: https://pip.pypa.io/en/stable/installing/ - + **Option 2:** Jason Erickson maintains a packaged `Windows port of Psycopg`__ with installation executable. Download. Double click. Done. From 70af49c0a2a59c516fe89b30019430b8db551833 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 18:50:24 +0100 Subject: [PATCH 104/151] Fixed encoding tests on Py3 --- tests/test_quote.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_quote.py b/tests/test_quote.py index 0a204c83..74b366c9 100755 --- a/tests/test_quote.py +++ b/tests/test_quote.py @@ -189,7 +189,7 @@ class TestStringAdapter(ConnectingTestCase): from psycopg2.extensions import adapt a = adapt("hello") self.assertEqual(a.encoding, 'latin1') - self.assertEqual(a.getquoted(), "'hello'") + self.assertEqual(a.getquoted(), b("'hello'")) # NOTE: we can't really test an encoding different from utf8, because # when encoding without connection the libpq will use parameters from @@ -212,7 +212,7 @@ class TestStringAdapter(ConnectingTestCase): a = adapt(snowman) a.encoding = 'utf8' self.assertEqual(a.encoding, 'utf8') - self.assertEqual(a.getquoted(), "'\xe2\x98\x83'") + self.assertEqual(a.getquoted(), b("'\xe2\x98\x83'")) def test_connection_wins_anyway(self): from psycopg2.extensions import adapt @@ -224,7 +224,7 @@ class TestStringAdapter(ConnectingTestCase): a.prepare(self.conn) self.assertEqual(a.encoding, 'utf_8') - self.assertEqual(a.getquoted(), "'\xe2\x98\x83'") + self.assertEqual(a.getquoted(), b("'\xe2\x98\x83'")) def test_suite(): From 5bcaf11f9db43f15d52943916647a0cc0dc6ffca Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 19:11:04 +0100 Subject: [PATCH 105/151] Allow adapting bytes using QuotedString on Python 3 too Close #365. --- NEWS | 1 + psycopg/adapter_qstring.c | 6 ++---- tests/test_quote.py | 15 ++++++++++++--- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index 4571f673..28a9ac2f 100644 --- a/NEWS +++ b/NEWS @@ -32,6 +32,7 @@ What's new in psycopg 2.6.2 (:ticket:`#333`). - Fixed `!PersistentConnectionPool` on Python 3 (:ticket:`#348`). - Fixed segfault on `repr()` of an unitialized connection (:ticket:`#361`). +- Allow adapting bytes using QuotedString on Python 3 too (:ticket:`#365`). - Added support for setuptools/wheel (:ticket:`#370`). - Fix build on Windows with Python 3.5, VS 2015 (:ticket:`#380`). - Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`). 
diff --git a/psycopg/adapter_qstring.c b/psycopg/adapter_qstring.c index 110093e5..8c5a8f10 100644 --- a/psycopg/adapter_qstring.c +++ b/psycopg/adapter_qstring.c @@ -75,15 +75,13 @@ qstring_quote(qstringObject *self) } } -#if PY_MAJOR_VERSION < 3 - /* if the wrapped object is a simple string, we don't know how to + /* if the wrapped object is a binary string, we don't know how to (re)encode it, so we pass it as-is */ - else if (PyString_Check(self->wrapped)) { + else if (Bytes_Check(self->wrapped)) { str = self->wrapped; /* INCREF to make it ref-wise identical to unicode one */ Py_INCREF(str); } -#endif /* if the wrapped object is not a string, this is an error */ else { diff --git a/tests/test_quote.py b/tests/test_quote.py index 74b366c9..25d1d31c 100755 --- a/tests/test_quote.py +++ b/tests/test_quote.py @@ -23,7 +23,8 @@ # License for more details. import sys -from testutils import unittest, ConnectingTestCase, skip_before_libpq +import testutils +from testutils import unittest, ConnectingTestCase import psycopg2 import psycopg2.extensions @@ -167,13 +168,13 @@ class TestQuotedString(ConnectingTestCase): class TestQuotedIdentifier(ConnectingTestCase): - @skip_before_libpq(9, 0) + @testutils.skip_before_libpq(9, 0) def test_identifier(self): from psycopg2.extensions import quote_ident self.assertEqual(quote_ident('blah-blah', self.conn), '"blah-blah"') self.assertEqual(quote_ident('quote"inside', self.conn), '"quote""inside"') - @skip_before_libpq(9, 0) + @testutils.skip_before_libpq(9, 0) def test_unicode_ident(self): from psycopg2.extensions import quote_ident snowman = u"\u2603" @@ -226,6 +227,14 @@ class TestStringAdapter(ConnectingTestCase): self.assertEqual(a.encoding, 'utf_8') self.assertEqual(a.getquoted(), b("'\xe2\x98\x83'")) + @testutils.skip_before_python(3) + def test_adapt_bytes(self): + snowman = u"\u2603" + self.conn.set_client_encoding('utf8') + a = psycopg2.extensions.QuotedString(snowman.encode('utf8')) + a.prepare(self.conn) + self.assertEqual(a.getquoted(), b("'\xe2\x98\x83'")) + def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) From 80fd14463be54c13600b27e7fd4a9228a3500712 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 19:27:31 +0100 Subject: [PATCH 106/151] Mention closing bug #424 in the news --- NEWS | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS b/NEWS index 52307ed2..c25e1c38 100644 --- a/NEWS +++ b/NEWS @@ -34,6 +34,7 @@ What's new in psycopg 2.6.2 - Fix build on Windows with Python 3.5, VS 2015 (:ticket:`#380`). - Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`). - Fixed `!read()` exception propagation in copy_from (:ticket:`#412`). +- Fixed possible NULL TZ decref (:ticket:`#424`). What's new in psycopg 2.6.1 From 00de4052d156c5b17dce4fa1539b48e97bb2131c Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 20:04:00 +0100 Subject: [PATCH 107/151] Mention get_dsn_parameters() in news, improved docs metadata --- NEWS | 1 + doc/src/connection.rst | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/NEWS b/NEWS index 5200c4dd..98c309fe 100644 --- a/NEWS +++ b/NEWS @@ -15,6 +15,7 @@ New features: customized replacing them with any object exposing an `!append()` method (:ticket:`#326`). - Added `~psycopg2.extensions.quote_ident()` function (:ticket:`#359`). +- Added `~connection.get_dsn_parameters()` connection method (:ticket:`#364`). 
What's new in psycopg 2.6.2 diff --git a/doc/src/connection.rst b/doc/src/connection.rst index 3d38180a..c99c8bd8 100644 --- a/doc/src/connection.rst +++ b/doc/src/connection.rst @@ -568,6 +568,9 @@ The ``connection`` class .. versionadded:: 2.0.12 + .. index:: + pair: Connection; Parameters + .. method:: get_dsn_parameters() Get the effective dsn parameters for the connection as a dictionary. @@ -585,6 +588,8 @@ The ``connection`` class .. __: http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNINFO + .. versionadded:: 2.7 + .. index:: pair: Transaction; Status From 90ee1ebba5d1da4bd9d8c6e12944308074732f08 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Fri, 1 Jul 2016 20:08:53 +0100 Subject: [PATCH 108/151] errorcodes map updated to PostgreSQL 9.5. --- NEWS | 1 + lib/errorcodes.py | 4 ++++ scripts/make_errorcodes.py | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 5d0d448c..49ed56a9 100644 --- a/NEWS +++ b/NEWS @@ -39,6 +39,7 @@ What's new in psycopg 2.6.2 - Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`). - Fixed `!read()` exception propagation in copy_from (:ticket:`#412`). - Fixed possible NULL TZ decref (:ticket:`#424`). +- `~psycopg2.errorcodes` map updated to PostgreSQL 9.5. What's new in psycopg 2.6.1 diff --git a/lib/errorcodes.py b/lib/errorcodes.py index aa5a723c..60181c1c 100644 --- a/lib/errorcodes.py +++ b/lib/errorcodes.py @@ -199,6 +199,8 @@ INVALID_ESCAPE_SEQUENCE = '22025' STRING_DATA_LENGTH_MISMATCH = '22026' TRIM_ERROR = '22027' ARRAY_SUBSCRIPT_ERROR = '2202E' +INVALID_TABLESAMPLE_REPEAT = '2202G' +INVALID_TABLESAMPLE_ARGUMENT = '2202H' FLOATING_POINT_EXCEPTION = '22P01' INVALID_TEXT_REPRESENTATION = '22P02' INVALID_BINARY_REPRESENTATION = '22P03' @@ -271,6 +273,7 @@ INVALID_SQLSTATE_RETURNED = '39001' NULL_VALUE_NOT_ALLOWED = '39004' TRIGGER_PROTOCOL_VIOLATED = '39P01' SRF_PROTOCOL_VIOLATED = '39P02' +EVENT_TRIGGER_PROTOCOL_VIOLATED = '39P03' # Class 3B - Savepoint Exception SAVEPOINT_EXCEPTION = '3B000' @@ -408,6 +411,7 @@ PLPGSQL_ERROR = 'P0000' RAISE_EXCEPTION = 'P0001' NO_DATA_FOUND = 'P0002' TOO_MANY_ROWS = 'P0003' +ASSERT_FAILURE = 'P0004' # Class XX - Internal Error INTERNAL_ERROR = 'XX000' diff --git a/scripts/make_errorcodes.py b/scripts/make_errorcodes.py index 122e0d56..58d05b85 100755 --- a/scripts/make_errorcodes.py +++ b/scripts/make_errorcodes.py @@ -33,7 +33,7 @@ def main(): file_start = read_base_file(filename) # If you add a version to the list fix the docs (errorcodes.rst, err.rst) classes, errors = fetch_errors( - ['8.1', '8.2', '8.3', '8.4', '9.0', '9.1', '9.2', '9.3', '9.4']) + ['8.1', '8.2', '8.3', '8.4', '9.0', '9.1', '9.2', '9.3', '9.4', '9.5']) f = open(filename, "w") for line in file_start: From 03824a1dba8eb4b82fff3fd6c0a8ae513e72a2a1 Mon Sep 17 00:00:00 2001 From: Alexander Schrijver Date: Sun, 17 Jul 2016 16:32:47 +0200 Subject: [PATCH 109/151] Throw an exception when a NUL character is used as a parameter. --- psycopg/utils.c | 7 ++++++- tests/test_quote.py | 9 +++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/psycopg/utils.c b/psycopg/utils.c index 1b10c4aa..b919180c 100644 --- a/psycopg/utils.c +++ b/psycopg/utils.c @@ -50,8 +50,13 @@ psycopg_escape_string(connectionObject *conn, const char *from, Py_ssize_t len, Py_ssize_t ql; int eq = (conn && (conn->equote)) ? 
1 : 0; - if (len == 0) + if (len == 0) { len = strlen(from); + } else if (strchr(from, '\0') != from + len) { + PyErr_Format(PyExc_ValueError, "A string literal cannot contain NUL (0x00) characters."); + + return NULL; + } if (to == NULL) { to = (char *)PyMem_Malloc((len * 2 + 4) * sizeof(char)); diff --git a/tests/test_quote.py b/tests/test_quote.py index 25d1d31c..7176e1a6 100755 --- a/tests/test_quote.py +++ b/tests/test_quote.py @@ -62,6 +62,15 @@ class QuotingTestCase(ConnectingTestCase): self.assertEqual(res, data) self.assert_(not self.conn.notices) + def test_string_null_terminator(self): + curs = self.conn.cursor() + data = 'abcd\x01\x00cdefg' + + with self.assertRaises(ValueError) as e: + curs.execute("SELECT %s", (data,)) + + self.assertEquals(e.exception.message, 'A string literal cannot contain NUL (0x00) characters.') + def test_binary(self): data = b("""some data with \000\013 binary stuff into, 'quotes' and \\ a backslash too. From 9a4f8f915f6bd994cfb11eaa09fa3d97e48d2f47 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 7 Aug 2016 02:07:16 +0100 Subject: [PATCH 110/151] Reshuffling and indexing of replication docs --- doc/src/advanced.rst | 87 +++++++++++++++++++++++++++++++++++++++++++- doc/src/extras.rst | 82 ++++++----------------------------------- 2 files changed, 97 insertions(+), 72 deletions(-) diff --git a/doc/src/advanced.rst b/doc/src/advanced.rst index f2e279f8..0f1882cc 100644 --- a/doc/src/advanced.rst +++ b/doc/src/advanced.rst @@ -423,7 +423,7 @@ this will be probably implemented in a future release. Support for coroutine libraries ------------------------------- -.. versionadded:: 2.2.0 +.. versionadded:: 2.2 Psycopg can be used together with coroutine_\-based libraries and participate in cooperative multithreading. @@ -509,3 +509,88 @@ resources about the topic. conn.commit() cur.close() conn.close() + + + +.. index:: + single: Replication + +Replication protocol support +---------------------------- + +.. versionadded:: 2.7 + +Modern PostgreSQL servers (version 9.0 and above) support replication. The +replication protocol is built on top of the client-server protocol and can be +operated using ``libpq``, as such it can be also operated by ``psycopg2``. +The replication protocol can be operated on both synchronous and +:ref:`asynchronous ` connections. + +Server version 9.4 adds a new feature called *Logical Replication*. + +.. seealso:: + + - PostgreSQL `Streaming Replication Protocol`__ + + .. __: http://www.postgresql.org/docs/current/static/protocol-replication.html + + +Logical replication Quick-Start +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You must be using PostgreSQL server version 9.4 or above to run this quick +start. + +Make sure that replication connections are permitted for user ``postgres`` in +``pg_hba.conf`` and reload the server configuration. You also need to set +``wal_level=logical`` and ``max_wal_senders``, ``max_replication_slots`` to +value greater than zero in ``postgresql.conf`` (these changes require a server +restart). Create a database ``psycopg2test``. + +Then run the following code to quickly try the replication support out. This +is not production code -- it has no error handling, it sends feedback too +often, etc. 
-- and it's only intended as a simple demo of logical +replication:: + + from __future__ import print_function + import sys + import psycopg2 + import psycopg2.extras + + conn = psycopg2.connect('dbname=psycopg2test user=postgres', + connection_factory=psycopg2.extras.LogicalReplicationConnection) + cur = conn.cursor() + try: + # test_decoding produces textual output + cur.start_replication(slot_name='pytest', decode=True) + except psycopg2.ProgrammingError: + cur.create_replication_slot('pytest', output_plugin='test_decoding') + cur.start_replication(slot_name='pytest', decode=True) + + class DemoConsumer(object): + def __call__(self, msg): + print(msg.payload) + msg.cursor.send_feedback(flush_lsn=msg.data_start) + + democonsumer = DemoConsumer() + + print("Starting streaming, press Control-C to end...", file=sys.stderr) + try: + cur.consume_stream(democonsumer) + except KeyboardInterrupt: + cur.close() + conn.close() + print("The slot 'pytest' still exists. Drop it with " + "SELECT pg_drop_replication_slot('pytest'); if no longer needed.", + file=sys.stderr) + print("WARNING: Transaction logs will accumulate in pg_xlog " + "until the slot is dropped.", file=sys.stderr) + + +You can now make changes to the ``psycopg2test`` database using a normal +psycopg2 session, ``psql``, etc. and see the logical decoding stream printed +by this demo client. + +This will continue running until terminated with ``Control-C``. + +For the details see :ref:`replication-objects`. diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 58b0dc07..b46afdb3 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -141,81 +141,15 @@ Logging cursor .. autoclass:: MinTimeLoggingCursor -Replication protocol support -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Modern PostgreSQL servers (version 9.0 and above) support replication. The -replication protocol is built on top of the client-server protocol and can be -operated using ``libpq``, as such it can be also operated by ``psycopg2``. -The replication protocol can be operated on both synchronous and -:ref:`asynchronous ` connections. - -Server version 9.4 adds a new feature called *Logical Replication*. - -.. seealso:: - - - PostgreSQL `Streaming Replication Protocol`__ - - .. __: http://www.postgresql.org/docs/current/static/protocol-replication.html -Logical replication Quick-Start -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You must be using PostgreSQL server version 9.4 or above to run this quick -start. - -Make sure that replication connections are permitted for user ``postgres`` in -``pg_hba.conf`` and reload the server configuration. You also need to set -``wal_level=logical`` and ``max_wal_senders``, ``max_replication_slots`` to -value greater than zero in ``postgresql.conf`` (these changes require a server -restart). Create a database ``psycopg2test``. - -Then run the following code to quickly try the replication support out. This -is not production code -- it has no error handling, it sends feedback too -often, etc. 
-- and it's only intended as a simple demo of logical -replication:: - - from __future__ import print_function - import sys - import psycopg2 - import psycopg2.extras - - conn = psycopg2.connect('dbname=psycopg2test user=postgres', - connection_factory=psycopg2.extras.LogicalReplicationConnection) - cur = conn.cursor() - try: - cur.start_replication(slot_name='pytest', decode=True) # test_decoding produces textual output - except psycopg2.ProgrammingError: - cur.create_replication_slot('pytest', output_plugin='test_decoding') - cur.start_replication(slot_name='pytest', decode=True) - - class DemoConsumer(object): - def __call__(self, msg): - print(msg.payload) - msg.cursor.send_feedback(flush_lsn=msg.data_start) - - democonsumer = DemoConsumer() - - print("Starting streaming, press Control-C to end...", file=sys.stderr) - try: - cur.consume_stream(democonsumer) - except KeyboardInterrupt: - cur.close() - conn.close() - print("The slot 'pytest' still exists. Drop it with SELECT pg_drop_replication_slot('pytest'); if no longer needed.", file=sys.stderr) - print("WARNING: Transaction logs will accumulate in pg_xlog until the slot is dropped.", file=sys.stderr) - - -You can now make changes to the ``psycopg2test`` database using a normal -psycopg2 session, ``psql``, etc. and see the logical decoding stream printed -by this demo client. - -This will continue running until terminated with ``Control-C``. - +.. _replication-objects: Replication connection and cursor classes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. index:: + pair: Connection; replication .. autoclass:: ReplicationConnectionBase @@ -253,6 +187,9 @@ The following replication types are defined: `ReplicationCursor` for actual communication with the server. +.. index:: + pair: Message; replication + The individual messages in the replication stream are represented by `ReplicationMessage` objects (both logical and physical type): @@ -290,6 +227,9 @@ The individual messages in the replication stream are represented by A reference to the corresponding `ReplicationCursor` object. +.. index:: + pair: Cursor; replication + .. autoclass:: ReplicationCursor .. method:: create_replication_slot(slot_name, slot_type=None, output_plugin=None) From 86434548a7f9b38bf32cdbacfc52b12bea60776c Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 7 Aug 2016 02:23:02 +0100 Subject: [PATCH 111/151] Replication docs massaging, mostly formatting --- doc/src/advanced.rst | 2 ++ doc/src/extras.rst | 22 +++++++++++----------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/doc/src/advanced.rst b/doc/src/advanced.rst index 0f1882cc..258aec93 100644 --- a/doc/src/advanced.rst +++ b/doc/src/advanced.rst @@ -515,6 +515,8 @@ resources about the topic. .. index:: single: Replication +.. _replication-support: + Replication protocol support ---------------------------- diff --git a/doc/src/extras.rst b/doc/src/extras.rst index b46afdb3..78e96efe 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -148,10 +148,7 @@ Logging cursor Replication connection and cursor classes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. index:: - pair: Connection; replication - -.. autoclass:: ReplicationConnectionBase +See :ref:`replication-support` for an introduction to the topic. The following replication types are defined: @@ -160,6 +157,9 @@ The following replication types are defined: .. data:: REPLICATION_PHYSICAL +.. index:: + pair: Connection; replication + .. 
autoclass:: LogicalReplicationConnection This connection factory class can be used to open a special type of @@ -200,8 +200,8 @@ The individual messages in the replication stream are represented by The actual data received from the server. An instance of either `bytes()` or `unicode()`, depending on the value - of `decode` option passed to `ReplicationCursor.start_replication()` - on the connection. See `ReplicationCursor.read_message()` for + of `decode` option passed to `~ReplicationCursor.start_replication()` + on the connection. See `~ReplicationCursor.read_message()` for details. .. attribute:: data_size @@ -353,7 +353,7 @@ The individual messages in the replication stream are represented by .. method:: consume_stream(consume, keepalive_interval=10) - :param consume: a callable object with signature ``consume(msg)`` + :param consume: a callable object with signature :samp:`consume({msg})` :param keepalive_interval: interval (in seconds) to send keepalive messages to the server @@ -451,11 +451,11 @@ The individual messages in the replication stream are represented by (after calling `start_replication()` once). For synchronous connections see `consume_stream()`. - The returned message's `ReplicationMessage.payload` is an instance of - `unicode()` decoded according to connection `connection.encoding` - *iff* `decode` was set to `!True` in the initial call to + The returned message's `~ReplicationMessage.payload` is an instance of + `!unicode` decoded according to connection `~connection.encoding` + *iff* *decode* was set to `!True` in the initial call to `start_replication()` on this connection, otherwise it is an instance - of `bytes()` with no decoding. + of `!bytes` with no decoding. It is expected that the calling code will call this method repeatedly in order to consume all of the messages that might have been buffered From cde19c4d59c1fa1a74bc5503435c5c6a5143994f Mon Sep 17 00:00:00 2001 From: Jonathan Ross Rogers Date: Mon, 1 Aug 2016 12:40:52 -0400 Subject: [PATCH 112/151] Make Range pickleable --- lib/_range.py | 11 +++++++++++ tests/test_types_extras.py | 7 +++++++ 2 files changed, 18 insertions(+) diff --git a/lib/_range.py b/lib/_range.py index 47b82086..b6fe0bdc 100644 --- a/lib/_range.py +++ b/lib/_range.py @@ -171,6 +171,17 @@ class Range(object): else: return self.__gt__(other) + def __getstate__(self): + return dict( + (slot, getattr(self, slot)) + for slot in self.__slots__ + if hasattr(self, slot) + ) + + def __setstate__(self, state): + for slot, value in state.items(): + setattr(self, slot, value) + def register_range(pgrange, pyrange, conn_or_curs, globally=False): """Create and register an adapter and the typecasters to convert between diff --git a/tests/test_types_extras.py b/tests/test_types_extras.py index b81cecab..d8444010 100755 --- a/tests/test_types_extras.py +++ b/tests/test_types_extras.py @@ -20,6 +20,7 @@ import sys from decimal import Decimal from datetime import date, datetime from functools import wraps +from pickle import dumps, loads from testutils import unittest, skip_if_no_uuid, skip_before_postgres from testutils import ConnectingTestCase, decorate_all_tests @@ -1397,6 +1398,12 @@ class RangeTestCase(unittest.TestCase): with py3_raises_typeerror(): self.assert_(Range(1, 2) >= 1) + def test_pickling(self): + from psycopg2.extras import Range + + r = Range(0, 4) + self.assertEqual(loads(dumps(r)), r) + def skip_if_no_range(f): @wraps(f) From ec1e578e4b0adda20ca2d227c7abfc2bee544eab Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 7 Aug 
2016 02:33:38 +0100 Subject: [PATCH 113/151] Report range picklable in NEWS file Fix #462 Conflicts: NEWS --- NEWS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/NEWS b/NEWS index 49ed56a9..2c828c2a 100644 --- a/NEWS +++ b/NEWS @@ -21,6 +21,12 @@ New features: - Added `~connection.get_dsn_parameters()` connection method (:ticket:`#364`). +What's new in psycopg 2.6.3 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Make `~psycopg2.extras.Range` objects picklable (:ticket:`#462`). + + What's new in psycopg 2.6.2 ^^^^^^^^^^^^^^^^^^^^^^^^^^^ From edd51aac25b2cc9f25e0df1d94724b945797832a Mon Sep 17 00:00:00 2001 From: SpootDev Date: Fri, 15 Jul 2016 22:17:34 -0500 Subject: [PATCH 114/151] spelling fix --- doc/src/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/faq.rst b/doc/src/faq.rst index 69273ba5..d0636669 100644 --- a/doc/src/faq.rst +++ b/doc/src/faq.rst @@ -73,7 +73,7 @@ Why does `!cursor.execute()` raise the exception *can't adapt*? I can't pass an integer or a float parameter to my query: it says *a number is required*, but *it is* a number! In your query string, you always have to use ``%s`` placeholders, - event when passing a number. All Python objects are converted by Psycopg + even when passing a number. All Python objects are converted by Psycopg in their SQL representation, so they get passed to the query as strings. See :ref:`query-parameters`. :: From 478f43f0c88ba115f2ec9132b8ecb4b0129794bb Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 7 Aug 2016 02:50:50 +0100 Subject: [PATCH 115/151] Mention NULL characters guard in NEWS file Fix #420. --- NEWS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/NEWS b/NEWS index 2c828c2a..7e6fba48 100644 --- a/NEWS +++ b/NEWS @@ -24,6 +24,8 @@ New features: What's new in psycopg 2.6.3 ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +- Throw an exception trying to pass ``NULL`` chars as parameters + (:ticket:`#420). - Make `~psycopg2.extras.Range` objects picklable (:ticket:`#462`). From 12ecb4b2ce0b3e708d632857832d28a9d196f8d5 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 14 Aug 2016 19:34:24 +0100 Subject: [PATCH 116/151] Dropped import of postgres internal/c.h Stops warning (caused by command line definition of PG_VERSION, so it could have been avoided otherwise), but the file comment says: Note that the definitions here are not intended to be exposed to clients of the frontend interface libraries --- so we don't worry much about polluting the namespace with lots of stuff... so it doesn't seem a good idea gulping it. 
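
[Editorial note, not part of the patch] The diff below keeps only a local typedef for ``XLogRecPtr`` plus the ``%x/%x`` formatting macros. As a purely illustrative sketch of what that formatting does, the same LSN handling in Python might look like::

    def format_lsn(lsn):
        # Split the 64-bit WAL position into its 32-bit halves, as the
        # XLOGFMTSTR "%x/%x" / XLOGFMTARGS macros below do in C.
        return '%x/%x' % (lsn >> 32, lsn & 0xFFFFFFFF)

    def parse_lsn(text):
        # Inverse operation: e.g. '16/B374D848' -> 64-bit integer WAL position.
        hi, lo = text.split('/')
        return (int(hi, 16) << 32) | int(lo, 16)
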
--- psycopg/libpq_support.h | 4 ++-- psycopg/replication_message_type.c | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/psycopg/libpq_support.h b/psycopg/libpq_support.h index 77d7ab12..e57fae99 100644 --- a/psycopg/libpq_support.h +++ b/psycopg/libpq_support.h @@ -26,10 +26,10 @@ #define PSYCOPG_LIBPQ_SUPPORT_H 1 #include "psycopg/config.h" -#include "internal/c.h" -/* type and constant definitions from internal postgres includes not available otherwise */ +/* type and constant definitions from internal postgres include */ typedef unsigned PG_INT64_TYPE XLogRecPtr; +typedef uint32_t uint32; /* have to use lowercase %x, as PyString_FromFormat can't do %X */ #define XLOGFMTSTR "%x/%x" diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c index 358d1497..b37c402e 100644 --- a/psycopg/replication_message_type.c +++ b/psycopg/replication_message_type.c @@ -49,9 +49,10 @@ static PyObject * replmsg_repr(replicationMessageObject *self) { return PyString_FromFormat( - "", + "", self, self->data_size, XLOGFMTARGS(self->data_start), XLOGFMTARGS(self->wal_end), - self->send_time); + (long int)self->send_time); } static int From e5390fed983a18bc6a3cf0479cf026ebcddc17b6 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 14 Aug 2016 19:48:31 +0100 Subject: [PATCH 117/151] Use inttypes.h definitions --- psycopg/libpq_support.c | 22 +++++++++++----------- psycopg/libpq_support.h | 9 ++++----- psycopg/pqpath.c | 2 +- psycopg/replication_message.h | 2 +- 4 files changed, 17 insertions(+), 18 deletions(-) diff --git a/psycopg/libpq_support.c b/psycopg/libpq_support.c index 160c8491..6c0b5f8e 100644 --- a/psycopg/libpq_support.c +++ b/psycopg/libpq_support.c @@ -46,15 +46,15 @@ * backend code. The protocol always uses integer timestamps, regardless of * server setting. */ -pg_int64 +int64_t feGetCurrentTimestamp(void) { - pg_int64 result; + int64_t result; struct timeval tp; gettimeofday(&tp, NULL); - result = (pg_int64) tp.tv_sec - + result = (int64_t) tp.tv_sec - ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); result = (result * USECS_PER_SEC) + tp.tv_usec; @@ -66,17 +66,17 @@ feGetCurrentTimestamp(void) * Converts an int64 to network byte order. */ void -fe_sendint64(pg_int64 i, char *buf) +fe_sendint64(int64_t i, char *buf) { - uint32 n32; + uint32_t n32; /* High order half first, since we're doing MSB-first */ - n32 = (uint32) (i >> 32); + n32 = (uint32_t) (i >> 32); n32 = htonl(n32); memcpy(&buf[0], &n32, 4); /* Now the low order half */ - n32 = (uint32) i; + n32 = (uint32_t) i; n32 = htonl(n32); memcpy(&buf[4], &n32, 4); } @@ -84,12 +84,12 @@ fe_sendint64(pg_int64 i, char *buf) /* * Converts an int64 from network byte order to native format. 
*/ -pg_int64 +int64_t fe_recvint64(char *buf) { - pg_int64 result; - uint32 h32; - uint32 l32; + int64_t result; + uint32_t h32; + uint32_t l32; memcpy(&h32, buf, 4); memcpy(&l32, buf + 4, 4); diff --git a/psycopg/libpq_support.h b/psycopg/libpq_support.h index e57fae99..c8f10665 100644 --- a/psycopg/libpq_support.h +++ b/psycopg/libpq_support.h @@ -29,11 +29,10 @@ /* type and constant definitions from internal postgres include */ typedef unsigned PG_INT64_TYPE XLogRecPtr; -typedef uint32_t uint32; /* have to use lowercase %x, as PyString_FromFormat can't do %X */ #define XLOGFMTSTR "%x/%x" -#define XLOGFMTARGS(x) ((uint32)((x) >> 32)), ((uint32)((x) & 0xFFFFFFFF)) +#define XLOGFMTARGS(x) ((uint32_t)((x) >> 32)), ((uint32_t)((x) & 0xFFFFFFFF)) /* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */ #define UNIX_EPOCH_JDATE 2440588 /* == date2j(1970, 1, 1) */ @@ -42,8 +41,8 @@ typedef uint32_t uint32; #define SECS_PER_DAY 86400 #define USECS_PER_SEC 1000000LL -HIDDEN pg_int64 feGetCurrentTimestamp(void); -HIDDEN void fe_sendint64(pg_int64 i, char *buf); -HIDDEN pg_int64 fe_recvint64(char *buf); +HIDDEN int64_t feGetCurrentTimestamp(void); +HIDDEN void fe_sendint64(int64_t i, char *buf); +HIDDEN int64_t fe_recvint64(char *buf); #endif /* !defined(PSYCOPG_LIBPQ_SUPPORT_H) */ diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index 220ae246..d7283d0c 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1555,7 +1555,7 @@ pq_read_replication_message(replicationCursorObject *repl, replicationMessageObj char *buffer = NULL; int len, data_size, consumed, hdr, reply; XLogRecPtr data_start, wal_end; - pg_int64 send_time; + int64_t send_time; PyObject *str = NULL, *result = NULL; int ret = -1; diff --git a/psycopg/replication_message.h b/psycopg/replication_message.h index 201b9fb4..b4d93d67 100644 --- a/psycopg/replication_message.h +++ b/psycopg/replication_message.h @@ -45,7 +45,7 @@ struct replicationMessageObject { int data_size; XLogRecPtr data_start; XLogRecPtr wal_end; - pg_int64 send_time; + int64_t send_time; }; RAISES_NEG int psyco_replmsg_datetime_init(void); From e0883f19677140ec03288034f1bf5e43f941990b Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 14 Aug 2016 19:57:29 +0100 Subject: [PATCH 118/151] Name the db in the replication test like the unit test one --- doc/src/advanced.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/src/advanced.rst b/doc/src/advanced.rst index 258aec93..5b5fb354 100644 --- a/doc/src/advanced.rst +++ b/doc/src/advanced.rst @@ -547,7 +547,7 @@ Make sure that replication connections are permitted for user ``postgres`` in ``pg_hba.conf`` and reload the server configuration. You also need to set ``wal_level=logical`` and ``max_wal_senders``, ``max_replication_slots`` to value greater than zero in ``postgresql.conf`` (these changes require a server -restart). Create a database ``psycopg2test``. +restart). Create a database ``psycopg2_test``. Then run the following code to quickly try the replication support out. 
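
[Editorial note, not part of the patch] Stepping back to the int64 helpers changed in the previous patch, rough Python equivalents (illustrative only, not psycopg2 API) of what they compute are::

    import struct
    import time

    def fe_get_current_timestamp():
        # Microseconds since 2000-01-01, the PostgreSQL timestamp epoch
        # (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE = 10957 days).
        postgres_epoch_unix_seconds = (2451545 - 2440588) * 86400
        return int((time.time() - postgres_epoch_unix_seconds) * 1000000)

    def fe_sendint64(i):
        # Network (big-endian) byte order, like the two htonl() halves in C.
        return struct.pack('!q', i)

    def fe_recvint64(buf):
        return struct.unpack('!q', buf)[0]
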
This is not production code -- it has no error handling, it sends feedback too @@ -559,7 +559,7 @@ replication:: import psycopg2 import psycopg2.extras - conn = psycopg2.connect('dbname=psycopg2test user=postgres', + conn = psycopg2.connect('dbname=psycopg2_test user=postgres', connection_factory=psycopg2.extras.LogicalReplicationConnection) cur = conn.cursor() try: @@ -589,7 +589,7 @@ replication:: "until the slot is dropped.", file=sys.stderr) -You can now make changes to the ``psycopg2test`` database using a normal +You can now make changes to the ``psycopg2_test`` database using a normal psycopg2 session, ``psql``, etc. and see the logical decoding stream printed by this demo client. From 01c552baa3847819d024a0f945ec2b4f3bbeadba Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 14 Aug 2016 21:09:00 +0100 Subject: [PATCH 119/151] Mention replication support in the NEWS file --- NEWS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/NEWS b/NEWS index 49ed56a9..1cf62686 100644 --- a/NEWS +++ b/NEWS @@ -6,6 +6,8 @@ What's new in psycopg 2.7 New features: +- Added :ref:`replication-support` (:ticket:`#322`). Main authors are + Oleksandr Shulgin and Craig Ringer, who deserve a huge thank you. - Added `~psycopg2.extensions.parse_dsn()` and `~psycopg2.extensions.make_dsn()` functions (:tickets:`#321, #363`). `~psycopg2.connect()` now can take both *dsn* and keyword arguments, merging From 3b41c3a6f373af0100a399cea150a9420ecc4acb Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Mon, 15 Aug 2016 01:06:42 +0100 Subject: [PATCH 120/151] Stop compiling with Python 2.5 --- doc/src/install.rst | 2 +- psycopg/python.h | 4 ++-- setup.py | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/src/install.rst b/doc/src/install.rst index 3a95adc4..674bbac8 100644 --- a/doc/src/install.rst +++ b/doc/src/install.rst @@ -17,7 +17,7 @@ The current `!psycopg2` implementation supports: .. NOTE: keep consistent with setup.py and the /features/ page. -- Python 2 versions from 2.5 to 2.7 +- Python 2 versions from 2.6 to 2.7 - Python 3 versions from 3.1 to 3.5 - PostgreSQL versions from 7.4 to 9.4 diff --git a/psycopg/python.h b/psycopg/python.h index 90c82516..cfb8dad3 100644 --- a/psycopg/python.h +++ b/psycopg/python.h @@ -31,8 +31,8 @@ #include #endif -#if PY_VERSION_HEX < 0x02050000 -# error "psycopg requires Python >= 2.5" +#if PY_VERSION_HEX < 0x02060000 +# error "psycopg requires Python >= 2.6" #endif /* hash() return size changed around version 3.2a4 on 64bit platforms. Before diff --git a/setup.py b/setup.py index 6414a88f..edb13282 100644 --- a/setup.py +++ b/setup.py @@ -33,7 +33,6 @@ Intended Audience :: Developers License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) License :: OSI Approved :: Zope Public License Programming Language :: Python -Programming Language :: Python :: 2.5 Programming Language :: Python :: 2.6 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 From 78649f8e905f04c3000abef23725d557a103abef Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Mon, 15 Aug 2016 01:55:57 +0100 Subject: [PATCH 121/151] Dropped use of b() "macro" and 2to3 fixer Just use the b"" strings syntax supported from python 2.6. 
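
[Editorial note, not part of the patch] For readers skimming the large diff that follows, the change boils down to this (condensed from the ``extensions.py`` hunk below; on Python 2 the helper simply returned ``s`` unchanged)::

    # Old style: a compatibility helper called at runtime for every literal.
    def b(s):
        return s.encode('utf8')     # Python 3 branch; Python 2 returned s as-is

    null = b('NULL')

    # New style: a plain bytes literal, accepted by Python 2.6+ and Python 3 alike.
    null = b'NULL'
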
--- lib/_range.py | 14 ++++++------ lib/extensions.py | 13 ++--------- lib/extras.py | 17 +++++++------- scripts/fix_b.py | 20 ----------------- setup.py | 4 ---- tests/test_cursor.py | 17 +++++++------- tests/test_lobject.py | 35 ++++++++++++++--------------- tests/test_quote.py | 18 +++++++-------- tests/test_types_basic.py | 25 ++++++++++----------- tests/test_types_extras.py | 45 ++++++++++++++++++-------------------- 10 files changed, 84 insertions(+), 124 deletions(-) delete mode 100644 scripts/fix_b.py diff --git a/lib/_range.py b/lib/_range.py index b6fe0bdc..4cfd387c 100644 --- a/lib/_range.py +++ b/lib/_range.py @@ -27,7 +27,7 @@ import re from psycopg2._psycopg import ProgrammingError, InterfaceError -from psycopg2.extensions import ISQLQuote, adapt, register_adapter, b +from psycopg2.extensions import ISQLQuote, adapt, register_adapter from psycopg2.extensions import new_type, new_array_type, register_type class Range(object): @@ -240,7 +240,7 @@ class RangeAdapter(object): r = self.adapted if r.isempty: - return b("'empty'::" + self.name) + return b"'empty'::" + self.name.encode('utf8') if r.lower is not None: a = adapt(r.lower) @@ -248,7 +248,7 @@ class RangeAdapter(object): a.prepare(self._conn) lower = a.getquoted() else: - lower = b('NULL') + lower = b'NULL' if r.upper is not None: a = adapt(r.upper) @@ -256,10 +256,10 @@ class RangeAdapter(object): a.prepare(self._conn) upper = a.getquoted() else: - upper = b('NULL') + upper = b'NULL' - return b(self.name + '(') + lower + b(', ') + upper \ - + b(", '%s')" % r._bounds) + return self.name.encode('utf8') + b'(' + lower + b', ' + upper \ + + b", '" + r._bounds.encode('utf8') + b"')" class RangeCaster(object): @@ -459,7 +459,7 @@ class NumberRangeAdapter(RangeAdapter): def getquoted(self): r = self.adapted if r.isempty: - return b("'empty'") + return b"'empty'" if not r.lower_inf: # not exactly: we are relying that none of these object is really diff --git a/lib/extensions.py b/lib/extensions.py index 21300985..309c6eec 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -103,15 +103,6 @@ TRANSACTION_STATUS_INERROR = 3 TRANSACTION_STATUS_UNKNOWN = 4 -# Return bytes from a string -if _sys.version_info[0] < 3: - def b(s): - return s -else: - def b(s): - return s.encode('utf8') - - def register_adapter(typ, callable): """Register 'callable' as an ISQLQuote adapter for type 'typ'.""" adapters[(typ, ISQLQuote)] = callable @@ -136,7 +127,7 @@ class SQL_IN(object): if hasattr(obj, 'prepare'): obj.prepare(self._conn) qobjs = [o.getquoted() for o in pobjs] - return b('(') + b(', ').join(qobjs) + b(')') + return b'(' + b', '.join(qobjs) + b')' def __str__(self): return str(self.getquoted()) @@ -151,7 +142,7 @@ class NoneAdapter(object): def __init__(self, obj): pass - def getquoted(self, _null=b("NULL")): + def getquoted(self, _null=b"NULL"): return _null diff --git a/lib/extras.py b/lib/extras.py index 6ae98517..7a3a925f 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -40,7 +40,6 @@ from psycopg2 import extensions as _ext from psycopg2.extensions import cursor as _cursor from psycopg2.extensions import connection as _connection from psycopg2.extensions import adapt as _A, quote_ident -from psycopg2.extensions import b from psycopg2._psycopg import REPLICATION_PHYSICAL, REPLICATION_LOGICAL from psycopg2._psycopg import ReplicationConnection as _replicationConnection from psycopg2._psycopg import ReplicationCursor as _replicationCursor @@ -575,7 +574,7 @@ class UUID_adapter(object): return self def getquoted(self): - return 
b("'%s'::uuid" % self._uuid) + return ("'%s'::uuid" % self._uuid).encode('utf8') def __str__(self): return "'%s'::uuid" % self._uuid @@ -635,7 +634,7 @@ class Inet(object): obj = _A(self.addr) if hasattr(obj, 'prepare'): obj.prepare(self._conn) - return obj.getquoted() + b("::inet") + return obj.getquoted() + b"::inet" def __conform__(self, proto): if proto is _ext.ISQLQuote: @@ -742,7 +741,7 @@ class HstoreAdapter(object): def _getquoted_8(self): """Use the operators available in PG pre-9.0.""" if not self.wrapped: - return b("''::hstore") + return b"''::hstore" adapt = _ext.adapt rv = [] @@ -756,23 +755,23 @@ class HstoreAdapter(object): v.prepare(self.conn) v = v.getquoted() else: - v = b('NULL') + v = b'NULL' # XXX this b'ing is painfully inefficient! - rv.append(b("(") + k + b(" => ") + v + b(")")) + rv.append(b"(" + k + b" => " + v + b")") - return b("(") + b('||').join(rv) + b(")") + return b"(" + b'||'.join(rv) + b")" def _getquoted_9(self): """Use the hstore(text[], text[]) function.""" if not self.wrapped: - return b("''::hstore") + return b"''::hstore" k = _ext.adapt(self.wrapped.keys()) k.prepare(self.conn) v = _ext.adapt(self.wrapped.values()) v.prepare(self.conn) - return b("hstore(") + k.getquoted() + b(", ") + v.getquoted() + b(")") + return b"hstore(" + k.getquoted() + b", " + v.getquoted() + b")" getquoted = _getquoted_9 diff --git a/scripts/fix_b.py b/scripts/fix_b.py deleted file mode 100644 index ccc8e1c9..00000000 --- a/scripts/fix_b.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Fixer to change b('string') into b'string'.""" -# Author: Daniele Varrazzo - -import token -from lib2to3 import fixer_base -from lib2to3.pytree import Leaf - -class FixB(fixer_base.BaseFix): - - PATTERN = """ - power< wrapper='b' trailer< '(' arg=[any] ')' > rest=any* > - """ - - def transform(self, node, results): - arg = results['arg'] - wrapper = results["wrapper"] - if len(arg) == 1 and arg[0].type == token.STRING: - b = Leaf(token.STRING, 'b' + arg[0].value, prefix=wrapper.prefix) - node.children = [ b ] + results['rest'] - diff --git a/setup.py b/setup.py index edb13282..45b3b698 100644 --- a/setup.py +++ b/setup.py @@ -75,10 +75,6 @@ else: # workaround subclass for ticket #153 pass - # Configure distutils to run our custom 2to3 fixers as well - from lib2to3.refactor import get_fixers_from_package - build_py.fixer_names = get_fixers_from_package('lib2to3.fixes') \ - + [ 'fix_b' ] sys.path.insert(0, 'scripts') try: diff --git a/tests/test_cursor.py b/tests/test_cursor.py index 970cc37d..3201013d 100755 --- a/tests/test_cursor.py +++ b/tests/test_cursor.py @@ -26,7 +26,6 @@ import time import pickle import psycopg2 import psycopg2.extensions -from psycopg2.extensions import b from testutils import unittest, ConnectingTestCase, skip_before_postgres from testutils import skip_if_no_namedtuple, skip_if_no_getrefcount @@ -63,28 +62,28 @@ class CursorTests(ConnectingTestCase): # unicode query containing only ascii data cur.execute(u"SELECT 'foo';") self.assertEqual('foo', cur.fetchone()[0]) - self.assertEqual(b("SELECT 'foo';"), cur.mogrify(u"SELECT 'foo';")) + self.assertEqual(b"SELECT 'foo';", cur.mogrify(u"SELECT 'foo';")) conn.set_client_encoding('UTF8') snowman = u"\u2603" # unicode query with non-ascii data cur.execute(u"SELECT '%s';" % snowman) - self.assertEqual(snowman.encode('utf8'), b(cur.fetchone()[0])) + self.assertEqual(snowman.encode('utf8'), cur.fetchone()[0].encode('utf8')) self.assertEqual(("SELECT '%s';" % snowman).encode('utf8'), - cur.mogrify(u"SELECT '%s';" % 
snowman).replace(b("E'"), b("'"))) + cur.mogrify(u"SELECT '%s';" % snowman).replace(b"E'", b"'")) # unicode args cur.execute("SELECT %s;", (snowman,)) - self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0])) + self.assertEqual(snowman.encode("utf-8"), cur.fetchone()[0].encode('utf8')) self.assertEqual(("SELECT '%s';" % snowman).encode('utf8'), - cur.mogrify("SELECT %s;", (snowman,)).replace(b("E'"), b("'"))) + cur.mogrify("SELECT %s;", (snowman,)).replace(b"E'", b"'")) # unicode query and args cur.execute(u"SELECT %s;", (snowman,)) - self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0])) + self.assertEqual(snowman.encode("utf-8"), cur.fetchone()[0].encode('utf8')) self.assertEqual(("SELECT '%s';" % snowman).encode('utf8'), - cur.mogrify(u"SELECT %s;", (snowman,)).replace(b("E'"), b("'"))) + cur.mogrify(u"SELECT %s;", (snowman,)).replace(b"E'", b"'")) def test_mogrify_decimal_explodes(self): # issue #7: explodes on windows with python 2.5 and psycopg 2.2.2 @@ -95,7 +94,7 @@ class CursorTests(ConnectingTestCase): conn = self.conn cur = conn.cursor() - self.assertEqual(b('SELECT 10.3;'), + self.assertEqual(b'SELECT 10.3;', cur.mogrify("SELECT %s;", (Decimal("10.3"),))) @skip_if_no_getrefcount diff --git a/tests/test_lobject.py b/tests/test_lobject.py index fb2297fa..7a23e6bd 100755 --- a/tests/test_lobject.py +++ b/tests/test_lobject.py @@ -29,7 +29,6 @@ from functools import wraps import psycopg2 import psycopg2.extensions -from psycopg2.extensions import b from testutils import unittest, decorate_all_tests, skip_if_tpc_disabled from testutils import ConnectingTestCase, skip_if_green @@ -99,7 +98,7 @@ class LargeObjectTests(LargeObjectTestCase): lo = self.conn.lobject() lo2 = self.conn.lobject(lo.oid, "w") self.assertEqual(lo2.mode[0], "w") - lo2.write(b("some data")) + lo2.write(b"some data") def test_open_mode_n(self): # Openning an object in mode "n" gives us a closed lobject. 
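
[Editorial note, not part of the patch] The lobject tests converted here exercise roughly this API (a sketch assuming an open connection ``conn``; not taken verbatim from the test suite)::

    lo = conn.lobject()              # create a new large object
    lo.write(b"some data")
    lo.close()

    lo = conn.lobject(lo.oid, "rb")  # reopen in binary mode: read() returns bytes
    assert lo.read() == b"some data"
    lo.unlink()                      # finally remove it from the database
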
@@ -136,7 +135,7 @@ class LargeObjectTests(LargeObjectTestCase): self.tmpdir = tempfile.mkdtemp() filename = os.path.join(self.tmpdir, "data.txt") fp = open(filename, "wb") - fp.write(b("some data")) + fp.write(b"some data") fp.close() lo = self.conn.lobject(0, "r", 0, filename) @@ -150,7 +149,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_write(self): lo = self.conn.lobject() - self.assertEqual(lo.write(b("some data")), len("some data")) + self.assertEqual(lo.write(b"some data"), len("some data")) def test_write_large(self): lo = self.conn.lobject() @@ -159,7 +158,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_read(self): lo = self.conn.lobject() - length = lo.write(b("some data")) + length = lo.write(b"some data") lo.close() lo = self.conn.lobject(lo.oid) @@ -170,14 +169,14 @@ class LargeObjectTests(LargeObjectTestCase): def test_read_binary(self): lo = self.conn.lobject() - length = lo.write(b("some data")) + length = lo.write(b"some data") lo.close() lo = self.conn.lobject(lo.oid, "rb") x = lo.read(4) - self.assertEqual(type(x), type(b(''))) - self.assertEqual(x, b("some")) - self.assertEqual(lo.read(), b(" data")) + self.assertEqual(type(x), type(b'')) + self.assertEqual(x, b"some") + self.assertEqual(lo.read(), b" data") def test_read_text(self): lo = self.conn.lobject() @@ -206,7 +205,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_seek_tell(self): lo = self.conn.lobject() - length = lo.write(b("some data")) + length = lo.write(b"some data") self.assertEqual(lo.tell(), length) lo.close() lo = self.conn.lobject(lo.oid) @@ -236,7 +235,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_export(self): lo = self.conn.lobject() - lo.write(b("some data")) + lo.write(b"some data") self.tmpdir = tempfile.mkdtemp() filename = os.path.join(self.tmpdir, "data.txt") @@ -244,7 +243,7 @@ class LargeObjectTests(LargeObjectTestCase): self.assertTrue(os.path.exists(filename)) f = open(filename, "rb") try: - self.assertEqual(f.read(), b("some data")) + self.assertEqual(f.read(), b"some data") finally: f.close() @@ -256,7 +255,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_write_after_close(self): lo = self.conn.lobject() lo.close() - self.assertRaises(psycopg2.InterfaceError, lo.write, b("some data")) + self.assertRaises(psycopg2.InterfaceError, lo.write, b"some data") def test_read_after_close(self): lo = self.conn.lobject() @@ -281,7 +280,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_export_after_close(self): lo = self.conn.lobject() - lo.write(b("some data")) + lo.write(b"some data") lo.close() self.tmpdir = tempfile.mkdtemp() @@ -290,7 +289,7 @@ class LargeObjectTests(LargeObjectTestCase): self.assertTrue(os.path.exists(filename)) f = open(filename, "rb") try: - self.assertEqual(f.read(), b("some data")) + self.assertEqual(f.read(), b"some data") finally: f.close() @@ -307,7 +306,7 @@ class LargeObjectTests(LargeObjectTestCase): self.lo_oid = lo.oid self.conn.commit() - self.assertRaises(psycopg2.ProgrammingError, lo.write, b("some data")) + self.assertRaises(psycopg2.ProgrammingError, lo.write, b"some data") def test_read_after_commit(self): lo = self.conn.lobject() @@ -340,7 +339,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_export_after_commit(self): lo = self.conn.lobject() - lo.write(b("some data")) + lo.write(b"some data") self.conn.commit() self.tmpdir = tempfile.mkdtemp() @@ -349,7 +348,7 @@ class LargeObjectTests(LargeObjectTestCase): self.assertTrue(os.path.exists(filename)) f = open(filename, "rb") try: - 
self.assertEqual(f.read(), b("some data")) + self.assertEqual(f.read(), b"some data") finally: f.close() diff --git a/tests/test_quote.py b/tests/test_quote.py index 7176e1a6..f74fd854 100755 --- a/tests/test_quote.py +++ b/tests/test_quote.py @@ -28,7 +28,6 @@ from testutils import unittest, ConnectingTestCase import psycopg2 import psycopg2.extensions -from psycopg2.extensions import b class QuotingTestCase(ConnectingTestCase): @@ -69,12 +68,13 @@ class QuotingTestCase(ConnectingTestCase): with self.assertRaises(ValueError) as e: curs.execute("SELECT %s", (data,)) - self.assertEquals(e.exception.message, 'A string literal cannot contain NUL (0x00) characters.') + self.assertEquals(str(e.exception), + 'A string literal cannot contain NUL (0x00) characters.') def test_binary(self): - data = b("""some data with \000\013 binary + data = b"""some data with \000\013 binary stuff into, 'quotes' and \\ a backslash too. - """) + """ if sys.version_info[0] < 3: data += "".join(map(chr, range(256))) else: @@ -87,7 +87,7 @@ class QuotingTestCase(ConnectingTestCase): else: res = curs.fetchone()[0].tobytes() - if res[0] in (b('x'), ord(b('x'))) and self.conn.server_version >= 90000: + if res[0] in (b'x', ord(b'x')) and self.conn.server_version >= 90000: return self.skipTest( "bytea broken with server >= 9.0, libpq < 9") @@ -199,7 +199,7 @@ class TestStringAdapter(ConnectingTestCase): from psycopg2.extensions import adapt a = adapt("hello") self.assertEqual(a.encoding, 'latin1') - self.assertEqual(a.getquoted(), b("'hello'")) + self.assertEqual(a.getquoted(), b"'hello'") # NOTE: we can't really test an encoding different from utf8, because # when encoding without connection the libpq will use parameters from @@ -222,7 +222,7 @@ class TestStringAdapter(ConnectingTestCase): a = adapt(snowman) a.encoding = 'utf8' self.assertEqual(a.encoding, 'utf8') - self.assertEqual(a.getquoted(), b("'\xe2\x98\x83'")) + self.assertEqual(a.getquoted(), b"'\xe2\x98\x83'") def test_connection_wins_anyway(self): from psycopg2.extensions import adapt @@ -234,7 +234,7 @@ class TestStringAdapter(ConnectingTestCase): a.prepare(self.conn) self.assertEqual(a.encoding, 'utf_8') - self.assertEqual(a.getquoted(), b("'\xe2\x98\x83'")) + self.assertEqual(a.getquoted(), b"'\xe2\x98\x83'") @testutils.skip_before_python(3) def test_adapt_bytes(self): @@ -242,7 +242,7 @@ class TestStringAdapter(ConnectingTestCase): self.conn.set_client_encoding('utf8') a = psycopg2.extensions.QuotedString(snowman.encode('utf8')) a.prepare(self.conn) - self.assertEqual(a.getquoted(), b("'\xe2\x98\x83'")) + self.assertEqual(a.getquoted(), b"'\xe2\x98\x83'") def test_suite(): diff --git a/tests/test_types_basic.py b/tests/test_types_basic.py index 248712b0..f786c2a5 100755 --- a/tests/test_types_basic.py +++ b/tests/test_types_basic.py @@ -30,7 +30,6 @@ import testutils from testutils import unittest, ConnectingTestCase, decorate_all_tests import psycopg2 -from psycopg2.extensions import b class TypesBasicTests(ConnectingTestCase): @@ -190,7 +189,7 @@ class TypesBasicTests(ConnectingTestCase): ss = ['', '{', '{}}', '{' * 20 + '}' * 20] for s in ss: self.assertRaises(psycopg2.DataError, - psycopg2.extensions.STRINGARRAY, b(s), curs) + psycopg2.extensions.STRINGARRAY, s.encode('utf8'), curs) @testutils.skip_before_postgres(8, 2) def testArrayOfNulls(self): @@ -309,9 +308,9 @@ class TypesBasicTests(ConnectingTestCase): def testByteaHexCheckFalsePositive(self): # the check \x -> x to detect bad bytea decode # may be fooled if the first char is really an 'x' - o1 
= psycopg2.Binary(b('x')) + o1 = psycopg2.Binary(b'x') o2 = self.execute("SELECT %s::bytea AS foo", (o1,)) - self.assertEqual(b('x'), o2[0]) + self.assertEqual(b'x', o2[0]) def testNegNumber(self): d1 = self.execute("select -%s;", (decimal.Decimal('-1.0'),)) @@ -362,7 +361,7 @@ class AdaptSubclassTest(unittest.TestCase): register_adapter(A, lambda a: AsIs("a")) register_adapter(B, lambda b: AsIs("b")) try: - self.assertEqual(b('b'), adapt(C()).getquoted()) + self.assertEqual(b'b', adapt(C()).getquoted()) finally: del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote] del psycopg2.extensions.adapters[B, psycopg2.extensions.ISQLQuote] @@ -389,7 +388,7 @@ class AdaptSubclassTest(unittest.TestCase): register_adapter(A, lambda a: AsIs("a")) try: - self.assertEqual(b("a"), adapt(B()).getquoted()) + self.assertEqual(b"a", adapt(B()).getquoted()) finally: del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote] @@ -434,19 +433,19 @@ class ByteaParserTest(unittest.TestCase): self.assertEqual(rv, None) def test_blank(self): - rv = self.cast(b('')) - self.assertEqual(rv, b('')) + rv = self.cast(b'') + self.assertEqual(rv, b'') def test_blank_hex(self): # Reported as problematic in ticket #48 - rv = self.cast(b('\\x')) - self.assertEqual(rv, b('')) + rv = self.cast(b'\\x') + self.assertEqual(rv, b'') def test_full_hex(self, upper=False): buf = ''.join(("%02x" % i) for i in range(256)) if upper: buf = buf.upper() buf = '\\x' + buf - rv = self.cast(b(buf)) + rv = self.cast(buf.encode('utf8')) if sys.version_info[0] < 3: self.assertEqual(rv, ''.join(map(chr, range(256)))) else: @@ -457,7 +456,7 @@ class ByteaParserTest(unittest.TestCase): def test_full_escaped_octal(self): buf = ''.join(("\\%03o" % i) for i in range(256)) - rv = self.cast(b(buf)) + rv = self.cast(buf.encode('utf8')) if sys.version_info[0] < 3: self.assertEqual(rv, ''.join(map(chr, range(256)))) else: @@ -469,7 +468,7 @@ class ByteaParserTest(unittest.TestCase): buf += string.ascii_letters buf += ''.join('\\' + c for c in string.ascii_letters) buf += '\\\\' - rv = self.cast(b(buf)) + rv = self.cast(buf.encode('utf8')) if sys.version_info[0] < 3: tgt = ''.join(map(chr, range(32))) \ + string.ascii_letters * 2 + '\\' diff --git a/tests/test_types_extras.py b/tests/test_types_extras.py index d8444010..8bb6dae2 100755 --- a/tests/test_types_extras.py +++ b/tests/test_types_extras.py @@ -29,14 +29,13 @@ from testutils import py3_raises_typeerror import psycopg2 import psycopg2.extras import psycopg2.extensions as ext -from psycopg2.extensions import b def filter_scs(conn, s): if conn.get_parameter_status("standard_conforming_strings") == 'off': return s else: - return s.replace(b("E'"), b("'")) + return s.replace(b"E'", b"'") class TypesExtrasTests(ConnectingTestCase): """Test that all type conversions are working.""" @@ -99,7 +98,7 @@ class TypesExtrasTests(ConnectingTestCase): a = psycopg2.extensions.adapt(i) a.prepare(self.conn) self.assertEqual( - filter_scs(self.conn, b("E'192.168.1.0/24'::inet")), + filter_scs(self.conn, b"E'192.168.1.0/24'::inet"), a.getquoted()) # adapts ok with unicode too @@ -107,7 +106,7 @@ class TypesExtrasTests(ConnectingTestCase): a = psycopg2.extensions.adapt(i) a.prepare(self.conn) self.assertEqual( - filter_scs(self.conn, b("E'192.168.1.0/24'::inet")), + filter_scs(self.conn, b"E'192.168.1.0/24'::inet"), a.getquoted()) def test_adapt_fail(self): @@ -146,17 +145,17 @@ class HstoreTestCase(ConnectingTestCase): a.prepare(self.conn) q = a.getquoted() - self.assert_(q.startswith(b("((")), q) - 
ii = q[1:-1].split(b("||")) + self.assert_(q.startswith(b"(("), q) + ii = q[1:-1].split(b"||") ii.sort() self.assertEqual(len(ii), len(o)) - self.assertEqual(ii[0], filter_scs(self.conn, b("(E'a' => E'1')"))) - self.assertEqual(ii[1], filter_scs(self.conn, b("(E'b' => E'''')"))) - self.assertEqual(ii[2], filter_scs(self.conn, b("(E'c' => NULL)"))) + self.assertEqual(ii[0], filter_scs(self.conn, b"(E'a' => E'1')")) + self.assertEqual(ii[1], filter_scs(self.conn, b"(E'b' => E'''')")) + self.assertEqual(ii[2], filter_scs(self.conn, b"(E'c' => NULL)")) if 'd' in o: encc = u'\xe0'.encode(psycopg2.extensions.encodings[self.conn.encoding]) - self.assertEqual(ii[3], filter_scs(self.conn, b("(E'd' => E'") + encc + b("')"))) + self.assertEqual(ii[3], filter_scs(self.conn, b"(E'd' => E'" + encc + b"')")) def test_adapt_9(self): if self.conn.server_version < 90000: @@ -172,11 +171,11 @@ class HstoreTestCase(ConnectingTestCase): a.prepare(self.conn) q = a.getquoted() - m = re.match(b(r'hstore\(ARRAY\[([^\]]+)\], ARRAY\[([^\]]+)\]\)'), q) + m = re.match(br'hstore\(ARRAY\[([^\]]+)\], ARRAY\[([^\]]+)\]\)', q) self.assert_(m, repr(q)) - kk = m.group(1).split(b(", ")) - vv = m.group(2).split(b(", ")) + kk = m.group(1).split(b", ") + vv = m.group(2).split(b", ") ii = zip(kk, vv) ii.sort() @@ -184,12 +183,12 @@ class HstoreTestCase(ConnectingTestCase): return tuple([filter_scs(self.conn, s) for s in args]) self.assertEqual(len(ii), len(o)) - self.assertEqual(ii[0], f(b("E'a'"), b("E'1'"))) - self.assertEqual(ii[1], f(b("E'b'"), b("E''''"))) - self.assertEqual(ii[2], f(b("E'c'"), b("NULL"))) + self.assertEqual(ii[0], f(b"E'a'", b"E'1'")) + self.assertEqual(ii[1], f(b"E'b'", b"E''''")) + self.assertEqual(ii[2], f(b"E'c'", b"NULL")) if 'd' in o: encc = u'\xe0'.encode(psycopg2.extensions.encodings[self.conn.encoding]) - self.assertEqual(ii[3], f(b("E'd'"), b("E'") + encc + b("'"))) + self.assertEqual(ii[3], f(b"E'd'", b"E'" + encc + b"'")) def test_parse(self): from psycopg2.extras import HstoreAdapter @@ -455,7 +454,7 @@ class AdaptTypeTestCase(ConnectingTestCase): def test_none_in_record(self): curs = self.conn.cursor() s = curs.mogrify("SELECT %s;", [(42, None)]) - self.assertEqual(b("SELECT (42, NULL);"), s) + self.assertEqual(b"SELECT (42, NULL);", s) curs.execute("SELECT %s;", [(42, None)]) d = curs.fetchone()[0] self.assertEqual("(42,)", d) @@ -475,7 +474,7 @@ class AdaptTypeTestCase(ConnectingTestCase): self.assertEqual(ext.adapt(None).getquoted(), "NOPE!") s = curs.mogrify("SELECT %s;", (None,)) - self.assertEqual(b("SELECT NULL;"), s) + self.assertEqual(b"SELECT NULL;", s) finally: ext.register_adapter(type(None), orig_adapter) @@ -892,7 +891,7 @@ class JsonTestCase(ConnectingTestCase): obj = Decimal('123.45') dumps = lambda obj: json.dumps(obj, cls=DecimalEncoder) self.assertEqual(curs.mogrify("%s", (Json(obj, dumps=dumps),)), - b("'123.45'")) + b"'123.45'") @skip_if_no_json_module def test_adapt_subclass(self): @@ -910,8 +909,7 @@ class JsonTestCase(ConnectingTestCase): curs = self.conn.cursor() obj = Decimal('123.45') - self.assertEqual(curs.mogrify("%s", (MyJson(obj),)), - b("'123.45'")) + self.assertEqual(curs.mogrify("%s", (MyJson(obj),)), b"'123.45'") @skip_if_no_json_module def test_register_on_dict(self): @@ -921,8 +919,7 @@ class JsonTestCase(ConnectingTestCase): try: curs = self.conn.cursor() obj = {'a': 123} - self.assertEqual(curs.mogrify("%s", (obj,)), - b("""'{"a": 123}'""")) + self.assertEqual(curs.mogrify("%s", (obj,)), b"""'{"a": 123}'""") finally: del 
psycopg2.extensions.adapters[dict, ext.ISQLQuote] From 47a312cf836e0c4dc2d198f72a139e9af758f443 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Mon, 15 Aug 2016 02:39:26 +0100 Subject: [PATCH 122/151] Mention dropping Py 2.5 support in NEWS file --- NEWS | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/NEWS b/NEWS index fac54783..d1b9e7d9 100644 --- a/NEWS +++ b/NEWS @@ -22,6 +22,10 @@ New features: - Added `~psycopg2.extensions.quote_ident()` function (:ticket:`#359`). - Added `~connection.get_dsn_parameters()` connection method (:ticket:`#364`). +Other changes: + +- Dropped support for Python 2.5. + What's new in psycopg 2.6.3 ^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 3d4f6df0de210be48fabcad72c54a6915a630798 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Mon, 15 Aug 2016 02:17:47 +0100 Subject: [PATCH 123/151] Enforce dependency on libpq version >= 9.1 PGRES_COPY_BOTH was introduced in 9.1: we can ifdef the hell out of pgpath, but we may as well bury the dead horses instead of beating them. They smell funny, too. --- doc/src/install.rst | 3 ++- psycopg/pqpath.c | 2 +- psycopg/psycopg.h | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/src/install.rst b/doc/src/install.rst index 674bbac8..4611537e 100644 --- a/doc/src/install.rst +++ b/doc/src/install.rst @@ -19,7 +19,8 @@ The current `!psycopg2` implementation supports: - Python 2 versions from 2.6 to 2.7 - Python 3 versions from 3.1 to 3.5 -- PostgreSQL versions from 7.4 to 9.4 +- PostgreSQL server versions from 7.4 to 9.5 +- PostgreSQL client library version from 9.1 .. _PostgreSQL: http://www.postgresql.org/ .. _Python: http://www.python.org/ diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c index d7283d0c..d02cb708 100644 --- a/psycopg/pqpath.c +++ b/psycopg/pqpath.c @@ -1913,7 +1913,7 @@ pq_fetch(cursorObject *curs, int no_result) break; default: - /* PGRES_COPY_BOTH, PGRES_SINGLE_TUPLE, future statuses */ + /* PGRES_SINGLE_TUPLE, future statuses */ Dprintf("pq_fetch: got unsupported result: status = %d pgconn = %p", pgstatus, curs->conn); PyErr_Format(NotSupportedError, diff --git a/psycopg/psycopg.h b/psycopg/psycopg.h index 3174f309..82b4293c 100644 --- a/psycopg/psycopg.h +++ b/psycopg/psycopg.h @@ -26,6 +26,10 @@ #ifndef PSYCOPG_H #define PSYCOPG_H 1 +#if PG_VERSION_NUM < 90100 +#error "Psycopg requires PostgreSQL client library (libpq) >= 9.1" +#endif + #define PY_SSIZE_T_CLEAN #include #include From 5ddc952dbb93167d19d84981debb741011958efc Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Mon, 15 Aug 2016 02:31:39 +0100 Subject: [PATCH 124/151] Dropped ifdef guards against obsolete libpq versions One of them was actually wrong: lobject_type.c wouldn't have compiled pre 8.3 (broken in 6e841a41, 2 years ago). 
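
[Editorial note, not part of the patch] Related to the new libpq >= 9.1 requirement above, an application can check which client library its psycopg2 build uses (a sketch; the printed numbers are examples only)::

    from __future__ import print_function
    import psycopg2
    import psycopg2.extensions

    # libpq version psycopg2 was compiled against, e.g. 90103 for 9.1.3
    print(psycopg2.__libpq_version__)

    # libpq version actually loaded at runtime (may differ from the above)
    print(psycopg2.extensions.libpq_version())
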
--- psycopg/adapter_binary.c | 2 -- psycopg/lobject_int.c | 4 ---- psycopg/lobject_type.c | 7 ------- psycopg/psycopgmodule.c | 7 ------- psycopg/utils.c | 2 -- 5 files changed, 22 deletions(-) diff --git a/psycopg/adapter_binary.c b/psycopg/adapter_binary.c index 597048d2..1727b19a 100644 --- a/psycopg/adapter_binary.c +++ b/psycopg/adapter_binary.c @@ -39,11 +39,9 @@ static unsigned char * binary_escape(unsigned char *from, size_t from_length, size_t *to_length, PGconn *conn) { -#if PG_VERSION_NUM >= 80104 if (conn) return PQescapeByteaConn(conn, from, from_length, to_length); else -#endif return PQescapeBytea(from, from_length, to_length); } diff --git a/psycopg/lobject_int.c b/psycopg/lobject_int.c index 279ef1e2..b954a76b 100644 --- a/psycopg/lobject_int.c +++ b/psycopg/lobject_int.c @@ -474,8 +474,6 @@ lobject_export(lobjectObject *self, const char *filename) return retvalue; } -#if PG_VERSION_NUM >= 80300 - RAISES_NEG int lobject_truncate(lobjectObject *self, size_t len) { @@ -510,5 +508,3 @@ lobject_truncate(lobjectObject *self, size_t len) return retvalue; } - -#endif /* PG_VERSION_NUM >= 80300 */ diff --git a/psycopg/lobject_type.c b/psycopg/lobject_type.c index d15eb20e..ddda0daf 100644 --- a/psycopg/lobject_type.c +++ b/psycopg/lobject_type.c @@ -266,8 +266,6 @@ psyco_lobj_get_closed(lobjectObject *self, void *closure) return closed; } -#if PG_VERSION_NUM >= 80300 - #define psyco_lobj_truncate_doc \ "truncate(len=0) -- Truncate large object to given size." @@ -327,10 +325,8 @@ static struct PyMethodDef lobjectObject_methods[] = { METH_NOARGS, psyco_lobj_unlink_doc}, {"export",(PyCFunction)psyco_lobj_export, METH_VARARGS, psyco_lobj_export_doc}, -#if PG_VERSION_NUM >= 80300 {"truncate",(PyCFunction)psyco_lobj_truncate, METH_VARARGS, psyco_lobj_truncate_doc}, -#endif /* PG_VERSION_NUM >= 80300 */ {NULL} }; @@ -475,6 +471,3 @@ PyTypeObject lobjectType = { 0, /*tp_alloc*/ lobject_new, /*tp_new*/ }; - -#endif - diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c index d4a4c947..012df6b3 100644 --- a/psycopg/psycopgmodule.c +++ b/psycopg/psycopgmodule.c @@ -164,7 +164,6 @@ exit: static PyObject * psyco_quote_ident(PyObject *self, PyObject *args, PyObject *kwargs) { -#if PG_VERSION_NUM >= 90000 PyObject *ident = NULL, *obj = NULL, *result = NULL; connectionObject *conn; const char *str; @@ -204,10 +203,6 @@ exit: Py_XDECREF(ident); return result; -#else - PyErr_SetString(NotSupportedError, "PQescapeIdentifier not available in libpq < 9.0"); - return NULL; -#endif } /** type registration **/ @@ -285,9 +280,7 @@ psyco_libcrypto_threads_init(void) if ((m = PyImport_ImportModule("ssl"))) { /* disable libcrypto setup in libpq, so it won't stomp on the callbacks that have already been set up */ -#if PG_VERSION_NUM >= 80400 PQinitOpenSSL(1, 0); -#endif Py_DECREF(m); } else { diff --git a/psycopg/utils.c b/psycopg/utils.c index b919180c..631b8394 100644 --- a/psycopg/utils.c +++ b/psycopg/utils.c @@ -67,12 +67,10 @@ psycopg_escape_string(connectionObject *conn, const char *from, Py_ssize_t len, } { - #if PG_VERSION_NUM >= 80104 int err; if (conn && conn->pgconn) ql = PQescapeStringConn(conn->pgconn, to+eq+1, from, len, &err); else - #endif ql = PQescapeString(to+eq+1, from, len); } From b3792c7f02c26697dbd32197169cf90cadeef859 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Mon, 15 Aug 2016 02:38:20 +0100 Subject: [PATCH 125/151] Mention dropping Pre 9.1 libpq support in NEWS file --- NEWS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/NEWS b/NEWS index 
d1b9e7d9..6ada19c0 100644 --- a/NEWS +++ b/NEWS @@ -25,6 +25,8 @@ New features: Other changes: - Dropped support for Python 2.5. +- Dropped support for client library older than PostgreSQL 9.1 (but older + server versions are still supported). What's new in psycopg 2.6.3 From 91d2158de7954daccb0a22885021c8416d1d5c6c Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Tue, 11 Oct 2016 00:10:53 +0100 Subject: [PATCH 126/151] Python source cleanup using flake8 --- lib/__init__.py | 21 ++--- lib/_json.py | 14 ++-- lib/_range.py | 39 +++++---- lib/errorcodes.py | 1 + lib/extensions.py | 82 +++++++++--------- lib/extras.py | 100 +++++++++++++--------- lib/pool.py | 38 +++++---- lib/psycopg1.py | 19 +++-- lib/tz.py | 12 +-- scripts/buildtypes.py | 17 ++-- scripts/make_errorcodes.py | 23 +++-- scripts/refcounter.py | 7 +- setup.py | 75 +++++++++-------- tests/__init__.py | 1 + tests/test_async.py | 10 ++- tests/test_bugX000.py | 14 ++-- tests/test_bug_gc.py | 3 + tests/test_cancel.py | 2 + tests/test_connection.py | 55 +++++++----- tests/test_copy.py | 39 +++++---- tests/test_cursor.py | 37 +++++---- tests/test_dates.py | 79 ++++++++++-------- tests/test_errcodes.py | 2 + tests/test_extras_dictcursor.py | 15 ++-- tests/test_green.py | 4 +- tests/test_lobject.py | 25 ++++-- tests/test_notify.py | 10 +-- tests/test_psycopg2_dbapi20.py | 3 +- tests/test_replication.py | 60 +++++++++----- tests/test_transaction.py | 7 +- tests/test_types_basic.py | 79 +++++++++++------- tests/test_types_extras.py | 143 ++++++++++++++++++++------------ tests/test_with.py | 10 ++- tests/testutils.py | 25 ++++-- tox.ini | 5 ++ 35 files changed, 644 insertions(+), 432 deletions(-) diff --git a/lib/__init__.py b/lib/__init__.py index 829e29eb..fb22b4c0 100644 --- a/lib/__init__.py +++ b/lib/__init__.py @@ -47,19 +47,20 @@ Homepage: http://initd.org/projects/psycopg2 # Import the DBAPI-2.0 stuff into top-level module. -from psycopg2._psycopg import BINARY, NUMBER, STRING, DATETIME, ROWID +from psycopg2._psycopg import ( # noqa + BINARY, NUMBER, STRING, DATETIME, ROWID, -from psycopg2._psycopg import Binary, Date, Time, Timestamp -from psycopg2._psycopg import DateFromTicks, TimeFromTicks, TimestampFromTicks + Binary, Date, Time, Timestamp, + DateFromTicks, TimeFromTicks, TimestampFromTicks, -from psycopg2._psycopg import Error, Warning, DataError, DatabaseError, ProgrammingError -from psycopg2._psycopg import IntegrityError, InterfaceError, InternalError -from psycopg2._psycopg import NotSupportedError, OperationalError + Error, Warning, DataError, DatabaseError, ProgrammingError, IntegrityError, + InterfaceError, InternalError, NotSupportedError, OperationalError, -from psycopg2._psycopg import _connect, apilevel, threadsafety, paramstyle -from psycopg2._psycopg import __version__, __libpq_version__ + _connect, apilevel, threadsafety, paramstyle, + __version__, __libpq_version__, +) -from psycopg2 import tz +from psycopg2 import tz # noqa # Register default adapters. @@ -82,7 +83,7 @@ else: def connect(dsn=None, connection_factory=None, cursor_factory=None, - async=False, **kwargs): + async=False, **kwargs): """ Create a new database connection. 
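
[Editorial note, not part of the patch] Since the reformatted ``connect()`` signature above shows the ``async`` flag, this is the usual polling loop such a connection requires (a condensed sketch with a placeholder DSN and no error handling; ``async`` is the parameter name in this version of psycopg2)::

    import select
    import psycopg2
    from psycopg2 import extensions

    aconn = psycopg2.connect('dbname=test', async=True)

    # An asynchronous connection must be polled until POLL_OK is reported.
    while True:
        state = aconn.poll()
        if state == extensions.POLL_OK:
            break
        elif state == extensions.POLL_READ:
            select.select([aconn.fileno()], [], [])
        elif state == extensions.POLL_WRITE:
            select.select([], [aconn.fileno()], [])
        else:
            raise psycopg2.OperationalError("poll() returned %s" % state)
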
diff --git a/lib/_json.py b/lib/_json.py index 26e32f2f..b137a2d9 100644 --- a/lib/_json.py +++ b/lib/_json.py @@ -34,7 +34,7 @@ from psycopg2._psycopg import new_type, new_array_type, register_type # import the best json implementation available -if sys.version_info[:2] >= (2,6): +if sys.version_info[:2] >= (2, 6): import json else: try: @@ -51,6 +51,7 @@ JSONARRAY_OID = 199 JSONB_OID = 3802 JSONBARRAY_OID = 3807 + class Json(object): """ An `~psycopg2.extensions.ISQLQuote` wrapper to adapt a Python object to @@ -106,7 +107,7 @@ class Json(object): def register_json(conn_or_curs=None, globally=False, loads=None, - oid=None, array_oid=None, name='json'): + oid=None, array_oid=None, name='json'): """Create and register typecasters converting :sql:`json` type to Python objects. :param conn_or_curs: a connection or cursor used to find the :sql:`json` @@ -143,6 +144,7 @@ def register_json(conn_or_curs=None, globally=False, loads=None, return JSON, JSONARRAY + def register_default_json(conn_or_curs=None, globally=False, loads=None): """ Create and register :sql:`json` typecasters for PostgreSQL 9.2 and following. @@ -155,6 +157,7 @@ def register_default_json(conn_or_curs=None, globally=False, loads=None): return register_json(conn_or_curs=conn_or_curs, globally=globally, loads=loads, oid=JSON_OID, array_oid=JSONARRAY_OID) + def register_default_jsonb(conn_or_curs=None, globally=False, loads=None): """ Create and register :sql:`jsonb` typecasters for PostgreSQL 9.4 and following. @@ -167,6 +170,7 @@ def register_default_jsonb(conn_or_curs=None, globally=False, loads=None): return register_json(conn_or_curs=conn_or_curs, globally=globally, loads=loads, oid=JSONB_OID, array_oid=JSONBARRAY_OID, name='jsonb') + def _create_json_typecasters(oid, array_oid, loads=None, name='JSON'): """Create typecasters for json data type.""" if loads is None: @@ -188,6 +192,7 @@ def _create_json_typecasters(oid, array_oid, loads=None, name='JSON'): return JSON, JSONARRAY + def _get_json_oids(conn_or_curs, name='json'): # lazy imports from psycopg2.extensions import STATUS_IN_TRANSACTION @@ -204,7 +209,7 @@ def _get_json_oids(conn_or_curs, name='json'): # get the oid for the hstore curs.execute( "SELECT t.oid, %s FROM pg_type t WHERE t.typname = %%s;" - % typarray, (name,)) + % typarray, (name,)) r = curs.fetchone() # revert the status of the connection as before the command @@ -215,6 +220,3 @@ def _get_json_oids(conn_or_curs, name='json'): raise conn.ProgrammingError("%s data type not found" % name) return r - - - diff --git a/lib/_range.py b/lib/_range.py index 4cfd387c..ee9c329e 100644 --- a/lib/_range.py +++ b/lib/_range.py @@ -30,6 +30,7 @@ from psycopg2._psycopg import ProgrammingError, InterfaceError from psycopg2.extensions import ISQLQuote, adapt, register_adapter from psycopg2.extensions import new_type, new_array_type, register_type + class Range(object): """Python representation for a PostgreSQL |range|_ type. 
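
[Editorial note, not part of the patch] The next hunk reworks the bound checks of ``Range``; as a reminder of the semantics they implement, a small sketch using the concrete ``NumericRange`` subclass::

    from psycopg2.extras import NumericRange

    r = NumericRange(10, 20, '[)')   # lower bound included, upper excluded
    assert 10 in r                   # lower_inc
    assert 20 not in r               # upper bound is excluded
    assert not r.lower_inf           # an unbounded side would make this True

    empty = NumericRange(empty=True)
    assert 15 not in empty           # the empty range contains nothing
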
@@ -78,42 +79,50 @@ class Range(object): @property def lower_inf(self): """`!True` if the range doesn't have a lower bound.""" - if self._bounds is None: return False + if self._bounds is None: + return False return self._lower is None @property def upper_inf(self): """`!True` if the range doesn't have an upper bound.""" - if self._bounds is None: return False + if self._bounds is None: + return False return self._upper is None @property def lower_inc(self): """`!True` if the lower bound is included in the range.""" - if self._bounds is None: return False - if self._lower is None: return False + if self._bounds is None or self._lower is None: + return False return self._bounds[0] == '[' @property def upper_inc(self): """`!True` if the upper bound is included in the range.""" - if self._bounds is None: return False - if self._upper is None: return False + if self._bounds is None or self._upper is None: + return False return self._bounds[1] == ']' def __contains__(self, x): - if self._bounds is None: return False + if self._bounds is None: + return False + if self._lower is not None: if self._bounds[0] == '[': - if x < self._lower: return False + if x < self._lower: + return False else: - if x <= self._lower: return False + if x <= self._lower: + return False if self._upper is not None: if self._bounds[1] == ']': - if x > self._upper: return False + if x > self._upper: + return False else: - if x >= self._upper: return False + if x >= self._upper: + return False return True @@ -295,7 +304,8 @@ class RangeCaster(object): self.adapter.name = pgrange else: try: - if issubclass(pgrange, RangeAdapter) and pgrange is not RangeAdapter: + if issubclass(pgrange, RangeAdapter) \ + and pgrange is not RangeAdapter: self.adapter = pgrange except TypeError: pass @@ -436,14 +446,17 @@ class NumericRange(Range): """ pass + class DateRange(Range): """Represents :sql:`daterange` values.""" pass + class DateTimeRange(Range): """Represents :sql:`tsrange` values.""" pass + class DateTimeTZRange(Range): """Represents :sql:`tstzrange` values.""" pass @@ -508,5 +521,3 @@ tsrange_caster._register() tstzrange_caster = RangeCaster('tstzrange', DateTimeTZRange, oid=3910, subtype_oid=1184, array_oid=3911) tstzrange_caster._register() - - diff --git a/lib/errorcodes.py b/lib/errorcodes.py index 60181c1c..f56e25ab 100644 --- a/lib/errorcodes.py +++ b/lib/errorcodes.py @@ -29,6 +29,7 @@ This module contains symbolic names for all PostgreSQL error codes. # http://www.postgresql.org/docs/current/static/errcodes-appendix.html # + def lookup(code, _cache={}): """Lookup an error code or class code and return its symbolic name. diff --git a/lib/extensions.py b/lib/extensions.py index 309c6eec..b123e881 100644 --- a/lib/extensions.py +++ b/lib/extensions.py @@ -33,71 +33,69 @@ This module holds all the extensions to the DBAPI-2.0 provided by psycopg. # License for more details. 
import re as _re -import sys as _sys -from psycopg2._psycopg import UNICODE, INTEGER, LONGINTEGER, BOOLEAN, FLOAT -from psycopg2._psycopg import TIME, DATE, INTERVAL, DECIMAL -from psycopg2._psycopg import BINARYARRAY, BOOLEANARRAY, DATEARRAY, DATETIMEARRAY -from psycopg2._psycopg import DECIMALARRAY, FLOATARRAY, INTEGERARRAY, INTERVALARRAY -from psycopg2._psycopg import LONGINTEGERARRAY, ROWIDARRAY, STRINGARRAY, TIMEARRAY -from psycopg2._psycopg import UNICODEARRAY +from psycopg2._psycopg import ( # noqa + BINARYARRAY, BOOLEAN, BOOLEANARRAY, DATE, DATEARRAY, DATETIMEARRAY, + DECIMAL, DECIMALARRAY, FLOAT, FLOATARRAY, INTEGER, INTEGERARRAY, + INTERVAL, INTERVALARRAY, LONGINTEGER, LONGINTEGERARRAY, ROWIDARRAY, + STRINGARRAY, TIME, TIMEARRAY, UNICODE, UNICODEARRAY, + AsIs, Binary, Boolean, Float, Int, QuotedString, ) -from psycopg2._psycopg import Binary, Boolean, Int, Float, QuotedString, AsIs try: - from psycopg2._psycopg import MXDATE, MXDATETIME, MXINTERVAL, MXTIME - from psycopg2._psycopg import MXDATEARRAY, MXDATETIMEARRAY, MXINTERVALARRAY, MXTIMEARRAY - from psycopg2._psycopg import DateFromMx, TimeFromMx, TimestampFromMx - from psycopg2._psycopg import IntervalFromMx + from psycopg2._psycopg import ( # noqa + MXDATE, MXDATETIME, MXINTERVAL, MXTIME, + MXDATEARRAY, MXDATETIMEARRAY, MXINTERVALARRAY, MXTIMEARRAY, + DateFromMx, TimeFromMx, TimestampFromMx, IntervalFromMx, ) except ImportError: pass try: - from psycopg2._psycopg import PYDATE, PYDATETIME, PYINTERVAL, PYTIME - from psycopg2._psycopg import PYDATEARRAY, PYDATETIMEARRAY, PYINTERVALARRAY, PYTIMEARRAY - from psycopg2._psycopg import DateFromPy, TimeFromPy, TimestampFromPy - from psycopg2._psycopg import IntervalFromPy + from psycopg2._psycopg import ( # noqa + PYDATE, PYDATETIME, PYINTERVAL, PYTIME, + PYDATEARRAY, PYDATETIMEARRAY, PYINTERVALARRAY, PYTIMEARRAY, + DateFromPy, TimeFromPy, TimestampFromPy, IntervalFromPy, ) except ImportError: pass -from psycopg2._psycopg import adapt, adapters, encodings, connection, cursor -from psycopg2._psycopg import lobject, Xid, libpq_version, parse_dsn, quote_ident -from psycopg2._psycopg import string_types, binary_types, new_type, new_array_type, register_type -from psycopg2._psycopg import ISQLQuote, Notify, Diagnostics, Column +from psycopg2._psycopg import ( # noqa + adapt, adapters, encodings, connection, cursor, + lobject, Xid, libpq_version, parse_dsn, quote_ident, + string_types, binary_types, new_type, new_array_type, register_type, + ISQLQuote, Notify, Diagnostics, Column, + QueryCanceledError, TransactionRollbackError, + set_wait_callback, get_wait_callback, ) -from psycopg2._psycopg import QueryCanceledError, TransactionRollbackError - -try: - from psycopg2._psycopg import set_wait_callback, get_wait_callback -except ImportError: - pass """Isolation level values.""" -ISOLATION_LEVEL_AUTOCOMMIT = 0 -ISOLATION_LEVEL_READ_UNCOMMITTED = 4 -ISOLATION_LEVEL_READ_COMMITTED = 1 -ISOLATION_LEVEL_REPEATABLE_READ = 2 -ISOLATION_LEVEL_SERIALIZABLE = 3 +ISOLATION_LEVEL_AUTOCOMMIT = 0 +ISOLATION_LEVEL_READ_UNCOMMITTED = 4 +ISOLATION_LEVEL_READ_COMMITTED = 1 +ISOLATION_LEVEL_REPEATABLE_READ = 2 +ISOLATION_LEVEL_SERIALIZABLE = 3 + """psycopg connection status values.""" -STATUS_SETUP = 0 -STATUS_READY = 1 -STATUS_BEGIN = 2 -STATUS_SYNC = 3 # currently unused -STATUS_ASYNC = 4 # currently unused +STATUS_SETUP = 0 +STATUS_READY = 1 +STATUS_BEGIN = 2 +STATUS_SYNC = 3 # currently unused +STATUS_ASYNC = 4 # currently unused STATUS_PREPARED = 5 # This is a useful mnemonic to check if the connection 
is in a transaction STATUS_IN_TRANSACTION = STATUS_BEGIN + """psycopg asynchronous connection polling values""" -POLL_OK = 0 -POLL_READ = 1 +POLL_OK = 0 +POLL_READ = 1 POLL_WRITE = 2 POLL_ERROR = 3 + """Backend transaction status values.""" -TRANSACTION_STATUS_IDLE = 0 -TRANSACTION_STATUS_ACTIVE = 1 +TRANSACTION_STATUS_IDLE = 0 +TRANSACTION_STATUS_ACTIVE = 1 TRANSACTION_STATUS_INTRANS = 2 TRANSACTION_STATUS_INERROR = 3 TRANSACTION_STATUS_UNKNOWN = 4 @@ -194,7 +192,7 @@ def _param_escape(s, # Create default json typecasters for PostgreSQL 9.2 oids -from psycopg2._json import register_default_json, register_default_jsonb +from psycopg2._json import register_default_json, register_default_jsonb # noqa try: JSON, JSONARRAY = register_default_json() @@ -206,7 +204,7 @@ del register_default_json, register_default_jsonb # Create default Range typecasters -from psycopg2. _range import Range +from psycopg2. _range import Range # noqa del Range diff --git a/lib/extras.py b/lib/extras.py index 7a3a925f..fe74d386 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -40,10 +40,23 @@ from psycopg2 import extensions as _ext from psycopg2.extensions import cursor as _cursor from psycopg2.extensions import connection as _connection from psycopg2.extensions import adapt as _A, quote_ident -from psycopg2._psycopg import REPLICATION_PHYSICAL, REPLICATION_LOGICAL -from psycopg2._psycopg import ReplicationConnection as _replicationConnection -from psycopg2._psycopg import ReplicationCursor as _replicationCursor -from psycopg2._psycopg import ReplicationMessage + +from psycopg2._psycopg import ( # noqa + REPLICATION_PHYSICAL, REPLICATION_LOGICAL, + ReplicationConnection as _replicationConnection, + ReplicationCursor as _replicationCursor, + ReplicationMessage) + + +# expose the json adaptation stuff into the module +from psycopg2._json import ( # noqa + json, Json, register_json, register_default_json, register_default_jsonb) + + +# Expose range-related objects +from psycopg2._range import ( # noqa + Range, NumericRange, DateRange, DateTimeRange, DateTimeTZRange, + register_range, RangeAdapter, RangeCaster) class DictCursorBase(_cursor): @@ -109,6 +122,7 @@ class DictConnection(_connection): kwargs.setdefault('cursor_factory', DictCursor) return super(DictConnection, self).cursor(*args, **kwargs) + class DictCursor(DictCursorBase): """A cursor that keeps a list of column name -> index mappings.""" @@ -133,6 +147,7 @@ class DictCursor(DictCursorBase): self.index[self.description[i][0]] = i self._query_executed = 0 + class DictRow(list): """A row object that allow by-column-name access to data.""" @@ -195,10 +210,10 @@ class DictRow(list): # drop the crusty Py2 methods if _sys.version_info[0] > 2: - items = iteritems; del iteritems - keys = iterkeys; del iterkeys - values = itervalues; del itervalues - del has_key + items = iteritems # noqa + keys = iterkeys # noqa + values = itervalues # noqa + del iteritems, iterkeys, itervalues, has_key class RealDictConnection(_connection): @@ -207,6 +222,7 @@ class RealDictConnection(_connection): kwargs.setdefault('cursor_factory', RealDictCursor) return super(RealDictConnection, self).cursor(*args, **kwargs) + class RealDictCursor(DictCursorBase): """A cursor that uses a real dict as the base type for rows. 
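# Illustrative sketch (not part of the patch): DictCursor/DictRow usage is
# unchanged by the import regrouping and the Python 3 method aliasing above.
# The connection string is a placeholder; adjust it for a real database.
import psycopg2
import psycopg2.extras

conn = psycopg2.connect("dbname=test")  # placeholder DSN, assumption
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute("SELECT 1 AS answer")
row = cur.fetchone()
print(row['answer'], list(row.items()))  # -> 1 [('answer', 1)]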
@@ -236,6 +252,7 @@ class RealDictCursor(DictCursorBase): self.column_mapping.append(self.description[i][0]) self._query_executed = 0 + class RealDictRow(dict): """A `!dict` subclass representing a data record.""" @@ -268,6 +285,7 @@ class NamedTupleConnection(_connection): kwargs.setdefault('cursor_factory', NamedTupleCursor) return super(NamedTupleConnection, self).cursor(*args, **kwargs) + class NamedTupleCursor(_cursor): """A cursor that generates results as `~collections.namedtuple`. @@ -372,11 +390,13 @@ class LoggingConnection(_connection): def _logtofile(self, msg, curs): msg = self.filter(msg, curs) - if msg: self._logobj.write(msg + _os.linesep) + if msg: + self._logobj.write(msg + _os.linesep) def _logtologger(self, msg, curs): msg = self.filter(msg, curs) - if msg: self._logobj.debug(msg) + if msg: + self._logobj.debug(msg) def _check(self): if not hasattr(self, '_logobj'): @@ -388,6 +408,7 @@ class LoggingConnection(_connection): kwargs.setdefault('cursor_factory', LoggingCursor) return super(LoggingConnection, self).cursor(*args, **kwargs) + class LoggingCursor(_cursor): """A cursor that logs queries using its connection logging facilities.""" @@ -428,6 +449,7 @@ class MinTimeLoggingConnection(LoggingConnection): kwargs.setdefault('cursor_factory', MinTimeLoggingCursor) return LoggingConnection.cursor(self, *args, **kwargs) + class MinTimeLoggingCursor(LoggingCursor): """The cursor sub-class companion to `MinTimeLoggingConnection`.""" @@ -479,18 +501,23 @@ class ReplicationCursor(_replicationCursor): if slot_type == REPLICATION_LOGICAL: if output_plugin is None: - raise psycopg2.ProgrammingError("output plugin name is required to create logical replication slot") + raise psycopg2.ProgrammingError( + "output plugin name is required to create " + "logical replication slot") command += "LOGICAL %s" % quote_ident(output_plugin, self) elif slot_type == REPLICATION_PHYSICAL: if output_plugin is not None: - raise psycopg2.ProgrammingError("cannot specify output plugin name when creating physical replication slot") + raise psycopg2.ProgrammingError( + "cannot specify output plugin name when creating " + "physical replication slot") command += "PHYSICAL" else: - raise psycopg2.ProgrammingError("unrecognized replication type: %s" % repr(slot_type)) + raise psycopg2.ProgrammingError( + "unrecognized replication type: %s" % repr(slot_type)) self.execute(command) @@ -513,7 +540,8 @@ class ReplicationCursor(_replicationCursor): if slot_name: command += "SLOT %s " % quote_ident(slot_name, self) else: - raise psycopg2.ProgrammingError("slot name is required for logical replication") + raise psycopg2.ProgrammingError( + "slot name is required for logical replication") command += "LOGICAL " @@ -523,28 +551,32 @@ class ReplicationCursor(_replicationCursor): # don't add "PHYSICAL", before 9.4 it was just START_REPLICATION XXX/XXX else: - raise psycopg2.ProgrammingError("unrecognized replication type: %s" % repr(slot_type)) + raise psycopg2.ProgrammingError( + "unrecognized replication type: %s" % repr(slot_type)) if type(start_lsn) is str: lsn = start_lsn.split('/') lsn = "%X/%08X" % (int(lsn[0], 16), int(lsn[1], 16)) else: - lsn = "%X/%08X" % ((start_lsn >> 32) & 0xFFFFFFFF, start_lsn & 0xFFFFFFFF) + lsn = "%X/%08X" % ((start_lsn >> 32) & 0xFFFFFFFF, + start_lsn & 0xFFFFFFFF) command += lsn if timeline != 0: if slot_type == REPLICATION_LOGICAL: - raise psycopg2.ProgrammingError("cannot specify timeline for logical replication") + raise psycopg2.ProgrammingError( + "cannot specify timeline for 
logical replication") command += " TIMELINE %d" % timeline if options: if slot_type == REPLICATION_PHYSICAL: - raise psycopg2.ProgrammingError("cannot specify output plugin options for physical replication") + raise psycopg2.ProgrammingError( + "cannot specify output plugin options for physical replication") command += " (" - for k,v in options.iteritems(): + for k, v in options.iteritems(): if not command.endswith('('): command += ", " command += "%s %s" % (quote_ident(k, self), _A(str(v))) @@ -579,6 +611,7 @@ class UUID_adapter(object): def __str__(self): return "'%s'::uuid" % self._uuid + def register_uuid(oids=None, conn_or_curs=None): """Create the UUID type and an uuid.UUID adapter. @@ -643,6 +676,7 @@ class Inet(object): def __str__(self): return str(self.addr) + def register_inet(oid=None, conn_or_curs=None): """Create the INET type and an Inet adapter. @@ -862,8 +896,9 @@ WHERE typname = 'hstore'; return tuple(rv0), tuple(rv1) + def register_hstore(conn_or_curs, globally=False, unicode=False, - oid=None, array_oid=None): + oid=None, array_oid=None): """Register adapter and typecaster for `!dict`\-\ |hstore| conversions. :param conn_or_curs: a connection or cursor: the typecaster will be @@ -942,8 +977,8 @@ class CompositeCaster(object): self.oid = oid self.array_oid = array_oid - self.attnames = [ a[0] for a in attrs ] - self.atttypes = [ a[1] for a in attrs ] + self.attnames = [a[0] for a in attrs] + self.atttypes = [a[1] for a in attrs] self._create_type(name, self.attnames) self.typecaster = _ext.new_type((oid,), name, self.parse) if array_oid: @@ -962,8 +997,8 @@ class CompositeCaster(object): "expecting %d components for the type %s, %d found instead" % (len(self.atttypes), self.name, len(tokens))) - values = [ curs.cast(oid, token) - for oid, token in zip(self.atttypes, tokens) ] + values = [curs.cast(oid, token) + for oid, token in zip(self.atttypes, tokens)] return self.make(values) @@ -1057,11 +1092,12 @@ ORDER BY attnum; type_oid = recs[0][0] array_oid = recs[0][1] - type_attrs = [ (r[2], r[3]) for r in recs ] + type_attrs = [(r[2], r[3]) for r in recs] return self(tname, type_oid, type_attrs, array_oid=array_oid, schema=schema) + def register_composite(name, conn_or_curs, globally=False, factory=None): """Register a typecaster to convert a composite type into a tuple. @@ -1084,17 +1120,7 @@ def register_composite(name, conn_or_curs, globally=False, factory=None): _ext.register_type(caster.typecaster, not globally and conn_or_curs or None) if caster.array_typecaster is not None: - _ext.register_type(caster.array_typecaster, not globally and conn_or_curs or None) + _ext.register_type( + caster.array_typecaster, not globally and conn_or_curs or None) return caster - - -# expose the json adaptation stuff into the module -from psycopg2._json import json, Json, register_json -from psycopg2._json import register_default_json, register_default_jsonb - - -# Expose range-related objects -from psycopg2._range import Range, NumericRange -from psycopg2._range import DateRange, DateTimeRange, DateTimeTZRange -from psycopg2._range import register_range, RangeAdapter, RangeCaster diff --git a/lib/pool.py b/lib/pool.py index 8d7c4afb..e57875c8 100644 --- a/lib/pool.py +++ b/lib/pool.py @@ -40,18 +40,18 @@ class AbstractConnectionPool(object): New 'minconn' connections are created immediately calling 'connfunc' with given parameters. The connection pool will support a maximum of - about 'maxconn' connections. + about 'maxconn' connections. 
""" self.minconn = int(minconn) self.maxconn = int(maxconn) self.closed = False - + self._args = args self._kwargs = kwargs self._pool = [] self._used = {} - self._rused = {} # id(conn) -> key map + self._rused = {} # id(conn) -> key map self._keys = 0 for i in range(self.minconn): @@ -71,12 +71,14 @@ class AbstractConnectionPool(object): """Return a new unique key.""" self._keys += 1 return self._keys - + def _getconn(self, key=None): """Get a free connection and assign it to 'key' if not None.""" - if self.closed: raise PoolError("connection pool is closed") - if key is None: key = self._getkey() - + if self.closed: + raise PoolError("connection pool is closed") + if key is None: + key = self._getkey() + if key in self._used: return self._used[key] @@ -88,11 +90,13 @@ class AbstractConnectionPool(object): if len(self._used) == self.maxconn: raise PoolError("connection pool exhausted") return self._connect(key) - + def _putconn(self, conn, key=None, close=False): """Put away a connection.""" - if self.closed: raise PoolError("connection pool is closed") - if key is None: key = self._rused.get(id(conn)) + if self.closed: + raise PoolError("connection pool is closed") + if key is None: + key = self._rused.get(id(conn)) if not key: raise PoolError("trying to put unkeyed connection") @@ -129,21 +133,22 @@ class AbstractConnectionPool(object): an already closed connection. If you call .closeall() make sure your code can deal with it. """ - if self.closed: raise PoolError("connection pool is closed") + if self.closed: + raise PoolError("connection pool is closed") for conn in self._pool + list(self._used.values()): try: conn.close() except: pass self.closed = True - + class SimpleConnectionPool(AbstractConnectionPool): """A connection pool that can't be shared across different threads.""" getconn = AbstractConnectionPool._getconn putconn = AbstractConnectionPool._putconn - closeall = AbstractConnectionPool._closeall + closeall = AbstractConnectionPool._closeall class ThreadedConnectionPool(AbstractConnectionPool): @@ -182,7 +187,7 @@ class ThreadedConnectionPool(AbstractConnectionPool): class PersistentConnectionPool(AbstractConnectionPool): - """A pool that assigns persistent connections to different threads. + """A pool that assigns persistent connections to different threads. Note that this connection pool generates by itself the required keys using the current thread id. This means that until a thread puts away @@ -204,7 +209,7 @@ class PersistentConnectionPool(AbstractConnectionPool): # we we'll need the thread module, to determine thread ids, so we # import it here and copy it in an instance variable - import thread as _thread # work around for 2to3 bug - see ticket #348 + import thread as _thread # work around for 2to3 bug - see ticket #348 self.__thread = _thread def getconn(self): @@ -221,7 +226,8 @@ class PersistentConnectionPool(AbstractConnectionPool): key = self.__thread.get_ident() self._lock.acquire() try: - if not conn: conn = self._used[key] + if not conn: + conn = self._used[key] self._putconn(conn, key, close) finally: self._lock.release() diff --git a/lib/psycopg1.py b/lib/psycopg1.py index 95b36bff..3808aaaf 100644 --- a/lib/psycopg1.py +++ b/lib/psycopg1.py @@ -28,24 +28,26 @@ old code while porting to psycopg 2. Import it as follows:: # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # License for more details. 
-import psycopg2._psycopg as _2psycopg +import psycopg2._psycopg as _2psycopg # noqa from psycopg2.extensions import cursor as _2cursor from psycopg2.extensions import connection as _2connection -from psycopg2 import * +from psycopg2 import * # noqa import psycopg2.extensions as _ext _2connect = connect + def connect(*args, **kwargs): """connect(dsn, ...) -> new psycopg 1.1.x compatible connection object""" kwargs['connection_factory'] = connection conn = _2connect(*args, **kwargs) conn.set_isolation_level(_ext.ISOLATION_LEVEL_READ_COMMITTED) return conn - + + class connection(_2connection): """psycopg 1.1.x connection.""" - + def cursor(self): """cursor() -> new psycopg 1.1.x compatible cursor object""" return _2connection.cursor(self, cursor_factory=cursor) @@ -56,7 +58,7 @@ class connection(_2connection): self.set_isolation_level(_ext.ISOLATION_LEVEL_AUTOCOMMIT) else: self.set_isolation_level(_ext.ISOLATION_LEVEL_READ_COMMITTED) - + class cursor(_2cursor): """psycopg 1.1.x cursor. @@ -71,25 +73,24 @@ class cursor(_2cursor): for i in range(len(self.description)): res[self.description[i][0]] = row[i] return res - + def dictfetchone(self): row = _2cursor.fetchone(self) if row: return self.__build_dict(row) else: return row - + def dictfetchmany(self, size): res = [] rows = _2cursor.fetchmany(self, size) for row in rows: res.append(self.__build_dict(row)) return res - + def dictfetchall(self): res = [] rows = _2cursor.fetchall(self) for row in rows: res.append(self.__build_dict(row)) return res - diff --git a/lib/tz.py b/lib/tz.py index 695a9253..92a16041 100644 --- a/lib/tz.py +++ b/lib/tz.py @@ -2,7 +2,7 @@ This module holds two different tzinfo implementations that can be used as the 'tzinfo' argument to datetime constructors, directly passed to psycopg -functions or used to set the .tzinfo_factory attribute in cursors. +functions or used to set the .tzinfo_factory attribute in cursors. """ # psycopg/tz.py - tzinfo implementation # @@ -31,6 +31,7 @@ import time ZERO = datetime.timedelta(0) + class FixedOffsetTimezone(datetime.tzinfo): """Fixed offset in minutes east from UTC. @@ -52,7 +53,7 @@ class FixedOffsetTimezone(datetime.tzinfo): def __init__(self, offset=None, name=None): if offset is not None: - self._offset = datetime.timedelta(minutes = offset) + self._offset = datetime.timedelta(minutes=offset) if name is not None: self._name = name @@ -85,7 +86,7 @@ class FixedOffsetTimezone(datetime.tzinfo): else: seconds = self._offset.seconds + self._offset.days * 86400 hours, seconds = divmod(seconds, 3600) - minutes = seconds/60 + minutes = seconds / 60 if minutes: return "%+03d:%d" % (hours, minutes) else: @@ -95,13 +96,14 @@ class FixedOffsetTimezone(datetime.tzinfo): return ZERO -STDOFFSET = datetime.timedelta(seconds = -time.timezone) +STDOFFSET = datetime.timedelta(seconds=-time.timezone) if time.daylight: - DSTOFFSET = datetime.timedelta(seconds = -time.altzone) + DSTOFFSET = datetime.timedelta(seconds=-time.altzone) else: DSTOFFSET = STDOFFSET DSTDIFF = DSTOFFSET - STDOFFSET + class LocalTimezone(datetime.tzinfo): """Platform idea of local timezone. diff --git a/scripts/buildtypes.py b/scripts/buildtypes.py index d50a6b66..5ae6c947 100644 --- a/scripts/buildtypes.py +++ b/scripts/buildtypes.py @@ -19,8 +19,8 @@ # code defines the DBAPITypeObject fundamental types and warns for # undefined types. 
-import sys, os, string, copy -from string import split, join, strip +import sys +from string import split, strip # here is the list of the foundamental types we want to import from @@ -37,7 +37,7 @@ basic_types = (['NUMBER', ['INT8', 'INT4', 'INT2', 'FLOAT8', 'FLOAT4', ['STRING', ['NAME', 'CHAR', 'TEXT', 'BPCHAR', 'VARCHAR']], ['BOOLEAN', ['BOOL']], - ['DATETIME', ['TIMESTAMP', 'TIMESTAMPTZ', + ['DATETIME', ['TIMESTAMP', 'TIMESTAMPTZ', 'TINTERVAL', 'INTERVAL']], ['TIME', ['TIME', 'TIMETZ']], ['DATE', ['DATE']], @@ -73,8 +73,7 @@ FOOTER = """ {NULL, NULL, NULL, NULL}\n};\n""" # useful error reporting function def error(msg): """Report an error on stderr.""" - sys.stderr.write(msg+'\n') - + sys.stderr.write(msg + '\n') # read couples from stdin and build list read_types = [] @@ -91,14 +90,14 @@ for t in basic_types: for v in t[1]: found = filter(lambda x, y=v: x[0] == y, read_types) if len(found) == 0: - error(v+': value not found') + error(v + ': value not found') elif len(found) > 1: - error(v+': too many values') + error(v + ': too many values') else: found_types[k].append(int(found[0][1])) # now outputs to stdout the right C-style definitions -stypes = "" ; sstruct = "" +stypes = sstruct = "" for t in basic_types: k = t[0] s = str(found_types[k]) @@ -108,7 +107,7 @@ for t in basic_types: % (k, k, k)) for t in array_types: kt = t[0] - ka = t[0]+'ARRAY' + ka = t[0] + 'ARRAY' s = str(t[1]) s = '{' + s[1:-1] + ', 0}' stypes = stypes + ('static long int typecast_%s_types[] = %s;\n' % (ka, s)) diff --git a/scripts/make_errorcodes.py b/scripts/make_errorcodes.py index 58d05b85..26f7e68a 100755 --- a/scripts/make_errorcodes.py +++ b/scripts/make_errorcodes.py @@ -23,6 +23,7 @@ from collections import defaultdict from BeautifulSoup import BeautifulSoup as BS + def main(): if len(sys.argv) != 2: print >>sys.stderr, "usage: %s /path/to/errorcodes.py" % sys.argv[0] @@ -41,6 +42,7 @@ def main(): for line in generate_module_data(classes, errors): print >>f, line + def read_base_file(filename): rv = [] for line in open(filename): @@ -50,6 +52,7 @@ def read_base_file(filename): raise ValueError("can't find the separator. 
Is this the right file?") + def parse_errors_txt(url): classes = {} errors = defaultdict(dict) @@ -84,6 +87,7 @@ def parse_errors_txt(url): return classes, errors + def parse_errors_sgml(url): page = BS(urllib2.urlopen(url)) table = page('table')[1]('tbody')[0] @@ -92,7 +96,7 @@ def parse_errors_sgml(url): errors = defaultdict(dict) for tr in table('tr'): - if tr.td.get('colspan'): # it's a class + if tr.td.get('colspan'): # it's a class label = ' '.join(' '.join(tr(text=True)).split()) \ .replace(u'\u2014', '-').encode('ascii') assert label.startswith('Class') @@ -100,7 +104,7 @@ def parse_errors_sgml(url): assert len(class_) == 2 classes[class_] = label - else: # it's an error + else: # it's an error errcode = tr.tt.string.encode("ascii") assert len(errcode) == 5 @@ -124,11 +128,12 @@ def parse_errors_sgml(url): return classes, errors errors_sgml_url = \ - "http://www.postgresql.org/docs/%s/static/errcodes-appendix.html" + "http://www.postgresql.org/docs/%s/static/errcodes-appendix.html" errors_txt_url = \ - "http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob_plain;" \ - "f=src/backend/utils/errcodes.txt;hb=REL%s_STABLE" + "http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob_plain;" \ + "f=src/backend/utils/errcodes.txt;hb=REL%s_STABLE" + def fetch_errors(versions): classes = {} @@ -148,14 +153,15 @@ def fetch_errors(versions): return classes, errors + def generate_module_data(classes, errors): yield "" yield "# Error classes" for clscode, clslabel in sorted(classes.items()): err = clslabel.split(" - ")[1].split("(")[0] \ - .strip().replace(" ", "_").replace('/', "_").upper() + .strip().replace(" ", "_").replace('/', "_").upper() yield "CLASS_%s = %r" % (err, clscode) - + for clscode, clslabel in sorted(classes.items()): yield "" yield "# %s" % clslabel @@ -163,7 +169,6 @@ def generate_module_data(classes, errors): for errcode, errlabel in sorted(errors[clscode].items()): yield "%s = %r" % (errlabel, errcode) + if __name__ == '__main__': sys.exit(main()) - - diff --git a/scripts/refcounter.py b/scripts/refcounter.py index 38544fe0..9e900cf7 100755 --- a/scripts/refcounter.py +++ b/scripts/refcounter.py @@ -25,6 +25,7 @@ import unittest from pprint import pprint from collections import defaultdict + def main(): opt = parse_args() @@ -58,6 +59,7 @@ def main(): return rv + def parse_args(): import optparse @@ -83,7 +85,7 @@ def dump(i, opt): c[type(o)] += 1 pprint( - sorted(((v,str(k)) for k,v in c.items()), reverse=True), + sorted(((v, str(k)) for k, v in c.items()), reverse=True), stream=open("debug-%02d.txt" % i, "w")) if opt.objs: @@ -95,7 +97,7 @@ def dump(i, opt): # TODO: very incomplete if t is dict: - co.sort(key = lambda d: d.items()) + co.sort(key=lambda d: d.items()) else: co.sort() @@ -104,4 +106,3 @@ def dump(i, opt): if __name__ == '__main__': sys.exit(main()) - diff --git a/setup.py b/setup.py index 45b3b698..3f021830 100644 --- a/setup.py +++ b/setup.py @@ -25,34 +25,9 @@ UPDATEs. psycopg2 also provide full asynchronous operations and support for coroutine libraries. """ -# note: if you are changing the list of supported Python version please fix -# the docs in install.rst and the /features/ page on the website. 
-classifiers = """\ -Development Status :: 5 - Production/Stable -Intended Audience :: Developers -License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) -License :: OSI Approved :: Zope Public License -Programming Language :: Python -Programming Language :: Python :: 2.6 -Programming Language :: Python :: 2.7 -Programming Language :: Python :: 3 -Programming Language :: Python :: 3.1 -Programming Language :: Python :: 3.2 -Programming Language :: Python :: 3.3 -Programming Language :: Python :: 3.4 -Programming Language :: Python :: 3.5 -Programming Language :: C -Programming Language :: SQL -Topic :: Database -Topic :: Database :: Front-Ends -Topic :: Software Development -Topic :: Software Development :: Libraries :: Python Modules -Operating System :: Microsoft :: Windows -Operating System :: Unix -""" - # Note: The setup.py must be compatible with both Python 2 and 3 + import os import sys import re @@ -87,7 +62,34 @@ except ImportError: PSYCOPG_VERSION = '2.7.dev0' -version_flags = ['dt', 'dec'] + +# note: if you are changing the list of supported Python version please fix +# the docs in install.rst and the /features/ page on the website. +classifiers = """\ +Development Status :: 5 - Production/Stable +Intended Audience :: Developers +License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) +License :: OSI Approved :: Zope Public License +Programming Language :: Python +Programming Language :: Python :: 2.6 +Programming Language :: Python :: 2.7 +Programming Language :: Python :: 3 +Programming Language :: Python :: 3.1 +Programming Language :: Python :: 3.2 +Programming Language :: Python :: 3.3 +Programming Language :: Python :: 3.4 +Programming Language :: Python :: 3.5 +Programming Language :: C +Programming Language :: SQL +Topic :: Database +Topic :: Database :: Front-Ends +Topic :: Software Development +Topic :: Software Development :: Libraries :: Python Modules +Operating System :: Microsoft :: Windows +Operating System :: Unix +""" + +version_flags = ['dt', 'dec'] PLATFORM_IS_WINDOWS = sys.platform.lower().startswith('win') @@ -208,7 +210,7 @@ or with the pg_config option in 'setup.cfg'. 
# Support unicode paths, if this version of Python provides the # necessary infrastructure: if sys.version_info[0] < 3 \ - and hasattr(sys, 'getfilesystemencoding'): + and hasattr(sys, 'getfilesystemencoding'): pg_config_path = pg_config_path.encode( sys.getfilesystemencoding()) @@ -230,7 +232,7 @@ class psycopg_build_ext(build_ext): ('use-pydatetime', None, "Use Python datatime objects for date and time representation."), ('pg-config=', None, - "The name of the pg_config binary and/or full path to find it"), + "The name of the pg_config binary and/or full path to find it"), ('have-ssl', None, "Compile with OpenSSL built PostgreSQL libraries (Windows only)."), ('static-libpq', None, @@ -388,7 +390,7 @@ class psycopg_build_ext(build_ext): if not getattr(self, 'link_objects', None): self.link_objects = [] self.link_objects.append( - os.path.join(pg_config_helper.query("libdir"), "libpq.a")) + os.path.join(pg_config_helper.query("libdir"), "libpq.a")) else: self.libraries.append("pq") @@ -417,7 +419,7 @@ class psycopg_build_ext(build_ext): else: sys.stderr.write( "Error: could not determine PostgreSQL version from '%s'" - % pgversion) + % pgversion) sys.exit(1) define_macros.append(("PG_VERSION_NUM", "%d%02d%02d" % @@ -445,6 +447,7 @@ class psycopg_build_ext(build_ext): if hasattr(self, "finalize_" + sys.platform): getattr(self, "finalize_" + sys.platform)() + def is_py_64(): # sys.maxint not available since Py 3.1; # sys.maxsize not available before Py 2.6; @@ -511,7 +514,7 @@ parser.read('setup.cfg') # Choose a datetime module have_pydatetime = True have_mxdatetime = False -use_pydatetime = int(parser.get('build_ext', 'use_pydatetime')) +use_pydatetime = int(parser.get('build_ext', 'use_pydatetime')) # check for mx package if parser.has_option('build_ext', 'mx_include_dir'): @@ -547,8 +550,8 @@ you probably need to install its companion -dev or -devel package.""" sys.exit(1) # generate a nice version string to avoid confusion when users report bugs -version_flags.append('pq3') # no more a choice -version_flags.append('ext') # no more a choice +version_flags.append('pq3') # no more a choice +version_flags.append('ext') # no more a choice if version_flags: PSYCOPG_VERSION_EX = PSYCOPG_VERSION + " (%s)" % ' '.join(version_flags) @@ -580,8 +583,8 @@ for define in parser.get('build_ext', 'define').split(','): # build the extension -sources = [ os.path.join('psycopg', x) for x in sources] -depends = [ os.path.join('psycopg', x) for x in depends] +sources = [os.path.join('psycopg', x) for x in sources] +depends = [os.path.join('psycopg', x) for x in depends] ext.append(Extension("psycopg2._psycopg", sources, define_macros=define_macros, diff --git a/tests/__init__.py b/tests/__init__.py index 2e51cc2b..ada55276 100755 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -52,6 +52,7 @@ if sys.version_info[:2] >= (2, 5): else: test_with = None + def test_suite(): # If connection to test db fails, bail out early. 
import psycopg2 diff --git a/tests/test_async.py b/tests/test_async.py index e0bca7d5..6f8fed58 100755 --- a/tests/test_async.py +++ b/tests/test_async.py @@ -33,6 +33,7 @@ import StringIO from testutils import ConnectingTestCase + class PollableStub(object): """A 'pollable' wrapper allowing analysis of the `poll()` calls.""" def __init__(self, pollable): @@ -68,6 +69,7 @@ class AsyncTests(ConnectingTestCase): def test_connection_setup(self): cur = self.conn.cursor() sync_cur = self.sync_conn.cursor() + del cur, sync_cur self.assert_(self.conn.async) self.assert_(not self.sync_conn.async) @@ -77,7 +79,7 @@ class AsyncTests(ConnectingTestCase): # check other properties to be found on the connection self.assert_(self.conn.server_version) - self.assert_(self.conn.protocol_version in (2,3)) + self.assert_(self.conn.protocol_version in (2, 3)) self.assert_(self.conn.encoding in psycopg2.extensions.encodings) def test_async_named_cursor(self): @@ -108,6 +110,7 @@ class AsyncTests(ConnectingTestCase): def test_async_after_async(self): cur = self.conn.cursor() cur2 = self.conn.cursor() + del cur2 cur.execute("insert into table1 values (1)") @@ -422,14 +425,14 @@ class AsyncTests(ConnectingTestCase): def test_async_cursor_gone(self): import gc cur = self.conn.cursor() - cur.execute("select 42;"); + cur.execute("select 42;") del cur gc.collect() self.assertRaises(psycopg2.InterfaceError, self.wait, self.conn) # The connection is still usable cur = self.conn.cursor() - cur.execute("select 42;"); + cur.execute("select 42;") self.wait(self.conn) self.assertEqual(cur.fetchone(), (42,)) @@ -449,4 +452,3 @@ def test_suite(): if __name__ == "__main__": unittest.main() - diff --git a/tests/test_bugX000.py b/tests/test_bugX000.py index efa593ec..fbd2a9f6 100755 --- a/tests/test_bugX000.py +++ b/tests/test_bugX000.py @@ -26,15 +26,17 @@ import psycopg2 import time import unittest + class DateTimeAllocationBugTestCase(unittest.TestCase): def test_date_time_allocation_bug(self): - d1 = psycopg2.Date(2002,12,25) - d2 = psycopg2.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) - t1 = psycopg2.Time(13,45,30) - t2 = psycopg2.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) - t1 = psycopg2.Timestamp(2002,12,25,13,45,30) + d1 = psycopg2.Date(2002, 12, 25) + d2 = psycopg2.DateFromTicks(time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))) + t1 = psycopg2.Time(13, 45, 30) + t2 = psycopg2.TimeFromTicks(time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))) + t1 = psycopg2.Timestamp(2002, 12, 25, 13, 45, 30) t2 = psycopg2.TimestampFromTicks( - time.mktime((2002,12,25,13,45,30,0,0,0))) + time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0))) + del d1, d2, t1, t2 def test_suite(): diff --git a/tests/test_bug_gc.py b/tests/test_bug_gc.py index 1551dc47..084236ef 100755 --- a/tests/test_bug_gc.py +++ b/tests/test_bug_gc.py @@ -29,6 +29,7 @@ import gc from testutils import ConnectingTestCase, skip_if_no_uuid + class StolenReferenceTestCase(ConnectingTestCase): @skip_if_no_uuid def test_stolen_reference_bug(self): @@ -41,8 +42,10 @@ class StolenReferenceTestCase(ConnectingTestCase): curs.execute("select 'b5219e01-19ab-4994-b71e-149225dc51e4'::uuid") curs.fetchone() + def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_cancel.py b/tests/test_cancel.py index 0ffa742a..a8eb7506 100755 --- a/tests/test_cancel.py +++ b/tests/test_cancel.py @@ -32,6 +32,7 @@ from psycopg2 import extras from testconfig import dsn from testutils import unittest, 
ConnectingTestCase, skip_before_postgres + class CancelTests(ConnectingTestCase): def setUp(self): @@ -71,6 +72,7 @@ class CancelTests(ConnectingTestCase): except Exception, e: errors.append(e) raise + del cur thread1 = threading.Thread(target=neverending, args=(self.conn, )) # wait a bit to make sure that the other thread is already in diff --git a/tests/test_connection.py b/tests/test_connection.py index 8aa5a2b5..8744488d 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -27,17 +27,16 @@ import sys import time import threading from operator import attrgetter -from StringIO import StringIO import psycopg2 import psycopg2.errorcodes -import psycopg2.extensions -ext = psycopg2.extensions +from psycopg2 import extensions as ext + +from testutils import ( + unittest, decorate_all_tests, skip_if_no_superuser, + skip_before_postgres, skip_after_postgres, skip_before_libpq, + ConnectingTestCase, skip_if_tpc_disabled, skip_if_windows) -from testutils import unittest, decorate_all_tests, skip_if_no_superuser -from testutils import skip_before_postgres, skip_after_postgres, skip_before_libpq -from testutils import ConnectingTestCase, skip_if_tpc_disabled -from testutils import skip_if_windows from testconfig import dsn, dbname @@ -112,8 +111,14 @@ class ConnectionTests(ConnectingTestCase): cur = conn.cursor() if self.conn.server_version >= 90300: cur.execute("set client_min_messages=debug1") - cur.execute("create temp table table1 (id serial); create temp table table2 (id serial);") - cur.execute("create temp table table3 (id serial); create temp table table4 (id serial);") + cur.execute(""" + create temp table table1 (id serial); + create temp table table2 (id serial); + """) + cur.execute(""" + create temp table table3 (id serial); + create temp table table4 (id serial); + """) self.assertEqual(4, len(conn.notices)) self.assert_('table1' in conn.notices[0]) self.assert_('table2' in conn.notices[1]) @@ -126,7 +131,8 @@ class ConnectionTests(ConnectingTestCase): if self.conn.server_version >= 90300: cur.execute("set client_min_messages=debug1") for i in range(0, 100, 10): - sql = " ".join(["create temp table table%d (id serial);" % j for j in range(i, i + 10)]) + sql = " ".join(["create temp table table%d (id serial);" % j + for j in range(i, i + 10)]) cur.execute(sql) self.assertEqual(50, len(conn.notices)) @@ -141,8 +147,13 @@ class ConnectionTests(ConnectingTestCase): if self.conn.server_version >= 90300: cur.execute("set client_min_messages=debug1") - cur.execute("create temp table table1 (id serial); create temp table table2 (id serial);") - cur.execute("create temp table table3 (id serial); create temp table table4 (id serial);") + cur.execute(""" + create temp table table1 (id serial); + create temp table table2 (id serial); + """) + cur.execute(""" + create temp table table3 (id serial); + create temp table table4 (id serial);""") self.assertEqual(len(conn.notices), 4) self.assert_('table1' in conn.notices.popleft()) self.assert_('table2' in conn.notices.popleft()) @@ -152,7 +163,8 @@ class ConnectionTests(ConnectingTestCase): # not limited, but no error for i in range(0, 100, 10): - sql = " ".join(["create temp table table2_%d (id serial);" % j for j in range(i, i + 10)]) + sql = " ".join(["create temp table table2_%d (id serial);" % j + for j in range(i, i + 10)]) cur.execute(sql) self.assertEqual(len([n for n in conn.notices if 'CREATE TABLE' in n]), @@ -315,16 +327,18 @@ class ParseDsnTestCase(ConnectingTestCase): def test_parse_dsn(self): from psycopg2 import 
ProgrammingError - self.assertEqual(ext.parse_dsn('dbname=test user=tester password=secret'), - dict(user='tester', password='secret', dbname='test'), - "simple DSN parsed") + self.assertEqual( + ext.parse_dsn('dbname=test user=tester password=secret'), + dict(user='tester', password='secret', dbname='test'), + "simple DSN parsed") self.assertRaises(ProgrammingError, ext.parse_dsn, "dbname=test 2 user=tester password=secret") - self.assertEqual(ext.parse_dsn("dbname='test 2' user=tester password=secret"), - dict(user='tester', password='secret', dbname='test 2'), - "DSN with quoting parsed") + self.assertEqual( + ext.parse_dsn("dbname='test 2' user=tester password=secret"), + dict(user='tester', password='secret', dbname='test 2'), + "DSN with quoting parsed") # Can't really use assertRaisesRegexp() here since we need to # make sure that secret is *not* exposed in the error messgage @@ -485,7 +499,8 @@ class IsolationLevelsTestCase(ConnectingTestCase): levels = [ (None, psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT), - ('read uncommitted', psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED), + ('read uncommitted', + psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED), ('read committed', psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED), ('repeatable read', psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ), ('serializable', psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE), diff --git a/tests/test_copy.py b/tests/test_copy.py index 32134215..ac42c980 100755 --- a/tests/test_copy.py +++ b/tests/test_copy.py @@ -39,7 +39,8 @@ from testconfig import dsn if sys.version_info[0] < 3: _base = object else: - from io import TextIOBase as _base + from io import TextIOBase as _base + class MinimalRead(_base): """A file wrapper exposing the minimal interface to copy from.""" @@ -52,6 +53,7 @@ class MinimalRead(_base): def readline(self): return self.f.readline() + class MinimalWrite(_base): """A file wrapper exposing the minimal interface to copy to.""" def __init__(self, f): @@ -78,7 +80,7 @@ class CopyTests(ConnectingTestCase): def test_copy_from(self): curs = self.conn.cursor() try: - self._copy_from(curs, nrecs=1024, srec=10*1024, copykw={}) + self._copy_from(curs, nrecs=1024, srec=10 * 1024, copykw={}) finally: curs.close() @@ -86,8 +88,8 @@ class CopyTests(ConnectingTestCase): # Trying to trigger a "would block" error curs = self.conn.cursor() try: - self._copy_from(curs, nrecs=10*1024, srec=10*1024, - copykw={'size': 20*1024*1024}) + self._copy_from(curs, nrecs=10 * 1024, srec=10 * 1024, + copykw={'size': 20 * 1024 * 1024}) finally: curs.close() @@ -110,6 +112,7 @@ class CopyTests(ConnectingTestCase): f.write("%s\n" % (i,)) f.seek(0) + def cols(): raise ZeroDivisionError() yield 'id' @@ -120,8 +123,8 @@ class CopyTests(ConnectingTestCase): def test_copy_to(self): curs = self.conn.cursor() try: - self._copy_from(curs, nrecs=1024, srec=10*1024, copykw={}) - self._copy_to(curs, srec=10*1024) + self._copy_from(curs, nrecs=1024, srec=10 * 1024, copykw={}) + self._copy_to(curs, srec=10 * 1024) finally: curs.close() @@ -209,9 +212,11 @@ class CopyTests(ConnectingTestCase): exp_size = 123 # hack here to leave file as is, only check size when reading real_read = f.read + def read(_size, f=f, exp_size=exp_size): self.assertEqual(_size, exp_size) return real_read(_size) + f.read = read curs.copy_expert('COPY tcopy (data) FROM STDIN', f, size=exp_size) curs.execute("select data from tcopy;") @@ -221,7 +226,7 @@ class CopyTests(ConnectingTestCase): f = StringIO() for i, c in izip(xrange(nrecs), 
cycle(string.ascii_letters)): l = c * srec - f.write("%s\t%s\n" % (i,l)) + f.write("%s\t%s\n" % (i, l)) f.seek(0) curs.copy_from(MinimalRead(f), "tcopy", **copykw) @@ -258,24 +263,24 @@ class CopyTests(ConnectingTestCase): curs.copy_expert, 'COPY tcopy (data) FROM STDIN', f) def test_copy_no_column_limit(self): - cols = [ "c%050d" % i for i in range(200) ] + cols = ["c%050d" % i for i in range(200)] curs = self.conn.cursor() curs.execute('CREATE TEMPORARY TABLE manycols (%s)' % ',\n'.join( - [ "%s int" % c for c in cols])) + ["%s int" % c for c in cols])) curs.execute("INSERT INTO manycols DEFAULT VALUES") f = StringIO() - curs.copy_to(f, "manycols", columns = cols) + curs.copy_to(f, "manycols", columns=cols) f.seek(0) self.assertEqual(f.read().split(), ['\\N'] * len(cols)) f.seek(0) - curs.copy_from(f, "manycols", columns = cols) + curs.copy_from(f, "manycols", columns=cols) curs.execute("select count(*) from manycols;") self.assertEqual(curs.fetchone()[0], 2) - @skip_before_postgres(8, 2) # they don't send the count + @skip_before_postgres(8, 2) # they don't send the count def test_copy_rowcount(self): curs = self.conn.cursor() @@ -316,7 +321,7 @@ try: except psycopg2.ProgrammingError: pass conn.close() -""" % { 'dsn': dsn,}) +""" % {'dsn': dsn}) proc = Popen([sys.executable, '-c', script_to_py3(script)]) proc.communicate() @@ -334,7 +339,7 @@ try: except psycopg2.ProgrammingError: pass conn.close() -""" % { 'dsn': dsn,}) +""" % {'dsn': dsn}) proc = Popen([sys.executable, '-c', script_to_py3(script)], stdout=PIPE) proc.communicate() @@ -343,10 +348,10 @@ conn.close() def test_copy_from_propagate_error(self): class BrokenRead(_base): def read(self, size): - return 1/0 + return 1 / 0 def readline(self): - return 1/0 + return 1 / 0 curs = self.conn.cursor() # It seems we cannot do this, but now at least we propagate the error @@ -360,7 +365,7 @@ conn.close() def test_copy_to_propagate_error(self): class BrokenWrite(_base): def write(self, data): - return 1/0 + return 1 / 0 curs = self.conn.cursor() curs.execute("insert into tcopy values (10, 'hi')") diff --git a/tests/test_cursor.py b/tests/test_cursor.py index 3201013d..4fab2c4c 100755 --- a/tests/test_cursor.py +++ b/tests/test_cursor.py @@ -29,6 +29,7 @@ import psycopg2.extensions from testutils import unittest, ConnectingTestCase, skip_before_postgres from testutils import skip_if_no_namedtuple, skip_if_no_getrefcount + class CursorTests(ConnectingTestCase): def test_close_idempotent(self): @@ -47,8 +48,10 @@ class CursorTests(ConnectingTestCase): conn = self.conn cur = conn.cursor() cur.execute("create temp table test_exc (data int);") + def buggygen(): - yield 1//0 + yield 1 // 0 + self.assertRaises(ZeroDivisionError, cur.executemany, "insert into test_exc values (%s)", buggygen()) cur.close() @@ -102,8 +105,7 @@ class CursorTests(ConnectingTestCase): # issue #81: reference leak when a parameter value is referenced # more than once from a dict. 
cur = self.conn.cursor() - i = lambda x: x - foo = i('foo') * 10 + foo = (lambda x: x)('foo') * 10 import sys nref1 = sys.getrefcount(foo) cur.mogrify("select %(foo)s, %(foo)s, %(foo)s", {'foo': foo}) @@ -135,7 +137,7 @@ class CursorTests(ConnectingTestCase): self.assertEqual(Decimal('123.45'), curs.cast(1700, '123.45')) from datetime import date - self.assertEqual(date(2011,1,2), curs.cast(1082, '2011-01-02')) + self.assertEqual(date(2011, 1, 2), curs.cast(1082, '2011-01-02')) self.assertEqual("who am i?", curs.cast(705, 'who am i?')) # unknown def test_cast_specificity(self): @@ -158,7 +160,8 @@ class CursorTests(ConnectingTestCase): curs = self.conn.cursor() w = ref(curs) del curs - import gc; gc.collect() + import gc + gc.collect() self.assert_(w() is None) def test_null_name(self): @@ -168,7 +171,7 @@ class CursorTests(ConnectingTestCase): def test_invalid_name(self): curs = self.conn.cursor() curs.execute("create temp table invname (data int);") - for i in (10,20,30): + for i in (10, 20, 30): curs.execute("insert into invname values (%s)", (i,)) curs.close() @@ -193,16 +196,16 @@ class CursorTests(ConnectingTestCase): self._create_withhold_table() curs = self.conn.cursor("W") - self.assertEqual(curs.withhold, False); + self.assertEqual(curs.withhold, False) curs.withhold = True - self.assertEqual(curs.withhold, True); + self.assertEqual(curs.withhold, True) curs.execute("select data from withhold order by data") self.conn.commit() self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)]) curs.close() curs = self.conn.cursor("W", withhold=True) - self.assertEqual(curs.withhold, True); + self.assertEqual(curs.withhold, True) curs.execute("select data from withhold order by data") self.conn.commit() self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)]) @@ -264,18 +267,18 @@ class CursorTests(ConnectingTestCase): curs = self.conn.cursor() curs.execute("create table scrollable (data int)") curs.executemany("insert into scrollable values (%s)", - [ (i,) for i in range(100) ]) + [(i,) for i in range(100)]) curs.close() for t in range(2): if not t: curs = self.conn.cursor("S") - self.assertEqual(curs.scrollable, None); + self.assertEqual(curs.scrollable, None) curs.scrollable = True else: curs = self.conn.cursor("S", scrollable=True) - self.assertEqual(curs.scrollable, True); + self.assertEqual(curs.scrollable, True) curs.itersize = 10 # complex enough to make postgres cursors declare without @@ -303,7 +306,7 @@ class CursorTests(ConnectingTestCase): curs = self.conn.cursor() curs.execute("create table scrollable (data int)") curs.executemany("insert into scrollable values (%s)", - [ (i,) for i in range(100) ]) + [(i,) for i in range(100)]) curs.close() curs = self.conn.cursor("S") # default scrollability @@ -340,18 +343,18 @@ class CursorTests(ConnectingTestCase): def test_iter_named_cursor_default_itersize(self): curs = self.conn.cursor('tmp') curs.execute('select generate_series(1,50)') - rv = [ (r[0], curs.rownumber) for r in curs ] + rv = [(r[0], curs.rownumber) for r in curs] # everything swallowed in one gulp - self.assertEqual(rv, [(i,i) for i in range(1,51)]) + self.assertEqual(rv, [(i, i) for i in range(1, 51)]) @skip_before_postgres(8, 0) def test_iter_named_cursor_itersize(self): curs = self.conn.cursor('tmp') curs.itersize = 30 curs.execute('select generate_series(1,50)') - rv = [ (r[0], curs.rownumber) for r in curs ] + rv = [(r[0], curs.rownumber) for r in curs] # everything swallowed in two gulps - self.assertEqual(rv, [(i,((i - 1) % 30) + 1) for i in range(1,51)]) + 
self.assertEqual(rv, [(i, ((i - 1) % 30) + 1) for i in range(1, 51)]) @skip_before_postgres(8, 0) def test_iter_named_cursor_rownumber(self): diff --git a/tests/test_dates.py b/tests/test_dates.py index d6ce3482..3463d001 100755 --- a/tests/test_dates.py +++ b/tests/test_dates.py @@ -27,6 +27,7 @@ import psycopg2 from psycopg2.tz import FixedOffsetTimezone, ZERO from testutils import unittest, ConnectingTestCase, skip_before_postgres + class CommonDatetimeTestsMixin: def execute(self, *args): @@ -144,10 +145,10 @@ class DatetimeTests(ConnectingTestCase, CommonDatetimeTestsMixin): # The Python datetime module does not support time zone # offsets that are not a whole number of minutes. # We round the offset to the nearest minute. - self.check_time_tz("+01:15:00", 60 * (60 + 15)) - self.check_time_tz("+01:15:29", 60 * (60 + 15)) - self.check_time_tz("+01:15:30", 60 * (60 + 16)) - self.check_time_tz("+01:15:59", 60 * (60 + 16)) + self.check_time_tz("+01:15:00", 60 * (60 + 15)) + self.check_time_tz("+01:15:29", 60 * (60 + 15)) + self.check_time_tz("+01:15:30", 60 * (60 + 16)) + self.check_time_tz("+01:15:59", 60 * (60 + 16)) self.check_time_tz("-01:15:00", -60 * (60 + 15)) self.check_time_tz("-01:15:29", -60 * (60 + 15)) self.check_time_tz("-01:15:30", -60 * (60 + 16)) @@ -180,10 +181,10 @@ class DatetimeTests(ConnectingTestCase, CommonDatetimeTestsMixin): # The Python datetime module does not support time zone # offsets that are not a whole number of minutes. # We round the offset to the nearest minute. - self.check_datetime_tz("+01:15:00", 60 * (60 + 15)) - self.check_datetime_tz("+01:15:29", 60 * (60 + 15)) - self.check_datetime_tz("+01:15:30", 60 * (60 + 16)) - self.check_datetime_tz("+01:15:59", 60 * (60 + 16)) + self.check_datetime_tz("+01:15:00", 60 * (60 + 15)) + self.check_datetime_tz("+01:15:29", 60 * (60 + 15)) + self.check_datetime_tz("+01:15:30", 60 * (60 + 16)) + self.check_datetime_tz("+01:15:59", 60 * (60 + 16)) self.check_datetime_tz("-01:15:00", -60 * (60 + 15)) self.check_datetime_tz("-01:15:29", -60 * (60 + 15)) self.check_datetime_tz("-01:15:30", -60 * (60 + 16)) @@ -269,32 +270,32 @@ class DatetimeTests(ConnectingTestCase, CommonDatetimeTestsMixin): def test_type_roundtrip_date(self): from datetime import date - self._test_type_roundtrip(date(2010,5,3)) + self._test_type_roundtrip(date(2010, 5, 3)) def test_type_roundtrip_datetime(self): from datetime import datetime - dt = self._test_type_roundtrip(datetime(2010,5,3,10,20,30)) + dt = self._test_type_roundtrip(datetime(2010, 5, 3, 10, 20, 30)) self.assertEqual(None, dt.tzinfo) def test_type_roundtrip_datetimetz(self): from datetime import datetime import psycopg2.tz - tz = psycopg2.tz.FixedOffsetTimezone(8*60) - dt1 = datetime(2010,5,3,10,20,30, tzinfo=tz) + tz = psycopg2.tz.FixedOffsetTimezone(8 * 60) + dt1 = datetime(2010, 5, 3, 10, 20, 30, tzinfo=tz) dt2 = self._test_type_roundtrip(dt1) self.assertNotEqual(None, dt2.tzinfo) self.assertEqual(dt1, dt2) def test_type_roundtrip_time(self): from datetime import time - tm = self._test_type_roundtrip(time(10,20,30)) + tm = self._test_type_roundtrip(time(10, 20, 30)) self.assertEqual(None, tm.tzinfo) def test_type_roundtrip_timetz(self): from datetime import time import psycopg2.tz - tz = psycopg2.tz.FixedOffsetTimezone(8*60) - tm1 = time(10,20,30, tzinfo=tz) + tz = psycopg2.tz.FixedOffsetTimezone(8 * 60) + tm1 = time(10, 20, 30, tzinfo=tz) tm2 = self._test_type_roundtrip(tm1) self.assertNotEqual(None, tm2.tzinfo) self.assertEqual(tm1, tm2) @@ -305,15 +306,15 @@ class 
DatetimeTests(ConnectingTestCase, CommonDatetimeTestsMixin): def test_type_roundtrip_date_array(self): from datetime import date - self._test_type_roundtrip_array(date(2010,5,3)) + self._test_type_roundtrip_array(date(2010, 5, 3)) def test_type_roundtrip_datetime_array(self): from datetime import datetime - self._test_type_roundtrip_array(datetime(2010,5,3,10,20,30)) + self._test_type_roundtrip_array(datetime(2010, 5, 3, 10, 20, 30)) def test_type_roundtrip_time_array(self): from datetime import time - self._test_type_roundtrip_array(time(10,20,30)) + self._test_type_roundtrip_array(time(10, 20, 30)) def test_type_roundtrip_interval_array(self): from datetime import timedelta @@ -355,8 +356,10 @@ class mxDateTimeTests(ConnectingTestCase, CommonDatetimeTestsMixin): psycopg2.extensions.register_type(self.INTERVAL, self.conn) psycopg2.extensions.register_type(psycopg2.extensions.MXDATEARRAY, self.conn) psycopg2.extensions.register_type(psycopg2.extensions.MXTIMEARRAY, self.conn) - psycopg2.extensions.register_type(psycopg2.extensions.MXDATETIMEARRAY, self.conn) - psycopg2.extensions.register_type(psycopg2.extensions.MXINTERVALARRAY, self.conn) + psycopg2.extensions.register_type( + psycopg2.extensions.MXDATETIMEARRAY, self.conn) + psycopg2.extensions.register_type( + psycopg2.extensions.MXINTERVALARRAY, self.conn) def tearDown(self): self.conn.close() @@ -479,15 +482,15 @@ class mxDateTimeTests(ConnectingTestCase, CommonDatetimeTestsMixin): def test_type_roundtrip_date(self): from mx.DateTime import Date - self._test_type_roundtrip(Date(2010,5,3)) + self._test_type_roundtrip(Date(2010, 5, 3)) def test_type_roundtrip_datetime(self): from mx.DateTime import DateTime - self._test_type_roundtrip(DateTime(2010,5,3,10,20,30)) + self._test_type_roundtrip(DateTime(2010, 5, 3, 10, 20, 30)) def test_type_roundtrip_time(self): from mx.DateTime import Time - self._test_type_roundtrip(Time(10,20,30)) + self._test_type_roundtrip(Time(10, 20, 30)) def test_type_roundtrip_interval(self): from mx.DateTime import DateTimeDeltaFrom @@ -495,15 +498,15 @@ class mxDateTimeTests(ConnectingTestCase, CommonDatetimeTestsMixin): def test_type_roundtrip_date_array(self): from mx.DateTime import Date - self._test_type_roundtrip_array(Date(2010,5,3)) + self._test_type_roundtrip_array(Date(2010, 5, 3)) def test_type_roundtrip_datetime_array(self): from mx.DateTime import DateTime - self._test_type_roundtrip_array(DateTime(2010,5,3,10,20,30)) + self._test_type_roundtrip_array(DateTime(2010, 5, 3, 10, 20, 30)) def test_type_roundtrip_time_array(self): from mx.DateTime import Time - self._test_type_roundtrip_array(Time(10,20,30)) + self._test_type_roundtrip_array(Time(10, 20, 30)) def test_type_roundtrip_interval_array(self): from mx.DateTime import DateTimeDeltaFrom @@ -549,22 +552,30 @@ class FixedOffsetTimezoneTests(unittest.TestCase): def test_repr_with_positive_offset(self): tzinfo = FixedOffsetTimezone(5 * 60) - self.assertEqual(repr(tzinfo), "psycopg2.tz.FixedOffsetTimezone(offset=300, name=None)") + self.assertEqual(repr(tzinfo), + "psycopg2.tz.FixedOffsetTimezone(offset=300, name=None)") def test_repr_with_negative_offset(self): tzinfo = FixedOffsetTimezone(-5 * 60) - self.assertEqual(repr(tzinfo), "psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)") + self.assertEqual(repr(tzinfo), + "psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)") def test_repr_with_name(self): tzinfo = FixedOffsetTimezone(name="FOO") - self.assertEqual(repr(tzinfo), "psycopg2.tz.FixedOffsetTimezone(offset=0, name='FOO')") + 
self.assertEqual(repr(tzinfo), + "psycopg2.tz.FixedOffsetTimezone(offset=0, name='FOO')") def test_instance_caching(self): - self.assert_(FixedOffsetTimezone(name="FOO") is FixedOffsetTimezone(name="FOO")) - self.assert_(FixedOffsetTimezone(7 * 60) is FixedOffsetTimezone(7 * 60)) - self.assert_(FixedOffsetTimezone(-9 * 60, 'FOO') is FixedOffsetTimezone(-9 * 60, 'FOO')) - self.assert_(FixedOffsetTimezone(9 * 60) is not FixedOffsetTimezone(9 * 60, 'FOO')) - self.assert_(FixedOffsetTimezone(name='FOO') is not FixedOffsetTimezone(9 * 60, 'FOO')) + self.assert_(FixedOffsetTimezone(name="FOO") + is FixedOffsetTimezone(name="FOO")) + self.assert_(FixedOffsetTimezone(7 * 60) + is FixedOffsetTimezone(7 * 60)) + self.assert_(FixedOffsetTimezone(-9 * 60, 'FOO') + is FixedOffsetTimezone(-9 * 60, 'FOO')) + self.assert_(FixedOffsetTimezone(9 * 60) + is not FixedOffsetTimezone(9 * 60, 'FOO')) + self.assert_(FixedOffsetTimezone(name='FOO') + is not FixedOffsetTimezone(9 * 60, 'FOO')) def test_pickle(self): # ticket #135 diff --git a/tests/test_errcodes.py b/tests/test_errcodes.py index 6cf5ddba..6865194f 100755 --- a/tests/test_errcodes.py +++ b/tests/test_errcodes.py @@ -32,6 +32,7 @@ except NameError: from threading import Thread from psycopg2 import errorcodes + class ErrocodeTests(ConnectingTestCase): def test_lookup_threadsafe(self): @@ -39,6 +40,7 @@ class ErrocodeTests(ConnectingTestCase): MAX_CYCLES = 2000 errs = [] + def f(pg_code='40001'): try: errorcodes.lookup(pg_code) diff --git a/tests/test_extras_dictcursor.py b/tests/test_extras_dictcursor.py index f2feffff..20393c66 100755 --- a/tests/test_extras_dictcursor.py +++ b/tests/test_extras_dictcursor.py @@ -39,7 +39,8 @@ class ExtrasDictCursorTests(ConnectingTestCase): self.assert_(isinstance(cur, psycopg2.extras.DictCursor)) self.assertEqual(cur.name, None) # overridable - cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.NamedTupleCursor) + cur = self.conn.cursor('foo', + cursor_factory=psycopg2.extras.NamedTupleCursor) self.assertEqual(cur.name, 'foo') self.assert_(isinstance(cur, psycopg2.extras.NamedTupleCursor)) @@ -80,7 +81,6 @@ class ExtrasDictCursorTests(ConnectingTestCase): self.failUnless(row[0] == 'bar') return row - def testDictCursorWithPlainCursorRealFetchOne(self): self._testWithPlainCursorReal(lambda curs: curs.fetchone()) @@ -110,7 +110,6 @@ class ExtrasDictCursorTests(ConnectingTestCase): row = getter(curs) self.failUnless(row['foo'] == 'bar') - def testDictCursorWithNamedCursorFetchOne(self): self._testWithNamedCursor(lambda curs: curs.fetchone()) @@ -146,7 +145,6 @@ class ExtrasDictCursorTests(ConnectingTestCase): self.failUnless(row['foo'] == 'bar') self.failUnless(row[0] == 'bar') - def testDictCursorRealWithNamedCursorFetchOne(self): self._testWithNamedCursorReal(lambda curs: curs.fetchone()) @@ -176,12 +174,12 @@ class ExtrasDictCursorTests(ConnectingTestCase): self._testIterRowNumber(curs) def _testWithNamedCursorReal(self, getter): - curs = self.conn.cursor('aname', cursor_factory=psycopg2.extras.RealDictCursor) + curs = self.conn.cursor('aname', + cursor_factory=psycopg2.extras.RealDictCursor) curs.execute("SELECT * FROM ExtrasDictCursorTests") row = getter(curs) self.failUnless(row['foo'] == 'bar') - def _testNamedCursorNotGreedy(self, curs): curs.itersize = 2 curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""") @@ -235,7 +233,7 @@ class NamedTupleCursorTest(ConnectingTestCase): from psycopg2.extras import NamedTupleConnection try: - from collections import namedtuple + from 
collections import namedtuple # noqa except ImportError: return @@ -346,7 +344,7 @@ class NamedTupleCursorTest(ConnectingTestCase): def test_error_message(self): try: - from collections import namedtuple + from collections import namedtuple # noqa except ImportError: # an import error somewhere from psycopg2.extras import NamedTupleConnection @@ -390,6 +388,7 @@ class NamedTupleCursorTest(ConnectingTestCase): from psycopg2.extras import NamedTupleCursor f_orig = NamedTupleCursor._make_nt calls = [0] + def f_patched(self_): calls[0] += 1 return f_orig(self_) diff --git a/tests/test_green.py b/tests/test_green.py index 506b38fe..0424a2cc 100755 --- a/tests/test_green.py +++ b/tests/test_green.py @@ -29,6 +29,7 @@ import psycopg2.extras from testutils import ConnectingTestCase + class ConnectionStub(object): """A `connection` wrapper allowing analysis of the `poll()` calls.""" def __init__(self, conn): @@ -43,6 +44,7 @@ class ConnectionStub(object): self.polls.append(rv) return rv + class GreenTestCase(ConnectingTestCase): def setUp(self): self._cb = psycopg2.extensions.get_wait_callback() @@ -89,7 +91,7 @@ class GreenTestCase(ConnectingTestCase): curs.fetchone() # now try to do something that will fail in the callback - psycopg2.extensions.set_wait_callback(lambda conn: 1//0) + psycopg2.extensions.set_wait_callback(lambda conn: 1 // 0) self.assertRaises(ZeroDivisionError, curs.execute, "select 2") self.assert_(conn.closed) diff --git a/tests/test_lobject.py b/tests/test_lobject.py index 7a23e6bd..4da20e95 100755 --- a/tests/test_lobject.py +++ b/tests/test_lobject.py @@ -32,6 +32,7 @@ import psycopg2.extensions from testutils import unittest, decorate_all_tests, skip_if_tpc_disabled from testutils import ConnectingTestCase, skip_if_green + def skip_if_no_lo(f): @wraps(f) def skip_if_no_lo_(self): @@ -158,7 +159,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_read(self): lo = self.conn.lobject() - length = lo.write(b"some data") + lo.write(b"some data") lo.close() lo = self.conn.lobject(lo.oid) @@ -169,7 +170,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_read_binary(self): lo = self.conn.lobject() - length = lo.write(b"some data") + lo.write(b"some data") lo.close() lo = self.conn.lobject(lo.oid, "rb") @@ -181,7 +182,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_read_text(self): lo = self.conn.lobject() snowman = u"\u2603" - length = lo.write(u"some data " + snowman) + lo.write(u"some data " + snowman) lo.close() lo = self.conn.lobject(lo.oid, "rt") @@ -193,7 +194,7 @@ class LargeObjectTests(LargeObjectTestCase): def test_read_large(self): lo = self.conn.lobject() data = "data" * 1000000 - length = lo.write("some" + data) + lo.write("some" + data) lo.close() lo = self.conn.lobject(lo.oid) @@ -399,6 +400,7 @@ def skip_if_no_truncate(f): return skip_if_no_truncate_ + class LargeObjectTruncateTests(LargeObjectTestCase): def test_truncate(self): lo = self.conn.lobject() @@ -450,15 +452,19 @@ def _has_lo64(conn): return (True, "this server and build support the lo64 API") + def skip_if_no_lo64(f): @wraps(f) def skip_if_no_lo64_(self): lo64, msg = _has_lo64(self.conn) - if not lo64: return self.skipTest(msg) - else: return f(self) + if not lo64: + return self.skipTest(msg) + else: + return f(self) return skip_if_no_lo64_ + class LargeObject64Tests(LargeObjectTestCase): def test_seek_tell_truncate_greater_than_2gb(self): lo = self.conn.lobject() @@ -477,11 +483,14 @@ def skip_if_lo64(f): @wraps(f) def skip_if_lo64_(self): lo64, msg = _has_lo64(self.conn) - if 
lo64: return self.skipTest(msg) - else: return f(self) + if lo64: + return self.skipTest(msg) + else: + return f(self) return skip_if_lo64_ + class LargeObjectNot64Tests(LargeObjectTestCase): def test_seek_larger_than_2gb(self): lo = self.conn.lobject() diff --git a/tests/test_notify.py b/tests/test_notify.py index fc6224d7..1a0ac457 100755 --- a/tests/test_notify.py +++ b/tests/test_notify.py @@ -67,8 +67,8 @@ curs.execute("NOTIFY " %(name)r %(payload)r) curs.close() conn.close() """ % { - 'module': psycopg2.__name__, - 'dsn': dsn, 'sec': sec, 'name': name, 'payload': payload}) + 'module': psycopg2.__name__, + 'dsn': dsn, 'sec': sec, 'name': name, 'payload': payload}) return Popen([sys.executable, '-c', script_to_py3(script)], stdout=PIPE) @@ -79,7 +79,7 @@ conn.close() proc = self.notify('foo', 1) t0 = time.time() - ready = select.select([self.conn], [], [], 5) + select.select([self.conn], [], [], 5) t1 = time.time() self.assert_(0.99 < t1 - t0 < 4, t1 - t0) @@ -107,7 +107,7 @@ conn.close() names = dict.fromkeys(['foo', 'bar', 'baz']) for (pid, name) in self.conn.notifies: self.assertEqual(pids[name], pid) - names.pop(name) # raise if name found twice + names.pop(name) # raise if name found twice def test_notifies_received_on_execute(self): self.autocommit(self.conn) @@ -217,6 +217,6 @@ conn.close() def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) + if __name__ == "__main__": unittest.main() - diff --git a/tests/test_psycopg2_dbapi20.py b/tests/test_psycopg2_dbapi20.py index 744d3224..80473b70 100755 --- a/tests/test_psycopg2_dbapi20.py +++ b/tests/test_psycopg2_dbapi20.py @@ -30,12 +30,13 @@ import psycopg2 from testconfig import dsn + class Psycopg2Tests(dbapi20.DatabaseAPI20Test): driver = psycopg2 connect_args = () connect_kw_args = {'dsn': dsn} - lower_func = 'lower' # For stored procedure test + lower_func = 'lower' # For stored procedure test def test_setoutputsize(self): # psycopg2's setoutputsize() is a no-op diff --git a/tests/test_replication.py b/tests/test_replication.py index f527edd2..ca99038a 100644 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -24,8 +24,8 @@ import psycopg2 import psycopg2.extensions -from psycopg2.extras import PhysicalReplicationConnection, LogicalReplicationConnection -from psycopg2.extras import StopReplication +from psycopg2.extras import ( + PhysicalReplicationConnection, LogicalReplicationConnection, StopReplication) import testconfig from testutils import unittest @@ -70,14 +70,16 @@ class ReplicationTestCase(ConnectingTestCase): # generate some events for our replication stream def make_replication_events(self): conn = self.connect() - if conn is None: return + if conn is None: + return cur = conn.cursor() try: cur.execute("DROP TABLE dummy1") except psycopg2.ProgrammingError: conn.rollback() - cur.execute("CREATE TABLE dummy1 AS SELECT * FROM generate_series(1, 5) AS id") + cur.execute( + "CREATE TABLE dummy1 AS SELECT * FROM generate_series(1, 5) AS id") conn.commit() @@ -85,7 +87,8 @@ class ReplicationTest(ReplicationTestCase): @skip_before_postgres(9, 0) def test_physical_replication_connection(self): conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) - if conn is None: return + if conn is None: + return cur = conn.cursor() cur.execute("IDENTIFY_SYSTEM") cur.fetchall() @@ -93,41 +96,49 @@ class ReplicationTest(ReplicationTestCase): @skip_before_postgres(9, 4) def test_logical_replication_connection(self): conn = 
self.repl_connect(connection_factory=LogicalReplicationConnection) - if conn is None: return + if conn is None: + return cur = conn.cursor() cur.execute("IDENTIFY_SYSTEM") cur.fetchall() - @skip_before_postgres(9, 4) # slots require 9.4 + @skip_before_postgres(9, 4) # slots require 9.4 def test_create_replication_slot(self): conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) - if conn is None: return + if conn is None: + return cur = conn.cursor() self.create_replication_slot(cur) - self.assertRaises(psycopg2.ProgrammingError, self.create_replication_slot, cur) + self.assertRaises( + psycopg2.ProgrammingError, self.create_replication_slot, cur) - @skip_before_postgres(9, 4) # slots require 9.4 + @skip_before_postgres(9, 4) # slots require 9.4 def test_start_on_missing_replication_slot(self): conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) - if conn is None: return + if conn is None: + return cur = conn.cursor() - self.assertRaises(psycopg2.ProgrammingError, cur.start_replication, self.slot) + self.assertRaises(psycopg2.ProgrammingError, + cur.start_replication, self.slot) self.create_replication_slot(cur) cur.start_replication(self.slot) - @skip_before_postgres(9, 4) # slots require 9.4 + @skip_before_postgres(9, 4) # slots require 9.4 def test_start_and_recover_from_error(self): conn = self.repl_connect(connection_factory=LogicalReplicationConnection) - if conn is None: return + if conn is None: + return cur = conn.cursor() self.create_replication_slot(cur, output_plugin='test_decoding') # try with invalid options - cur.start_replication(slot_name=self.slot, options={'invalid_param': 'value'}) + cur.start_replication( + slot_name=self.slot, options={'invalid_param': 'value'}) + def consume(msg): pass # we don't see the error from the server before we try to read the data @@ -136,10 +147,11 @@ class ReplicationTest(ReplicationTestCase): # try with correct command cur.start_replication(slot_name=self.slot) - @skip_before_postgres(9, 4) # slots require 9.4 + @skip_before_postgres(9, 4) # slots require 9.4 def test_stop_replication(self): conn = self.repl_connect(connection_factory=LogicalReplicationConnection) - if conn is None: return + if conn is None: + return cur = conn.cursor() self.create_replication_slot(cur, output_plugin='test_decoding') @@ -147,16 +159,19 @@ class ReplicationTest(ReplicationTestCase): self.make_replication_events() cur.start_replication(self.slot) + def consume(msg): raise StopReplication() self.assertRaises(StopReplication, cur.consume_stream, consume) class AsyncReplicationTest(ReplicationTestCase): - @skip_before_postgres(9, 4) # slots require 9.4 + @skip_before_postgres(9, 4) # slots require 9.4 def test_async_replication(self): - conn = self.repl_connect(connection_factory=LogicalReplicationConnection, async=1) - if conn is None: return + conn = self.repl_connect( + connection_factory=LogicalReplicationConnection, async=1) + if conn is None: + return self.wait(conn) cur = conn.cursor() @@ -169,9 +184,10 @@ class AsyncReplicationTest(ReplicationTestCase): self.make_replication_events() self.msg_count = 0 + def consume(msg): # just check the methods - log = "%s: %s" % (cur.io_timestamp, repr(msg)) + "%s: %s" % (cur.io_timestamp, repr(msg)) self.msg_count += 1 if self.msg_count > 3: @@ -193,8 +209,10 @@ class AsyncReplicationTest(ReplicationTestCase): select([cur], [], []) self.assertRaises(StopReplication, process_stream) + def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) + if __name__ 
== "__main__": unittest.main() diff --git a/tests/test_transaction.py b/tests/test_transaction.py index 724d0d80..2dc44ec5 100755 --- a/tests/test_transaction.py +++ b/tests/test_transaction.py @@ -29,6 +29,7 @@ import psycopg2 from psycopg2.extensions import ( ISOLATION_LEVEL_SERIALIZABLE, STATUS_BEGIN, STATUS_READY) + class TransactionTests(ConnectingTestCase): def setUp(self): @@ -147,6 +148,7 @@ class DeadlockSerializationTests(ConnectingTestCase): self.thread1_error = exc step1.set() conn.close() + def task2(): try: conn = self.connect() @@ -174,7 +176,7 @@ class DeadlockSerializationTests(ConnectingTestCase): self.assertFalse(self.thread1_error and self.thread2_error) error = self.thread1_error or self.thread2_error self.assertTrue(isinstance( - error, psycopg2.extensions.TransactionRollbackError)) + error, psycopg2.extensions.TransactionRollbackError)) def test_serialisation_failure(self): self.thread1_error = self.thread2_error = None @@ -195,6 +197,7 @@ class DeadlockSerializationTests(ConnectingTestCase): self.thread1_error = exc step1.set() conn.close() + def task2(): try: conn = self.connect() @@ -221,7 +224,7 @@ class DeadlockSerializationTests(ConnectingTestCase): self.assertFalse(self.thread1_error and self.thread2_error) error = self.thread1_error or self.thread2_error self.assertTrue(isinstance( - error, psycopg2.extensions.TransactionRollbackError)) + error, psycopg2.extensions.TransactionRollbackError)) class QueryCancellationTests(ConnectingTestCase): diff --git a/tests/test_types_basic.py b/tests/test_types_basic.py index f786c2a5..b43ea533 100755 --- a/tests/test_types_basic.py +++ b/tests/test_types_basic.py @@ -68,13 +68,16 @@ class TypesBasicTests(ConnectingTestCase): "wrong decimal quoting: " + str(s)) s = self.execute("SELECT %s AS foo", (decimal.Decimal("NaN"),)) self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s)) - self.failUnless(type(s) == decimal.Decimal, "wrong decimal conversion: " + repr(s)) + self.failUnless(type(s) == decimal.Decimal, + "wrong decimal conversion: " + repr(s)) s = self.execute("SELECT %s AS foo", (decimal.Decimal("infinity"),)) self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s)) - self.failUnless(type(s) == decimal.Decimal, "wrong decimal conversion: " + repr(s)) + self.failUnless(type(s) == decimal.Decimal, + "wrong decimal conversion: " + repr(s)) s = self.execute("SELECT %s AS foo", (decimal.Decimal("-infinity"),)) self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s)) - self.failUnless(type(s) == decimal.Decimal, "wrong decimal conversion: " + repr(s)) + self.failUnless(type(s) == decimal.Decimal, + "wrong decimal conversion: " + repr(s)) def testFloatNan(self): try: @@ -141,8 +144,8 @@ class TypesBasicTests(ConnectingTestCase): self.assertEqual(s, buf2.tobytes()) def testArray(self): - s = self.execute("SELECT %s AS foo", ([[1,2],[3,4]],)) - self.failUnlessEqual(s, [[1,2],[3,4]]) + s = self.execute("SELECT %s AS foo", ([[1, 2], [3, 4]],)) + self.failUnlessEqual(s, [[1, 2], [3, 4]]) s = self.execute("SELECT %s AS foo", (['one', 'two', 'three'],)) self.failUnlessEqual(s, ['one', 'two', 'three']) @@ -150,9 +153,12 @@ class TypesBasicTests(ConnectingTestCase): # ticket #42 import datetime curs = self.conn.cursor() - curs.execute("create table array_test (id integer, col timestamp without time zone[])") + curs.execute( + "create table array_test " + "(id integer, col timestamp without time zone[])") - curs.execute("insert into array_test values (%s, %s)", (1, [datetime.date(2011,2,14)])) + 
curs.execute("insert into array_test values (%s, %s)", + (1, [datetime.date(2011, 2, 14)])) curs.execute("select col from array_test where id = 1") self.assertEqual(curs.fetchone()[0], [datetime.datetime(2011, 2, 14, 0, 0)]) @@ -208,9 +214,9 @@ class TypesBasicTests(ConnectingTestCase): curs.execute("insert into na (texta) values (%s)", ([None],)) curs.execute("insert into na (texta) values (%s)", (['a', None],)) curs.execute("insert into na (texta) values (%s)", ([None, None],)) - curs.execute("insert into na (inta) values (%s)", ([None],)) - curs.execute("insert into na (inta) values (%s)", ([42, None],)) - curs.execute("insert into na (inta) values (%s)", ([None, None],)) + curs.execute("insert into na (inta) values (%s)", ([None],)) + curs.execute("insert into na (inta) values (%s)", ([42, None],)) + curs.execute("insert into na (inta) values (%s)", ([None, None],)) curs.execute("insert into na (boola) values (%s)", ([None],)) curs.execute("insert into na (boola) values (%s)", ([True, None],)) curs.execute("insert into na (boola) values (%s)", ([None, None],)) @@ -220,7 +226,7 @@ class TypesBasicTests(ConnectingTestCase): curs.execute("insert into na (textaa) values (%s)", ([['a', None]],)) # curs.execute("insert into na (textaa) values (%s)", ([[None, None]],)) # curs.execute("insert into na (intaa) values (%s)", ([[None]],)) - curs.execute("insert into na (intaa) values (%s)", ([[42, None]],)) + curs.execute("insert into na (intaa) values (%s)", ([[42, None]],)) # curs.execute("insert into na (intaa) values (%s)", ([[None, None]],)) # curs.execute("insert into na (boolaa) values (%s)", ([[None]],)) curs.execute("insert into na (boolaa) values (%s)", ([[True, None]],)) @@ -323,30 +329,33 @@ class TypesBasicTests(ConnectingTestCase): self.assertEqual(1, l1) def testGenericArray(self): - a = self.execute("select '{1,2,3}'::int4[]") - self.assertEqual(a, [1,2,3]) - a = self.execute("select array['a','b','''']::text[]") - self.assertEqual(a, ['a','b',"'"]) + a = self.execute("select '{1, 2, 3}'::int4[]") + self.assertEqual(a, [1, 2, 3]) + a = self.execute("select array['a', 'b', '''']::text[]") + self.assertEqual(a, ['a', 'b', "'"]) @testutils.skip_before_postgres(8, 2) def testGenericArrayNull(self): def caster(s, cur): - if s is None: return "nada" + if s is None: + return "nada" return int(s) * 2 base = psycopg2.extensions.new_type((23,), "INT4", caster) array = psycopg2.extensions.new_array_type((1007,), "INT4ARRAY", base) psycopg2.extensions.register_type(array, self.conn) - a = self.execute("select '{1,2,3}'::int4[]") - self.assertEqual(a, [2,4,6]) - a = self.execute("select '{1,2,NULL}'::int4[]") - self.assertEqual(a, [2,4,'nada']) + a = self.execute("select '{1, 2, 3}'::int4[]") + self.assertEqual(a, [2, 4, 6]) + a = self.execute("select '{1, 2, NULL}'::int4[]") + self.assertEqual(a, [2, 4, 'nada']) class AdaptSubclassTest(unittest.TestCase): def test_adapt_subtype(self): from psycopg2.extensions import adapt - class Sub(str): pass + + class Sub(str): + pass s1 = "hel'lo" s2 = Sub(s1) self.assertEqual(adapt(s1).getquoted(), adapt(s2).getquoted()) @@ -354,9 +363,14 @@ class AdaptSubclassTest(unittest.TestCase): def test_adapt_most_specific(self): from psycopg2.extensions import adapt, register_adapter, AsIs - class A(object): pass - class B(A): pass - class C(B): pass + class A(object): + pass + + class B(A): + pass + + class C(B): + pass register_adapter(A, lambda a: AsIs("a")) register_adapter(B, lambda b: AsIs("b")) @@ -370,8 +384,11 @@ class 
AdaptSubclassTest(unittest.TestCase): def test_no_mro_no_joy(self): from psycopg2.extensions import adapt, register_adapter, AsIs - class A: pass - class B(A): pass + class A: + pass + + class B(A): + pass register_adapter(A, lambda a: AsIs("a")) try: @@ -383,8 +400,11 @@ class AdaptSubclassTest(unittest.TestCase): def test_adapt_subtype_3(self): from psycopg2.extensions import adapt, register_adapter, AsIs - class A: pass - class B(A): pass + class A: + pass + + class B(A): + pass register_adapter(A, lambda a: AsIs("a")) try: @@ -443,7 +463,8 @@ class ByteaParserTest(unittest.TestCase): def test_full_hex(self, upper=False): buf = ''.join(("%02x" % i) for i in range(256)) - if upper: buf = buf.upper() + if upper: + buf = buf.upper() buf = '\\x' + buf rv = self.cast(buf.encode('utf8')) if sys.version_info[0] < 3: diff --git a/tests/test_types_extras.py b/tests/test_types_extras.py index 8bb6dae2..8e615616 100755 --- a/tests/test_types_extras.py +++ b/tests/test_types_extras.py @@ -37,6 +37,7 @@ def filter_scs(conn, s): else: return s.replace(b"E'", b"'") + class TypesExtrasTests(ConnectingTestCase): """Test that all type conversions are working.""" @@ -60,7 +61,8 @@ class TypesExtrasTests(ConnectingTestCase): def testUUIDARRAY(self): import uuid psycopg2.extras.register_uuid() - u = [uuid.UUID('9c6d5a77-7256-457e-9461-347b4358e350'), uuid.UUID('9c6d5a77-7256-457e-9461-347b4358e352')] + u = [uuid.UUID('9c6d5a77-7256-457e-9461-347b4358e350'), + uuid.UUID('9c6d5a77-7256-457e-9461-347b4358e352')] s = self.execute("SELECT %s AS foo", (u,)) self.failUnless(u == s) # array with a NULL element @@ -110,7 +112,8 @@ class TypesExtrasTests(ConnectingTestCase): a.getquoted()) def test_adapt_fail(self): - class Foo(object): pass + class Foo(object): + pass self.assertRaises(psycopg2.ProgrammingError, psycopg2.extensions.adapt, Foo(), ext.ISQLQuote, None) try: @@ -130,6 +133,7 @@ def skip_if_no_hstore(f): return skip_if_no_hstore_ + class HstoreTestCase(ConnectingTestCase): def test_adapt_8(self): if self.conn.server_version >= 90000: @@ -155,7 +159,8 @@ class HstoreTestCase(ConnectingTestCase): self.assertEqual(ii[2], filter_scs(self.conn, b"(E'c' => NULL)")) if 'd' in o: encc = u'\xe0'.encode(psycopg2.extensions.encodings[self.conn.encoding]) - self.assertEqual(ii[3], filter_scs(self.conn, b"(E'd' => E'" + encc + b"')")) + self.assertEqual(ii[3], + filter_scs(self.conn, b"(E'd' => E'" + encc + b"')")) def test_adapt_9(self): if self.conn.server_version < 90000: @@ -199,7 +204,7 @@ class HstoreTestCase(ConnectingTestCase): ok(None, None) ok('', {}) ok('"a"=>"1", "b"=>"2"', {'a': '1', 'b': '2'}) - ok('"a" => "1" ,"b" => "2"', {'a': '1', 'b': '2'}) + ok('"a" => "1" , "b" => "2"', {'a': '1', 'b': '2'}) ok('"a"=>NULL, "b"=>"2"', {'a': None, 'b': '2'}) ok(r'"a"=>"\"", "\""=>"2"', {'a': '"', '"': '2'}) ok('"a"=>"\'", "\'"=>"2"', {'a': "'", "'": '2'}) @@ -402,7 +407,9 @@ class HstoreTestCase(ConnectingTestCase): from psycopg2.extras import register_hstore register_hstore(None, globally=True, oid=oid, array_oid=aoid) try: - cur.execute("select null::hstore, ''::hstore, 'a => b'::hstore, '{a=>b}'::hstore[]") + cur.execute(""" + select null::hstore, ''::hstore, + 'a => b'::hstore, '{a=>b}'::hstore[]""") t = cur.fetchone() self.assert_(t[0] is None) self.assertEqual(t[1], {}) @@ -449,6 +456,7 @@ def skip_if_no_composite(f): return skip_if_no_composite_ + class AdaptTypeTestCase(ConnectingTestCase): @skip_if_no_composite def test_none_in_record(self): @@ -463,8 +471,11 @@ class AdaptTypeTestCase(ConnectingTestCase): 
# the None adapter is not actually invoked in regular adaptation class WonkyAdapter(object): - def __init__(self, obj): pass - def getquoted(self): return "NOPE!" + def __init__(self, obj): + pass + + def getquoted(self): + return "NOPE!" curs = self.conn.cursor() @@ -481,6 +492,7 @@ class AdaptTypeTestCase(ConnectingTestCase): def test_tokenization(self): from psycopg2.extras import CompositeCaster + def ok(s, v): self.assertEqual(CompositeCaster.tokenize(s), v) @@ -519,26 +531,26 @@ class AdaptTypeTestCase(ConnectingTestCase): self.assertEqual(t.oid, oid) self.assert_(issubclass(t.type, tuple)) self.assertEqual(t.attnames, ['anint', 'astring', 'adate']) - self.assertEqual(t.atttypes, [23,25,1082]) + self.assertEqual(t.atttypes, [23, 25, 1082]) curs = self.conn.cursor() - r = (10, 'hello', date(2011,1,2)) + r = (10, 'hello', date(2011, 1, 2)) curs.execute("select %s::type_isd;", (r,)) v = curs.fetchone()[0] self.assert_(isinstance(v, t.type)) self.assertEqual(v[0], 10) self.assertEqual(v[1], "hello") - self.assertEqual(v[2], date(2011,1,2)) + self.assertEqual(v[2], date(2011, 1, 2)) try: - from collections import namedtuple + from collections import namedtuple # noqa except ImportError: pass else: self.assert_(t.type is not tuple) self.assertEqual(v.anint, 10) self.assertEqual(v.astring, "hello") - self.assertEqual(v.adate, date(2011,1,2)) + self.assertEqual(v.adate, date(2011, 1, 2)) @skip_if_no_composite def test_empty_string(self): @@ -574,14 +586,14 @@ class AdaptTypeTestCase(ConnectingTestCase): psycopg2.extras.register_composite("type_r_ft", self.conn) curs = self.conn.cursor() - r = (0.25, (date(2011,1,2), (42, "hello"))) + r = (0.25, (date(2011, 1, 2), (42, "hello"))) curs.execute("select %s::type_r_ft;", (r,)) v = curs.fetchone()[0] self.assertEqual(r, v) try: - from collections import namedtuple + from collections import namedtuple # noqa except ImportError: pass else: @@ -595,7 +607,7 @@ class AdaptTypeTestCase(ConnectingTestCase): curs2 = self.conn.cursor() psycopg2.extras.register_composite("type_ii", curs1) curs1.execute("select (1,2)::type_ii") - self.assertEqual(curs1.fetchone()[0], (1,2)) + self.assertEqual(curs1.fetchone()[0], (1, 2)) curs2.execute("select (1,2)::type_ii") self.assertEqual(curs2.fetchone()[0], "(1,2)") @@ -610,7 +622,7 @@ class AdaptTypeTestCase(ConnectingTestCase): curs1 = conn1.cursor() curs2 = conn2.cursor() curs1.execute("select (1,2)::type_ii") - self.assertEqual(curs1.fetchone()[0], (1,2)) + self.assertEqual(curs1.fetchone()[0], (1, 2)) curs2.execute("select (1,2)::type_ii") self.assertEqual(curs2.fetchone()[0], "(1,2)") finally: @@ -629,9 +641,9 @@ class AdaptTypeTestCase(ConnectingTestCase): curs1 = conn1.cursor() curs2 = conn2.cursor() curs1.execute("select (1,2)::type_ii") - self.assertEqual(curs1.fetchone()[0], (1,2)) + self.assertEqual(curs1.fetchone()[0], (1, 2)) curs2.execute("select (1,2)::type_ii") - self.assertEqual(curs2.fetchone()[0], (1,2)) + self.assertEqual(curs2.fetchone()[0], (1, 2)) finally: # drop the registered typecasters to help the refcounting # script to return precise values. 
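The hunks above exercise the scope rules of `register_composite()`: a caster registered on one connection or cursor does not affect another. A minimal sketch of that behaviour, assuming a reachable test database and a composite type created with ``CREATE TYPE type_ii AS (a integer, b integer)``::

    import psycopg2
    import psycopg2.extras

    # Two independent connections to the same (assumed) test database.
    conn1 = psycopg2.connect("dbname=test")
    conn2 = psycopg2.connect("dbname=test")

    # Register the composite caster on the first connection only.
    psycopg2.extras.register_composite("type_ii", conn1)

    cur1 = conn1.cursor()
    cur1.execute("select (1,2)::type_ii")
    print(cur1.fetchone()[0])    # a Python tuple (possibly a namedtuple) equal to (1, 2)

    cur2 = conn2.cursor()
    cur2.execute("select (1,2)::type_ii")
    print(cur2.fetchone()[0])    # no caster here: the raw string "(1,2)"

    conn1.close()
    conn2.close()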
@@ -661,30 +673,30 @@ class AdaptTypeTestCase(ConnectingTestCase): "typens.typens_ii", self.conn) self.assertEqual(t.schema, 'typens') curs.execute("select (4,8)::typens.typens_ii") - self.assertEqual(curs.fetchone()[0], (4,8)) + self.assertEqual(curs.fetchone()[0], (4, 8)) @skip_if_no_composite @skip_before_postgres(8, 4) def test_composite_array(self): - oid = self._create_type("type_isd", + self._create_type("type_isd", [('anint', 'integer'), ('astring', 'text'), ('adate', 'date')]) t = psycopg2.extras.register_composite("type_isd", self.conn) curs = self.conn.cursor() - r1 = (10, 'hello', date(2011,1,2)) - r2 = (20, 'world', date(2011,1,3)) + r1 = (10, 'hello', date(2011, 1, 2)) + r2 = (20, 'world', date(2011, 1, 3)) curs.execute("select %s::type_isd[];", ([r1, r2],)) v = curs.fetchone()[0] self.assertEqual(len(v), 2) self.assert_(isinstance(v[0], t.type)) self.assertEqual(v[0][0], 10) self.assertEqual(v[0][1], "hello") - self.assertEqual(v[0][2], date(2011,1,2)) + self.assertEqual(v[0][2], date(2011, 1, 2)) self.assert_(isinstance(v[1], t.type)) self.assertEqual(v[1][0], 20) self.assertEqual(v[1][1], "world") - self.assertEqual(v[1][2], date(2011,1,3)) + self.assertEqual(v[1][2], date(2011, 1, 3)) @skip_if_no_composite def test_wrong_schema(self): @@ -752,7 +764,7 @@ class AdaptTypeTestCase(ConnectingTestCase): register_composite('type_ii', conn) curs = conn.cursor() curs.execute("select '(1,2)'::type_ii as x") - self.assertEqual(curs.fetchone()['x'], (1,2)) + self.assertEqual(curs.fetchone()['x'], (1, 2)) finally: conn.close() @@ -761,7 +773,7 @@ class AdaptTypeTestCase(ConnectingTestCase): curs = conn.cursor() register_composite('type_ii', conn) curs.execute("select '(1,2)'::type_ii as x") - self.assertEqual(curs.fetchone()['x'], (1,2)) + self.assertEqual(curs.fetchone()['x'], (1, 2)) finally: conn.close() @@ -782,13 +794,13 @@ class AdaptTypeTestCase(ConnectingTestCase): self.assertEqual(t.oid, oid) curs = self.conn.cursor() - r = (10, 'hello', date(2011,1,2)) + r = (10, 'hello', date(2011, 1, 2)) curs.execute("select %s::type_isd;", (r,)) v = curs.fetchone()[0] self.assert_(isinstance(v, dict)) self.assertEqual(v['anint'], 10) self.assertEqual(v['astring'], "hello") - self.assertEqual(v['adate'], date(2011,1,2)) + self.assertEqual(v['adate'], date(2011, 1, 2)) def _create_type(self, name, fields): curs = self.conn.cursor() @@ -825,6 +837,7 @@ def skip_if_json_module(f): return skip_if_json_module_ + def skip_if_no_json_module(f): """Skip a test if no Python json module is available""" @wraps(f) @@ -836,6 +849,7 @@ def skip_if_no_json_module(f): return skip_if_no_json_module_ + def skip_if_no_json_type(f): """Skip a test if PostgreSQL json type is not available""" @wraps(f) @@ -849,6 +863,7 @@ def skip_if_no_json_type(f): return skip_if_no_json_type_ + class JsonTestCase(ConnectingTestCase): @skip_if_json_module def test_module_not_available(self): @@ -858,6 +873,7 @@ class JsonTestCase(ConnectingTestCase): @skip_if_json_module def test_customizable_with_module_not_available(self): from psycopg2.extras import Json + class MyJson(Json): def dumps(self, obj): assert obj is None @@ -870,7 +886,7 @@ class JsonTestCase(ConnectingTestCase): from psycopg2.extras import json, Json objs = [None, "te'xt", 123, 123.45, - u'\xe0\u20ac', ['a', 100], {'a': 100} ] + u'\xe0\u20ac', ['a', 100], {'a': 100}] curs = self.conn.cursor() for obj in enumerate(objs): @@ -889,7 +905,9 @@ class JsonTestCase(ConnectingTestCase): curs = self.conn.cursor() obj = Decimal('123.45') - dumps = lambda obj: 
json.dumps(obj, cls=DecimalEncoder) + + def dumps(obj): + return json.dumps(obj, cls=DecimalEncoder) self.assertEqual(curs.mogrify("%s", (Json(obj, dumps=dumps),)), b"'123.45'") @@ -921,8 +939,7 @@ class JsonTestCase(ConnectingTestCase): obj = {'a': 123} self.assertEqual(curs.mogrify("%s", (obj,)), b"""'{"a": 123}'""") finally: - del psycopg2.extensions.adapters[dict, ext.ISQLQuote] - + del psycopg2.extensions.adapters[dict, ext.ISQLQuote] def test_type_not_available(self): curs = self.conn.cursor() @@ -982,7 +999,9 @@ class JsonTestCase(ConnectingTestCase): @skip_if_no_json_type def test_loads(self): json = psycopg2.extras.json - loads = lambda x: json.loads(x, parse_float=Decimal) + + def loads(s): + return json.loads(s, parse_float=Decimal) psycopg2.extras.register_json(self.conn, loads=loads) curs = self.conn.cursor() curs.execute("""select '{"a": 100.0, "b": null}'::json""") @@ -998,7 +1017,9 @@ class JsonTestCase(ConnectingTestCase): old = psycopg2.extensions.string_types.get(114) olda = psycopg2.extensions.string_types.get(199) - loads = lambda x: psycopg2.extras.json.loads(x, parse_float=Decimal) + + def loads(s): + return psycopg2.extras.json.loads(s, parse_float=Decimal) try: new, newa = psycopg2.extras.register_json( loads=loads, oid=oid, array_oid=array_oid) @@ -1020,7 +1041,8 @@ class JsonTestCase(ConnectingTestCase): def test_register_default(self): curs = self.conn.cursor() - loads = lambda x: psycopg2.extras.json.loads(x, parse_float=Decimal) + def loads(s): + return psycopg2.extras.json.loads(s, parse_float=Decimal) psycopg2.extras.register_default_json(curs, loads=loads) curs.execute("""select '{"a": 100.0, "b": null}'::json""") @@ -1070,6 +1092,7 @@ class JsonTestCase(ConnectingTestCase): def skip_if_no_jsonb_type(f): return skip_before_postgres(9, 4)(f) + class JsonbTestCase(ConnectingTestCase): @staticmethod def myloads(s): @@ -1118,7 +1141,10 @@ class JsonbTestCase(ConnectingTestCase): def test_loads(self): json = psycopg2.extras.json - loads = lambda x: json.loads(x, parse_float=Decimal) + + def loads(s): + return json.loads(s, parse_float=Decimal) + psycopg2.extras.register_json(self.conn, loads=loads, name='jsonb') curs = self.conn.cursor() curs.execute("""select '{"a": 100.0, "b": null}'::jsonb""") @@ -1134,7 +1160,9 @@ class JsonbTestCase(ConnectingTestCase): def test_register_default(self): curs = self.conn.cursor() - loads = lambda x: psycopg2.extras.json.loads(x, parse_float=Decimal) + def loads(s): + return psycopg2.extras.json.loads(s, parse_float=Decimal) + psycopg2.extras.register_default_jsonb(curs, loads=loads) curs.execute("""select '{"a": 100.0, "b": null}'::jsonb""") @@ -1200,7 +1228,7 @@ class RangeTestCase(unittest.TestCase): ('[)', True, False), ('(]', False, True), ('()', False, False), - ('[]', True, True),]: + ('[]', True, True)]: r = Range(10, 20, bounds) self.assertEqual(r.lower, 10) self.assertEqual(r.upper, 20) @@ -1294,11 +1322,11 @@ class RangeTestCase(unittest.TestCase): self.assert_(not Range(empty=True)) def test_eq_hash(self): - from psycopg2.extras import Range def assert_equal(r1, r2): self.assert_(r1 == r2) self.assert_(hash(r1) == hash(r2)) + from psycopg2.extras import Range assert_equal(Range(empty=True), Range(empty=True)) assert_equal(Range(), Range()) assert_equal(Range(10, None), Range(10, None)) @@ -1321,8 +1349,11 @@ class RangeTestCase(unittest.TestCase): def test_eq_subclass(self): from psycopg2.extras import Range, NumericRange - class IntRange(NumericRange): pass - class PositiveIntRange(IntRange): pass + class 
IntRange(NumericRange): + pass + + class PositiveIntRange(IntRange): + pass self.assertEqual(Range(10, 20), IntRange(10, 20)) self.assertEqual(PositiveIntRange(10, 20), IntRange(10, 20)) @@ -1480,8 +1511,8 @@ class RangeCasterTestCase(ConnectingTestCase): r = cur.fetchone()[0] self.assert_(isinstance(r, DateRange)) self.assert_(not r.isempty) - self.assertEqual(r.lower, date(2000,1,2)) - self.assertEqual(r.upper, date(2012,12,31)) + self.assertEqual(r.lower, date(2000, 1, 2)) + self.assertEqual(r.upper, date(2012, 12, 31)) self.assert_(not r.lower_inf) self.assert_(not r.upper_inf) self.assert_(r.lower_inc) @@ -1490,8 +1521,8 @@ class RangeCasterTestCase(ConnectingTestCase): def test_cast_timestamp(self): from psycopg2.extras import DateTimeRange cur = self.conn.cursor() - ts1 = datetime(2000,1,1) - ts2 = datetime(2000,12,31,23,59,59,999) + ts1 = datetime(2000, 1, 1) + ts2 = datetime(2000, 12, 31, 23, 59, 59, 999) cur.execute("select tsrange(%s, %s, '()')", (ts1, ts2)) r = cur.fetchone()[0] self.assert_(isinstance(r, DateTimeRange)) @@ -1507,8 +1538,9 @@ class RangeCasterTestCase(ConnectingTestCase): from psycopg2.extras import DateTimeTZRange from psycopg2.tz import FixedOffsetTimezone cur = self.conn.cursor() - ts1 = datetime(2000,1,1, tzinfo=FixedOffsetTimezone(600)) - ts2 = datetime(2000,12,31,23,59,59,999, tzinfo=FixedOffsetTimezone(600)) + ts1 = datetime(2000, 1, 1, tzinfo=FixedOffsetTimezone(600)) + ts2 = datetime(2000, 12, 31, 23, 59, 59, 999, + tzinfo=FixedOffsetTimezone(600)) cur.execute("select tstzrange(%s, %s, '[]')", (ts1, ts2)) r = cur.fetchone()[0] self.assert_(isinstance(r, DateTimeTZRange)) @@ -1598,8 +1630,9 @@ class RangeCasterTestCase(ConnectingTestCase): self.assert_(isinstance(r1, DateTimeRange)) self.assert_(r1.isempty) - ts1 = datetime(2000,1,1, tzinfo=FixedOffsetTimezone(600)) - ts2 = datetime(2000,12,31,23,59,59,999, tzinfo=FixedOffsetTimezone(600)) + ts1 = datetime(2000, 1, 1, tzinfo=FixedOffsetTimezone(600)) + ts2 = datetime(2000, 12, 31, 23, 59, 59, 999, + tzinfo=FixedOffsetTimezone(600)) r = DateTimeTZRange(ts1, ts2, '(]') cur.execute("select %s", (r,)) r1 = cur.fetchone()[0] @@ -1627,7 +1660,7 @@ class RangeCasterTestCase(ConnectingTestCase): self.assert_(not r1.lower_inc) self.assert_(r1.upper_inc) - cur.execute("select %s", ([r,r,r],)) + cur.execute("select %s", ([r, r, r],)) rs = cur.fetchone()[0] self.assertEqual(len(rs), 3) for r1 in rs: @@ -1651,12 +1684,12 @@ class RangeCasterTestCase(ConnectingTestCase): id integer primary key, range textrange)""") - bounds = [ '[)', '(]', '()', '[]' ] - ranges = [ TextRange(low, up, bounds[i % 4]) + bounds = ['[)', '(]', '()', '[]'] + ranges = [TextRange(low, up, bounds[i % 4]) for i, (low, up) in enumerate(zip( [None] + map(chr, range(1, 128)), - map(chr, range(1,128)) + [None], - ))] + map(chr, range(1, 128)) + [None], + ))] ranges.append(TextRange()) ranges.append(TextRange(empty=True)) @@ -1736,6 +1769,6 @@ decorate_all_tests(RangeCasterTestCase, skip_if_no_range) def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) + if __name__ == "__main__": unittest.main() - diff --git a/tests/test_with.py b/tests/test_with.py index 2f018fc8..9d91b51e 100755 --- a/tests/test_with.py +++ b/tests/test_with.py @@ -30,6 +30,7 @@ import psycopg2.extensions as ext from testutils import unittest, ConnectingTestCase + class WithTestCase(ConnectingTestCase): def setUp(self): ConnectingTestCase.setUp(self) @@ -93,7 +94,7 @@ class WithConnectionTestCase(WithTestCase): with self.conn as conn: curs = conn.cursor() 
curs.execute("insert into test_with values (3)") - 1/0 + 1 / 0 self.assertRaises(ZeroDivisionError, f) self.assertEqual(self.conn.status, ext.STATUS_READY) @@ -113,6 +114,7 @@ class WithConnectionTestCase(WithTestCase): def test_subclass_commit(self): commits = [] + class MyConn(ext.connection): def commit(self): commits.append(None) @@ -131,6 +133,7 @@ class WithConnectionTestCase(WithTestCase): def test_subclass_rollback(self): rollbacks = [] + class MyConn(ext.connection): def rollback(self): rollbacks.append(None) @@ -140,7 +143,7 @@ class WithConnectionTestCase(WithTestCase): with self.connect(connection_factory=MyConn) as conn: curs = conn.cursor() curs.execute("insert into test_with values (11)") - 1/0 + 1 / 0 except ZeroDivisionError: pass else: @@ -175,7 +178,7 @@ class WithCursorTestCase(WithTestCase): with self.conn as conn: with conn.cursor() as curs: curs.execute("insert into test_with values (5)") - 1/0 + 1 / 0 except ZeroDivisionError: pass @@ -189,6 +192,7 @@ class WithCursorTestCase(WithTestCase): def test_subclass(self): closes = [] + class MyCurs(ext.cursor): def close(self): closes.append(None) diff --git a/tests/testutils.py b/tests/testutils.py index 1d1ad054..d0a34bcf 100644 --- a/tests/testutils.py +++ b/tests/testutils.py @@ -69,8 +69,8 @@ else: # Silence warnings caused by the stubbornness of the Python unittest # maintainers # http://bugs.python.org/issue9424 -if not hasattr(unittest.TestCase, 'assert_') \ -or unittest.TestCase.assert_ is not unittest.TestCase.assertTrue: +if (not hasattr(unittest.TestCase, 'assert_') + or unittest.TestCase.assert_ is not unittest.TestCase.assertTrue): # mavaff... unittest.TestCase.assert_ = unittest.TestCase.assertTrue unittest.TestCase.failUnless = unittest.TestCase.assertTrue @@ -175,7 +175,7 @@ def skip_if_no_uuid(f): @wraps(f) def skip_if_no_uuid_(self): try: - import uuid + import uuid # noqa except ImportError: return self.skipTest("uuid not available in this Python version") @@ -223,7 +223,7 @@ def skip_if_no_namedtuple(f): @wraps(f) def skip_if_no_namedtuple_(self): try: - from collections import namedtuple + from collections import namedtuple # noqa except ImportError: return self.skipTest("collections.namedtuple not available") else: @@ -237,7 +237,7 @@ def skip_if_no_iobase(f): @wraps(f) def skip_if_no_iobase_(self): try: - from io import TextIOBase + from io import TextIOBase # noqa except ImportError: return self.skipTest("io.TextIOBase not found.") else: @@ -249,6 +249,7 @@ def skip_if_no_iobase(f): def skip_before_postgres(*ver): """Skip a test on PostgreSQL before a certain version.""" ver = ver + (0,) * (3 - len(ver)) + def skip_before_postgres_(f): @wraps(f) def skip_before_postgres__(self): @@ -261,9 +262,11 @@ def skip_before_postgres(*ver): return skip_before_postgres__ return skip_before_postgres_ + def skip_after_postgres(*ver): """Skip a test on PostgreSQL after (including) a certain version.""" ver = ver + (0,) * (3 - len(ver)) + def skip_after_postgres_(f): @wraps(f) def skip_after_postgres__(self): @@ -276,6 +279,7 @@ def skip_after_postgres(*ver): return skip_after_postgres__ return skip_after_postgres_ + def libpq_version(): import psycopg2 v = psycopg2.__libpq_version__ @@ -283,9 +287,11 @@ def libpq_version(): v = psycopg2.extensions.libpq_version() return v + def skip_before_libpq(*ver): """Skip a test if libpq we're linked to is older than a certain version.""" ver = ver + (0,) * (3 - len(ver)) + def skip_before_libpq_(f): @wraps(f) def skip_before_libpq__(self): @@ -298,9 +304,11 @@ def 
skip_before_libpq(*ver): return skip_before_libpq__ return skip_before_libpq_ + def skip_after_libpq(*ver): """Skip a test if libpq we're linked to is newer than a certain version.""" ver = ver + (0,) * (3 - len(ver)) + def skip_after_libpq_(f): @wraps(f) def skip_after_libpq__(self): @@ -313,6 +321,7 @@ def skip_after_libpq(*ver): return skip_after_libpq__ return skip_after_libpq_ + def skip_before_python(*ver): """Skip a test on Python before a certain version.""" def skip_before_python_(f): @@ -327,6 +336,7 @@ def skip_before_python(*ver): return skip_before_python__ return skip_before_python_ + def skip_from_python(*ver): """Skip a test on Python after (including) a certain version.""" def skip_from_python_(f): @@ -341,6 +351,7 @@ def skip_from_python(*ver): return skip_from_python__ return skip_from_python_ + def skip_if_no_superuser(f): """Skip a test if the database user running the test is not a superuser""" @wraps(f) @@ -357,6 +368,7 @@ def skip_if_no_superuser(f): return skip_if_no_superuser_ + def skip_if_green(reason): def skip_if_green_(f): @wraps(f) @@ -372,6 +384,7 @@ def skip_if_green(reason): skip_copy_if_green = skip_if_green("copy in async mode currently not supported") + def skip_if_no_getrefcount(f): @wraps(f) def skip_if_no_getrefcount_(self): @@ -381,6 +394,7 @@ def skip_if_no_getrefcount(f): return f(self) return skip_if_no_getrefcount_ + def skip_if_windows(f): """Skip a test if run on windows""" @wraps(f) @@ -419,6 +433,7 @@ def script_to_py3(script): f2.close() os.remove(filename) + class py3_raises_typeerror(object): def __enter__(self): diff --git a/tox.ini b/tox.ini index f27f3f15..4a1129d5 100644 --- a/tox.ini +++ b/tox.ini @@ -8,3 +8,8 @@ envlist = py26, py27 [testenv] commands = make check + +[flake8] +max-line-length = 85 +ignore = E128, W503 +exclude = build, doc, sandbox, examples, tests/dbapi20.py From 05627ac0f9e519c52014185b48a782f8669e6843 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Tue, 11 Oct 2016 00:22:23 +0100 Subject: [PATCH 127/151] Fix unicode mogrify test on python 2 --- tests/test_cursor.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/test_cursor.py b/tests/test_cursor.py index 4fab2c4c..4aae6b2f 100755 --- a/tests/test_cursor.py +++ b/tests/test_cursor.py @@ -70,21 +70,27 @@ class CursorTests(ConnectingTestCase): conn.set_client_encoding('UTF8') snowman = u"\u2603" + def b(s): + if isinstance(s, unicode): + return s.encode('utf8') + else: + return s + # unicode query with non-ascii data cur.execute(u"SELECT '%s';" % snowman) - self.assertEqual(snowman.encode('utf8'), cur.fetchone()[0].encode('utf8')) + self.assertEqual(snowman.encode('utf8'), b(cur.fetchone()[0])) self.assertEqual(("SELECT '%s';" % snowman).encode('utf8'), cur.mogrify(u"SELECT '%s';" % snowman).replace(b"E'", b"'")) # unicode args cur.execute("SELECT %s;", (snowman,)) - self.assertEqual(snowman.encode("utf-8"), cur.fetchone()[0].encode('utf8')) + self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0])) self.assertEqual(("SELECT '%s';" % snowman).encode('utf8'), cur.mogrify("SELECT %s;", (snowman,)).replace(b"E'", b"'")) # unicode query and args cur.execute(u"SELECT %s;", (snowman,)) - self.assertEqual(snowman.encode("utf-8"), cur.fetchone()[0].encode('utf8')) + self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0])) self.assertEqual(("SELECT '%s';" % snowman).encode('utf8'), cur.mogrify(u"SELECT %s;", (snowman,)).replace(b"E'", b"'")) From 86198c1c21e959030cf5710a6d4d2fcb3337596b Mon Sep 17 00:00:00 2001 From: 
Daniele Varrazzo Date: Tue, 11 Oct 2016 01:33:04 +0100 Subject: [PATCH 128/151] inet adapters deprecated Close #343 --- NEWS | 1 + doc/src/extras.rst | 5 +++-- lib/extras.py | 5 +++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 6ada19c0..4921973e 100644 --- a/NEWS +++ b/NEWS @@ -19,6 +19,7 @@ New features: - The attributes `~connection.notices` and `~connection.notifies` can be customized replacing them with any object exposing an `!append()` method (:ticket:`#326`). +- old ``inet`` adapters deprecated (:ticket:`#343`). - Added `~psycopg2.extensions.quote_ident()` function (:ticket:`#359`). - Added `~connection.get_dsn_parameters()` connection method (:ticket:`#364`). diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 78e96efe..3a3d232f 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -934,8 +934,9 @@ UUID data type :sql:`inet` data type ^^^^^^^^^^^^^^^^^^^^^^ -.. versionadded:: 2.0.9 -.. versionchanged:: 2.4.5 added inet array support. +.. deprecated:: 2.7 + these objects will not receive further development and disappear in future + versions .. doctest:: diff --git a/lib/extras.py b/lib/extras.py index fe74d386..a9f5b447 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -686,6 +686,11 @@ def register_inet(oid=None, conn_or_curs=None): :param conn_or_curs: where to register the typecaster. If not specified, register it globally. """ + import warnings + warnings.warn( + "the inet adapter is deprecated, it's not very useful", + DeprecationWarning) + if not oid: oid1 = 869 oid2 = 1041 From 706ad2f177d5e3250643765446e66b82e9849648 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Tue, 11 Oct 2016 02:31:45 +0100 Subject: [PATCH 129/151] Conver network array types into array of strings by default --- NEWS | 3 ++- doc/src/extras.rst | 24 ++++++++++++++++++------ psycopg/typecast_builtins.c | 6 ++++++ tests/test_types_basic.py | 10 ++++++++++ 4 files changed, 36 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index 4921973e..a102e523 100644 --- a/NEWS +++ b/NEWS @@ -19,7 +19,8 @@ New features: - The attributes `~connection.notices` and `~connection.notifies` can be customized replacing them with any object exposing an `!append()` method (:ticket:`#326`). -- old ``inet`` adapters deprecated (:ticket:`#343`). +- old ``inet`` adapters deprecated, but arrays of network types converted + to lists by default (:tickets:`#343, #387`). - Added `~psycopg2.extensions.quote_ident()` function (:ticket:`#359`). - Added `~connection.get_dsn_parameters()` connection method (:ticket:`#364`). diff --git a/doc/src/extras.rst b/doc/src/extras.rst index 3a3d232f..cf871afe 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -930,13 +930,20 @@ UUID data type .. index:: pair: INET; Data types + pair: CIDR; Data types + pair: MACADDR; Data types -:sql:`inet` data type -^^^^^^^^^^^^^^^^^^^^^^ +Networking data type +^^^^^^^^^^^^^^^^^^^^ -.. deprecated:: 2.7 - these objects will not receive further development and disappear in future - versions +Psycopg casts the PostgreSQL networking data types (:sql:`inet`, :sql:`cidr`, +:sql:`macaddr`) into ordinary strings. However their array are detected as +arrays and directly cast into lists. + +.. versionchanged:: 2.7 + in previous version array of networking types were not treated as arrays + +.. autofunction:: register_inet .. doctest:: @@ -950,11 +957,16 @@ UUID data type >>> cur.fetchone()[0].addr '192.168.0.1/24' +.. 
deprecated:: 2.7 + this function will not receive further development and disappear in future + versions -.. autofunction:: register_inet .. autoclass:: Inet +.. deprecated:: 2.7 + this object will not receive further development and disappear in future + versions .. index:: diff --git a/psycopg/typecast_builtins.c b/psycopg/typecast_builtins.c index a104b7c4..fa548a73 100644 --- a/psycopg/typecast_builtins.c +++ b/psycopg/typecast_builtins.c @@ -25,6 +25,9 @@ static long int typecast_DATEARRAY_types[] = {1182, 0}; static long int typecast_INTERVALARRAY_types[] = {1187, 0}; static long int typecast_BINARYARRAY_types[] = {1001, 0}; static long int typecast_ROWIDARRAY_types[] = {1028, 1013, 0}; +static long int typecast_INETARRAY_types[] = {1041, 0}; +static long int typecast_CIDRARRAY_types[] = {651, 0}; +static long int typecast_MACADDRARRAY_types[] = {1040, 0}; static long int typecast_UNKNOWN_types[] = {705, 0}; @@ -57,6 +60,9 @@ static typecastObject_initlist typecast_builtins[] = { {"BINARYARRAY", typecast_BINARYARRAY_types, typecast_BINARYARRAY_cast, "BINARY"}, {"ROWIDARRAY", typecast_ROWIDARRAY_types, typecast_ROWIDARRAY_cast, "ROWID"}, {"UNKNOWN", typecast_UNKNOWN_types, typecast_UNKNOWN_cast, NULL}, + {"INETARRAY", typecast_INETARRAY_types, typecast_STRINGARRAY_cast, "STRING"}, + {"CIDRARRAY", typecast_CIDRARRAY_types, typecast_STRINGARRAY_cast, "STRING"}, + {"MACADDRARRAY", typecast_MACADDRARRAY_types, typecast_STRINGARRAY_cast, "STRING"}, {NULL, NULL, NULL, NULL} }; diff --git a/tests/test_types_basic.py b/tests/test_types_basic.py index b43ea533..bee23d53 100755 --- a/tests/test_types_basic.py +++ b/tests/test_types_basic.py @@ -349,6 +349,16 @@ class TypesBasicTests(ConnectingTestCase): a = self.execute("select '{1, 2, NULL}'::int4[]") self.assertEqual(a, [2, 4, 'nada']) + @testutils.skip_before_postgres(8, 2) + def testNetworkArray(self): + # we don't know these types, but we know their arrays + a = self.execute("select '{192.168.0.1/24}'::inet[]") + self.assertEqual(a, ['192.168.0.1/24']) + a = self.execute("select '{192.168.0.0/24}'::cidr[]") + self.assertEqual(a, ['192.168.0.0/24']) + a = self.execute("select '{10:20:30:40:50:60}'::macaddr[]") + self.assertEqual(a, ['10:20:30:40:50:60']) + class AdaptSubclassTest(unittest.TestCase): def test_adapt_subtype(self): From 643ba70bad0f19a68c06ec95de2691c28e060e48 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Tue, 11 Oct 2016 03:58:09 +0100 Subject: [PATCH 130/151] Added ipaddress objects conversion Close #387 --- NEWS | 5 +- doc/src/conf.py | 2 +- doc/src/extras.rst | 31 ++++++---- doc/src/usage.rst | 5 +- lib/_ipaddress.py | 89 +++++++++++++++++++++++++++ lib/extras.py | 4 ++ tests/__init__.py | 11 ++-- tests/test_ipaddress.py | 131 ++++++++++++++++++++++++++++++++++++++++ 8 files changed, 254 insertions(+), 24 deletions(-) create mode 100644 lib/_ipaddress.py create mode 100755 tests/test_ipaddress.py diff --git a/NEWS b/NEWS index a102e523..b7efef29 100644 --- a/NEWS +++ b/NEWS @@ -19,8 +19,9 @@ New features: - The attributes `~connection.notices` and `~connection.notifies` can be customized replacing them with any object exposing an `!append()` method (:ticket:`#326`). -- old ``inet`` adapters deprecated, but arrays of network types converted - to lists by default (:tickets:`#343, #387`). +- Adapt network types to `ipaddress` objects when available. When not + enabled, convert arrays of network types to lists by default. The old `!Inet` + adapter is deprecated (:tickets:`#317, #343, #387`). 
- Added `~psycopg2.extensions.quote_ident()` function (:ticket:`#359`). - Added `~connection.get_dsn_parameters()` connection method (:ticket:`#364`). diff --git a/doc/src/conf.py b/doc/src/conf.py index 94ffa349..22c5c46f 100644 --- a/doc/src/conf.py +++ b/doc/src/conf.py @@ -62,7 +62,7 @@ except ImportError: intersphinx_mapping = { 'py': ('http://docs.python.org/', None), - 'py3': ('http://docs.python.org/3.2', None), + 'py3': ('http://docs.python.org/3.4', None), } # Pattern to generate links to the bug tracker diff --git a/doc/src/extras.rst b/doc/src/extras.rst index cf871afe..d33b8eed 100644 --- a/doc/src/extras.rst +++ b/doc/src/extras.rst @@ -933,18 +933,27 @@ UUID data type pair: CIDR; Data types pair: MACADDR; Data types -Networking data type -^^^^^^^^^^^^^^^^^^^^ +.. _adapt-network: -Psycopg casts the PostgreSQL networking data types (:sql:`inet`, :sql:`cidr`, -:sql:`macaddr`) into ordinary strings. However their array are detected as -arrays and directly cast into lists. +Networking data types +^^^^^^^^^^^^^^^^^^^^^ + +By default Psycopg casts the PostgreSQL networking data types (:sql:`inet`, +:sql:`cidr`, :sql:`macaddr`) into ordinary strings; array of such types are +converted into lists of strings. .. versionchanged:: 2.7 - in previous version array of networking types were not treated as arrays + in previous version array of networking types were not treated as arrays. + +.. autofunction:: register_ipaddress + .. autofunction:: register_inet + .. deprecated:: 2.7 + this function will not receive further development and disappear in + future versions. + .. doctest:: >>> psycopg2.extras.register_inet() @@ -957,16 +966,12 @@ arrays and directly cast into lists. >>> cur.fetchone()[0].addr '192.168.0.1/24' -.. deprecated:: 2.7 - this function will not receive further development and disappear in future - versions - .. autoclass:: Inet -.. deprecated:: 2.7 - this object will not receive further development and disappear in future - versions + .. deprecated:: 2.7 + this object will not receive further development and may disappear in + future versions. .. index:: diff --git a/doc/src/usage.rst b/doc/src/usage.rst index 3b42aeb9..e768f372 100644 --- a/doc/src/usage.rst +++ b/doc/src/usage.rst @@ -264,7 +264,10 @@ types: +--------------------+-------------------------+--------------------------+ | Anything\ |tm| | :sql:`json` | :ref:`adapt-json` | +--------------------+-------------------------+--------------------------+ - | `uuid` | :sql:`uuid` | :ref:`adapt-uuid` | + | `~uuid.UUID` | :sql:`uuid` | :ref:`adapt-uuid` | + +--------------------+-------------------------+--------------------------+ + | `ipaddress` | | :sql:`inet` | :ref:`adapt-network` | + | objects | | :sql:`cidr` | | +--------------------+-------------------------+--------------------------+ .. |tm| unicode:: U+2122 diff --git a/lib/_ipaddress.py b/lib/_ipaddress.py new file mode 100644 index 00000000..ee05a260 --- /dev/null +++ b/lib/_ipaddress.py @@ -0,0 +1,89 @@ +"""Implementation of the ipaddres-based network types adaptation +""" + +# psycopg/_ipaddress.py - Ipaddres-based network types adaptation +# +# Copyright (C) 2016 Daniele Varrazzo +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +from psycopg2.extensions import ( + new_type, new_array_type, register_type, register_adapter, QuotedString) + +# The module is imported on register_ipaddress +ipaddress = None + +# The typecasters are created only once +_casters = None + + +def register_ipaddress(conn_or_curs=None): + """ + Register conversion support between `ipaddress` objects and `network types`__. + + :param conn_or_curs: the scope where to register the type casters. + If `!None` register them globally. + + After the function is called, PostgreSQL :sql:`inet` values will be + converted into `~ipaddress.IPv4Interface` or `~ipaddress.IPv6Interface` + objects, :sql:`cidr` values into into `~ipaddress.IPv4Network` or + `~ipaddress.IPv6Network`. + + .. __: https://www.postgresql.org/docs/current/static/datatype-net-types.html + """ + global ipaddress + import ipaddress + + global _casters + if _casters is None: + _casters = _make_casters() + + for c in _casters: + register_type(c, conn_or_curs) + + for t in [ipaddress.IPv4Interface, ipaddress.IPv6Interface, + ipaddress.IPv4Network, ipaddress.IPv6Network]: + register_adapter(t, adapt_ipaddress) + + +def _make_casters(): + inet = new_type((869,), 'INET', cast_interface) + ainet = new_array_type((1041,), 'INET[]', inet) + + cidr = new_type((650,), 'CIDR', cast_network) + acidr = new_array_type((651,), 'CIDR[]', cidr) + + return [inet, ainet, cidr, acidr] + + +def cast_interface(s, cur=None): + if s is None: + return None + # Py2 version force the use of unicode. meh. 
+ return ipaddress.ip_interface(unicode(s)) + + +def cast_network(s, cur=None): + if s is None: + return None + return ipaddress.ip_network(unicode(s)) + + +def adapt_ipaddress(obj): + return QuotedString(str(obj)) diff --git a/lib/extras.py b/lib/extras.py index a9f5b447..5c4f5d2a 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -59,6 +59,10 @@ from psycopg2._range import ( # noqa register_range, RangeAdapter, RangeCaster) +# Expose ipaddress-related objects +from psycopg2._ipaddress import register_ipaddress # noqa + + class DictCursorBase(_cursor): """Base class for all dict-like cursors.""" diff --git a/tests/__init__.py b/tests/__init__.py index ada55276..1a240994 100755 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -38,6 +38,7 @@ import test_dates import test_errcodes import test_extras_dictcursor import test_green +import test_ipaddress import test_lobject import test_module import test_notify @@ -46,11 +47,7 @@ import test_quote import test_transaction import test_types_basic import test_types_extras - -if sys.version_info[:2] >= (2, 5): - import test_with -else: - test_with = None +import test_with def test_suite(): @@ -78,6 +75,7 @@ def test_suite(): suite.addTest(test_errcodes.test_suite()) suite.addTest(test_extras_dictcursor.test_suite()) suite.addTest(test_green.test_suite()) + suite.addTest(test_ipaddress.test_suite()) suite.addTest(test_lobject.test_suite()) suite.addTest(test_module.test_suite()) suite.addTest(test_notify.test_suite()) @@ -86,8 +84,7 @@ def test_suite(): suite.addTest(test_transaction.test_suite()) suite.addTest(test_types_basic.test_suite()) suite.addTest(test_types_extras.test_suite()) - if test_with: - suite.addTest(test_with.test_suite()) + suite.addTest(test_with.test_suite()) return suite if __name__ == '__main__': diff --git a/tests/test_ipaddress.py b/tests/test_ipaddress.py new file mode 100755 index 00000000..97eabbaf --- /dev/null +++ b/tests/test_ipaddress.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python +# # test_ipaddress.py - tests for ipaddress support # +# Copyright (C) 2016 Daniele Varrazzo +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
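The test module below exercises the new `register_ipaddress()` function. The intended round trip, sketched under the assumption that the `ipaddress` module is importable and a test database is reachable, looks roughly like this::

    import ipaddress

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect("dbname=test")   # assumed DSN
    cur = conn.cursor()
    psycopg2.extras.register_ipaddress(cur)

    # inet values are cast to IPv4Interface/IPv6Interface objects ...
    cur.execute("select '192.168.0.1/24'::inet")
    print(repr(cur.fetchone()[0]))   # IPv4Interface('192.168.0.1/24')

    # ... cidr values to IPv4Network/IPv6Network objects ...
    cur.execute("select '192.168.0.0/24'::cidr")
    print(repr(cur.fetchone()[0]))   # IPv4Network('192.168.0.0/24')

    # ... and ipaddress objects are adapted to quoted strings on the way in.
    cur.execute("select %s", [ipaddress.ip_network(u'192.168.0.0/24')])
    print(cur.fetchone()[0])         # the plain string '192.168.0.0/24'

    conn.close()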
+ +from __future__ import unicode_literals + +import sys +from functools import wraps + +from testutils import unittest, ConnectingTestCase, decorate_all_tests + +import psycopg2 +import psycopg2.extras + + +def skip_if_no_ipaddress(f): + @wraps(f) + def skip_if_no_ipaddress_(self): + if sys.version_info[:2] < (3, 3): + try: + import ipaddress # noqa + except ImportError: + return self.skipTest("'ipaddress' module not available") + + return f(self) + + return skip_if_no_ipaddress_ + + +class NetworkingTestCase(ConnectingTestCase): + def test_inet_cast(self): + import ipaddress as ip + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + + cur.execute("select null::inet") + self.assert_(cur.fetchone()[0] is None) + + cur.execute("select '127.0.0.1/24'::inet") + obj = cur.fetchone()[0] + self.assert_(isinstance(obj, ip.IPv4Interface), repr(obj)) + self.assertEquals(obj, ip.ip_interface('127.0.0.1/24')) + + cur.execute("select '::ffff:102:300/128'::inet") + obj = cur.fetchone()[0] + self.assert_(isinstance(obj, ip.IPv6Interface), repr(obj)) + self.assertEquals(obj, ip.ip_interface('::ffff:102:300/128')) + + def test_inet_array_cast(self): + import ipaddress as ip + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + cur.execute("select '{NULL,127.0.0.1,::ffff:102:300/128}'::inet[]") + l = cur.fetchone()[0] + self.assert_(l[0] is None) + self.assertEquals(l[1], ip.ip_interface('127.0.0.1')) + self.assertEquals(l[2], ip.ip_interface('::ffff:102:300/128')) + self.assert_(isinstance(l[1], ip.IPv4Interface), l) + self.assert_(isinstance(l[2], ip.IPv6Interface), l) + + def test_inet_adapt(self): + import ipaddress as ip + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + + cur.execute("select %s", [ip.ip_interface('127.0.0.1/24')]) + self.assertEquals(cur.fetchone()[0], '127.0.0.1/24') + + cur.execute("select %s", [ip.ip_interface('::ffff:102:300/128')]) + self.assertEquals(cur.fetchone()[0], '::ffff:102:300/128') + + def test_cidr_cast(self): + import ipaddress as ip + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + + cur.execute("select null::cidr") + self.assert_(cur.fetchone()[0] is None) + + cur.execute("select '127.0.0.0/24'::cidr") + obj = cur.fetchone()[0] + self.assert_(isinstance(obj, ip.IPv4Network), repr(obj)) + self.assertEquals(obj, ip.ip_network('127.0.0.0/24')) + + cur.execute("select '::ffff:102:300/128'::cidr") + obj = cur.fetchone()[0] + self.assert_(isinstance(obj, ip.IPv6Network), repr(obj)) + self.assertEquals(obj, ip.ip_network('::ffff:102:300/128')) + + def test_cidr_array_cast(self): + import ipaddress as ip + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + cur.execute("select '{NULL,127.0.0.1,::ffff:102:300/128}'::cidr[]") + l = cur.fetchone()[0] + self.assert_(l[0] is None) + self.assertEquals(l[1], ip.ip_network('127.0.0.1')) + self.assertEquals(l[2], ip.ip_network('::ffff:102:300/128')) + self.assert_(isinstance(l[1], ip.IPv4Network), l) + self.assert_(isinstance(l[2], ip.IPv6Network), l) + + def test_cidr_adapt(self): + import ipaddress as ip + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + + cur.execute("select %s", [ip.ip_network('127.0.0.0/24')]) + self.assertEquals(cur.fetchone()[0], '127.0.0.0/24') + + cur.execute("select %s", [ip.ip_network('::ffff:102:300/128')]) + self.assertEquals(cur.fetchone()[0], '::ffff:102:300/128') + +decorate_all_tests(NetworkingTestCase, skip_if_no_ipaddress) + + +def test_suite(): + return 
unittest.TestLoader().loadTestsFromName(__name__)
+
+
+if __name__ == "__main__":
+    unittest.main()

From 584c7e68902c45c72f2ab3b0b171aca58ce36f0d Mon Sep 17 00:00:00 2001
From: Daniele Varrazzo
Date: Wed, 12 Oct 2016 00:28:25 +0100
Subject: [PATCH 131/151] Dropped compiler warning in debug mode

---
 psycopg/replication_cursor_type.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/psycopg/replication_cursor_type.c b/psycopg/replication_cursor_type.c
index 271214f1..d66bec36 100644
--- a/psycopg/replication_cursor_type.c
+++ b/psycopg/replication_cursor_type.c
@@ -60,7 +60,7 @@ psyco_repl_curs_start_replication_expert(replicationCursorObject *self,
     EXC_IF_GREEN(start_replication_expert);
     EXC_IF_TPC_PREPARED(conn, start_replication_expert);

-    Dprintf("psyco_repl_curs_start_replication_expert: '%s'; decode: %d", command, decode);
+    Dprintf("psyco_repl_curs_start_replication_expert: '%s'; decode: %ld", command, decode);

     if (pq_execute(curs, command, conn->async, 1 /* no_result */, 1 /* no_begin */) >= 0) {
         res = Py_None;

From fb1dbc2a9b308dafa1d8d8e21ef39722d4c6473c Mon Sep 17 00:00:00 2001
From: Christoph Moench-Tegeder
Date: Fri, 21 Oct 2016 15:32:11 +0200
Subject: [PATCH 132/151] do not "SET datestyle" on replication connections

A replication connection - marked by the use of the keyword "replication"
in the DSN - does not support SET commands. Trying to send "SET datestyle"
will result in an exception.
---
 psycopg/connection_int.c | 29 ++++++++++++++++++++++++++---
 1 file changed, 26 insertions(+), 3 deletions(-)

diff --git a/psycopg/connection_int.c b/psycopg/connection_int.c
index 43d0fdae..c8880b16 100644
--- a/psycopg/connection_int.c
+++ b/psycopg/connection_int.c
@@ -494,6 +494,26 @@ conn_setup_cancel(connectionObject *self, PGconn *pgconn)
     return 0;
 }

+/* Return 1 if the "replication" keyword is set in the DSN, 0 otherwise */
+static int
+dsn_has_replication(char *pgdsn)
+{
+    int ret = 0;
+    PQconninfoOption *connopts, *ptr;
+
+    connopts = PQconninfoParse(pgdsn, NULL);
+
+    for(ptr = connopts; ptr->keyword != NULL; ptr++) {
+        printf("keyword %s val %s\n", ptr->keyword, ptr->val);
+        if(strcmp(ptr->keyword, "replication") == 0 && ptr->val != NULL)
+            ret = 1;
+    }
+
+    PQconninfoFree(connopts);
+
+    return ret;
+}
+
 /* Return 1 if the server datestyle allows us to work without problems,
    0 if it needs to be set to something better, e.g. ISO. */
@@ -543,7 +563,7 @@ conn_setup(connectionObject *self, PGconn *pgconn)
     pthread_mutex_lock(&self->lock);
     Py_BLOCK_THREADS;

-    if (!conn_is_datestyle_ok(self->pgconn)) {
+    if (!dsn_has_replication(self->dsn) && !conn_is_datestyle_ok(self->pgconn)) {
         int res;
         Py_UNBLOCK_THREADS;
         res = pq_set_guc_locked(self, "datestyle", "ISO",
@@ -859,8 +879,11 @@ _conn_poll_setup_async(connectionObject *self)
         self->autocommit = 1;

         /* If the datestyle is ISO or anything else good,
-         * we can skip the CONN_STATUS_DATESTYLE step. */
-        if (!conn_is_datestyle_ok(self->pgconn)) {
+         * we can skip the CONN_STATUS_DATESTYLE step.
+         * Note that we cannot change the datestyle on a replication
+         * connection.
+ */ + if (!dsn_has_replication(self->dsn) && !conn_is_datestyle_ok(self->pgconn)) { Dprintf("conn_poll: status -> CONN_STATUS_DATESTYLE"); self->status = CONN_STATUS_DATESTYLE; if (0 == pq_send_query(self, psyco_datestyle)) { From 3971ee6d1fccf831395f396a81afbd65c21b605d Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sat, 24 Dec 2016 00:07:23 +0100 Subject: [PATCH 133/151] Testing CI with Travis --- .travis.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.travis.yml b/.travis.yml index 1aa25416..09744c20 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,11 @@ language: python +services: + - postgresql + +addons: + postgresql: 9.4 + python: - 2.6 - 2.7 From 0be783c4546c2bbebc67040b212db38ab872d59c Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sat, 24 Dec 2016 00:12:07 +0100 Subject: [PATCH 134/151] Disable email notification Mmm... it seems it's going to be a long night... --- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index 09744c20..9587a78f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,3 +17,7 @@ install: - python setup.py install script: make check + + +notifications: + email: false From b3cd125d2757872e9337d8df3d8e286345a67450 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sat, 24 Dec 2016 00:18:09 +0100 Subject: [PATCH 135/151] Create the hstore extension in the trevis db --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 9587a78f..027763e2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,6 +12,7 @@ python: before_script: - psql -c 'create database psycopg2_test;' -U postgres + - psql -c 'create extension hstore;' -U postgres install: - python setup.py install From a478ba9a4785eac4839f0c4f65e90f6557d42c65 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sat, 24 Dec 2016 00:18:22 +0100 Subject: [PATCH 136/151] Fixed tests failing on Python 2.6 --- tests/test_connection.py | 2 +- tests/test_module.py | 4 ++-- tests/test_quote.py | 10 ++++++---- tests/test_replication.py | 4 ---- tests/testutils.py | 3 +++ 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/tests/test_connection.py b/tests/test_connection.py index 8744488d..833751b9 100755 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -465,7 +465,7 @@ class MakeDsnTestCase(ConnectingTestCase): conn = self.connect() d = conn.get_dsn_parameters() self.assertEqual(d['dbname'], dbname) # the only param we can check reliably - self.assertNotIn('password', d) + self.assert_('password' not in d, d) class IsolationLevelsTestCase(ConnectingTestCase): diff --git a/tests/test_module.py b/tests/test_module.py index 1a9a19d4..6a1606d6 100755 --- a/tests/test_module.py +++ b/tests/test_module.py @@ -119,8 +119,8 @@ class ConnectTestCase(unittest.TestCase): def test_int_port_param(self): psycopg2.connect(database='sony', port=6543) dsn = " %s " % self.args[0] - self.assertIn(" dbname=sony ", dsn) - self.assertIn(" port=6543 ", dsn) + self.assert_(" dbname=sony " in dsn, dsn) + self.assert_(" port=6543 " in dsn, dsn) def test_empty_param(self): psycopg2.connect(database='sony', password='') diff --git a/tests/test_quote.py b/tests/test_quote.py index f74fd854..72c9c1e4 100755 --- a/tests/test_quote.py +++ b/tests/test_quote.py @@ -65,11 +65,13 @@ class QuotingTestCase(ConnectingTestCase): curs = self.conn.cursor() data = 'abcd\x01\x00cdefg' - with self.assertRaises(ValueError) as e: + try: curs.execute("SELECT %s", (data,)) - - self.assertEquals(str(e.exception), - 'A string 
literal cannot contain NUL (0x00) characters.') + except ValueError as e: + self.assertEquals(str(e), + 'A string literal cannot contain NUL (0x00) characters.') + else: + self.fail("ValueError not raised") def test_binary(self): data = b"""some data with \000\013 binary diff --git a/tests/test_replication.py b/tests/test_replication.py index ca99038a..2ccd4c77 100644 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -35,11 +35,7 @@ from testutils import ConnectingTestCase class ReplicationTestCase(ConnectingTestCase): def setUp(self): - if not testconfig.repl_dsn: - self.skipTest("replication tests disabled by default") - super(ReplicationTestCase, self).setUp() - self.slot = testconfig.repl_slot self._slots = [] diff --git a/tests/testutils.py b/tests/testutils.py index d0a34bcf..1dd0c999 100644 --- a/tests/testutils.py +++ b/tests/testutils.py @@ -122,6 +122,9 @@ class ConnectingTestCase(unittest.TestCase): Should raise a skip test if not available, but guard for None on old Python versions. """ + if repl_dsn is None: + return self.skipTest("replication tests disabled by default") + if 'dsn' not in kwargs: kwargs['dsn'] = repl_dsn import psycopg2 From 11ad1005e0b03de7eefe883e890a060611bcaede Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sat, 24 Dec 2016 01:09:57 +0100 Subject: [PATCH 137/151] Added python3 supported versions --- .travis.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.travis.yml b/.travis.yml index 027763e2..02d96045 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,6 +9,11 @@ addons: python: - 2.6 - 2.7 + - 3.2 + - 3.3 + - 3.4 + - 3.5 + - 3.6-dev before_script: - psql -c 'create database psycopg2_test;' -U postgres From def22982fb01c6b1411c721ddf0a9f73865d0383 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sat, 24 Dec 2016 02:15:24 +0100 Subject: [PATCH 138/151] Run the tests against all the available server versions --- .travis.yml | 18 +++++++---------- scripts/travis_prepare.sh | 41 +++++++++++++++++++++++++++++++++++++++ scripts/travis_test.sh | 39 +++++++++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+), 11 deletions(-) create mode 100755 scripts/travis_prepare.sh create mode 100755 scripts/travis_test.sh diff --git a/.travis.yml b/.travis.yml index 02d96045..d41c801b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,11 +1,9 @@ +# Travis CI configuration file for psycopg2 + +dist: trusty +sudo: required language: python -services: - - postgresql - -addons: - postgresql: 9.4 - python: - 2.6 - 2.7 @@ -15,14 +13,12 @@ python: - 3.5 - 3.6-dev -before_script: - - psql -c 'create database psycopg2_test;' -U postgres - - psql -c 'create extension hstore;' -U postgres - install: - python setup.py install -script: make check +script: + - sudo scripts/travis_prepare.sh + - scripts/travis_test.sh notifications: diff --git a/scripts/travis_prepare.sh b/scripts/travis_prepare.sh new file mode 100755 index 00000000..86b85bae --- /dev/null +++ b/scripts/travis_prepare.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +set -e + +# Prepare the test databases in Travis CI. +# The script should be run with sudo. +# The script is not idempotent: it assumes the machine in a clean state +# and is designed for a sudo-enabled Trusty environment. 
+ +set_param () { + # Set a parameter in a postgresql.conf file + version=$1 + param=$2 + value=$3 + + sed -i "s/^\s*#\?\s*$param.*/$param = $value/" \ + "/etc/postgresql/$version/psycopg/postgresql.conf" +} + +create () { + version=$1 + port=$2 + dbname=psycopg2_test_$port + + pg_createcluster -p $port --start-conf manual $version psycopg + set_param "$version" max_prepared_transactions 10 + sed -i "s/local\s*all\s*postgres.*/local all postgres trust/" \ + "/etc/postgresql/$version/psycopg/pg_hba.conf" + pg_ctlcluster "$version" psycopg start + + sudo -u postgres psql -c "create user travis" "port=$port" +} + +# Would give a permission denied error in the travis build dir +cd / + +create 9.6 54396 +create 9.5 54395 +create 9.4 54394 +create 9.3 54393 +create 9.2 54392 diff --git a/scripts/travis_test.sh b/scripts/travis_test.sh new file mode 100755 index 00000000..3a1bdb28 --- /dev/null +++ b/scripts/travis_test.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Run the tests in all the databases +# The script is designed for a Trusty environment. + +set -e + +run_test () { + version=$1 + port=$2 + pyver=$(python -c "import sys; print(''.join(map(str,sys.version_info[:2])))") + dbname=psycopg_test_$pyver + + # Create a database for each python version to allow tests to run in parallel + psql -c "create database $dbname" \ + "user=postgres port=$port dbname=postgres" + + psql -c "grant create on database $dbname to travis" \ + "user=postgres port=$port dbname=postgres" + + psql -c "create extension hstore" \ + "user=postgres port=$port dbname=$dbname" + + printf "\n\nRunning tests against PostgreSQL $version\n\n" + export PSYCOPG2_TESTDB=$dbname + export PSYCOPG2_TESTDB_PORT=$port + export PSYCOPG2_TESTDB_USER=travis + make check + + printf "\n\nRunning tests against PostgreSQL $version (green mode)\n\n" + export PSYCOPG2_TEST_GREEN=1 + make check +} + +run_test 9.6 54396 +run_test 9.5 54395 +run_test 9.4 54394 +run_test 9.3 54393 +run_test 9.2 54392 From 6758ce5eefe7122f1e3e68743504a9ca6b33321c Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sat, 24 Dec 2016 04:27:51 +0100 Subject: [PATCH 139/151] Test Python versions in a more relevant order --- .travis.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index d41c801b..ef056fa1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,13 +5,13 @@ sudo: required language: python python: - - 2.6 - 2.7 - - 3.2 - - 3.3 - - 3.4 - - 3.5 - 3.6-dev + - 3.5 + - 2.6 + - 3.4 + - 3.3 + - 3.2 install: - python setup.py install From 1463bdb86d46d2d729bf0663443d765ac975f100 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sat, 24 Dec 2016 04:28:34 +0100 Subject: [PATCH 140/151] Added build badge to readme --- README.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.rst b/README.rst index 51d2d6b6..f18be564 100644 --- a/README.rst +++ b/README.rst @@ -44,3 +44,8 @@ For any other resource (source code repository, bug tracker, mailing list) please check the `project homepage`__. .. __: http://initd.org/psycopg/ + + +.. image:: https://travis-ci.org/psycopg/psycopg2.svg?branch=master + :target: https://travis-ci.org/psycopg/psycopg2 + :alt: Build Status From feebc8f68991fc1e84ac4fefb9cea0c373eea6db Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sat, 24 Dec 2016 04:42:07 +0100 Subject: [PATCH 141/151] Don't use separate databases for tests I got this wrong: I thought parallel test ran in the same VM; they are isolated instead. 
--- .travis.yml | 5 ++--- scripts/travis_prepare.sh | 8 +++++--- scripts/travis_test.sh | 13 +------------ 3 files changed, 8 insertions(+), 18 deletions(-) diff --git a/.travis.yml b/.travis.yml index ef056fa1..10411637 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,19 +7,18 @@ language: python python: - 2.7 - 3.6-dev - - 3.5 - 2.6 + - 3.5 - 3.4 - 3.3 - 3.2 install: - python setup.py install + - sudo scripts/travis_prepare.sh script: - - sudo scripts/travis_prepare.sh - scripts/travis_test.sh - notifications: email: false diff --git a/scripts/travis_prepare.sh b/scripts/travis_prepare.sh index 86b85bae..f4e86118 100755 --- a/scripts/travis_prepare.sh +++ b/scripts/travis_prepare.sh @@ -20,17 +20,19 @@ set_param () { create () { version=$1 port=$2 - dbname=psycopg2_test_$port + dbname=psycopg2_test pg_createcluster -p $port --start-conf manual $version psycopg set_param "$version" max_prepared_transactions 10 - sed -i "s/local\s*all\s*postgres.*/local all postgres trust/" \ - "/etc/postgresql/$version/psycopg/pg_hba.conf" pg_ctlcluster "$version" psycopg start sudo -u postgres psql -c "create user travis" "port=$port" + sudo -u postgres psql -c "create database $dbname" "port=$port" + sudo -u postgres psql -c "grant create on database $dbname to travis" "port=$port" + sudo -u postgres psql -c "create extension hstore" "port=$port dbname=$dbname" } + # Would give a permission denied error in the travis build dir cd / diff --git a/scripts/travis_test.sh b/scripts/travis_test.sh index 3a1bdb28..df9413a1 100755 --- a/scripts/travis_test.sh +++ b/scripts/travis_test.sh @@ -8,18 +8,7 @@ set -e run_test () { version=$1 port=$2 - pyver=$(python -c "import sys; print(''.join(map(str,sys.version_info[:2])))") - dbname=psycopg_test_$pyver - - # Create a database for each python version to allow tests to run in parallel - psql -c "create database $dbname" \ - "user=postgres port=$port dbname=postgres" - - psql -c "grant create on database $dbname to travis" \ - "user=postgres port=$port dbname=postgres" - - psql -c "create extension hstore" \ - "user=postgres port=$port dbname=$dbname" + dbname=psycopg2_test printf "\n\nRunning tests against PostgreSQL $version\n\n" export PSYCOPG2_TESTDB=$dbname From c2d405116b7b68808930eebf7e7b076d8dd17030 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 25 Dec 2016 17:44:25 +0100 Subject: [PATCH 142/151] Dropped testing print --- psycopg/connection_int.c | 1 - 1 file changed, 1 deletion(-) diff --git a/psycopg/connection_int.c b/psycopg/connection_int.c index c8880b16..e5c6579f 100644 --- a/psycopg/connection_int.c +++ b/psycopg/connection_int.c @@ -504,7 +504,6 @@ dsn_has_replication(char *pgdsn) connopts = PQconninfoParse(pgdsn, NULL); for(ptr = connopts; ptr->keyword != NULL; ptr++) { - printf("keyword %s val %s\n", ptr->keyword, ptr->val); if(strcmp(ptr->keyword, "replication") == 0 && ptr->val != NULL) ret = 1; } From e27579292aff953dacdc1892f00dc32bb73a29c1 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 25 Dec 2016 17:45:01 +0100 Subject: [PATCH 143/151] Avoid deadlock on close if set datestyle failed --- psycopg/connection_int.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/psycopg/connection_int.c b/psycopg/connection_int.c index e5c6579f..a34e5ef9 100644 --- a/psycopg/connection_int.c +++ b/psycopg/connection_int.c @@ -541,21 +541,22 @@ conn_setup(connectionObject *self, PGconn *pgconn) { PGresult *pgres = NULL; char *error = NULL; + int rv = -1; self->equote = 
conn_get_standard_conforming_strings(pgconn); self->server_version = conn_get_server_version(pgconn); self->protocol = conn_get_protocol_version(self->pgconn); if (3 != self->protocol) { PyErr_SetString(InterfaceError, "only protocol 3 supported"); - return -1; + goto exit; } if (0 > conn_read_encoding(self, pgconn)) { - return -1; + goto exit; } if (0 > conn_setup_cancel(self, pgconn)) { - return -1; + goto exit; } Py_BEGIN_ALLOW_THREADS; @@ -570,18 +571,23 @@ conn_setup(connectionObject *self, PGconn *pgconn) Py_BLOCK_THREADS; if (res < 0) { pq_complete_error(self, &pgres, &error); - return -1; + goto unlock; } } /* for reset */ self->autocommit = 0; + /* success */ + rv = 0; + +unlock: Py_UNBLOCK_THREADS; pthread_mutex_unlock(&self->lock); Py_END_ALLOW_THREADS; - return 0; +exit: + return rv; } /* conn_connect - execute a connection to the database */ From b73115ac41559c31fc3a2a3fdb0893046c08d1a5 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 25 Dec 2016 17:46:11 +0100 Subject: [PATCH 144/151] Added test to verify bug #482 --- tests/test_replication.py | 17 +++++++++++++++-- tests/testutils.py | 11 ++++++++++- 2 files changed, 25 insertions(+), 3 deletions(-) mode change 100644 => 100755 tests/test_replication.py diff --git a/tests/test_replication.py b/tests/test_replication.py old mode 100644 new mode 100755 index 2ccd4c77..33a8065a --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -23,7 +23,6 @@ # License for more details. import psycopg2 -import psycopg2.extensions from psycopg2.extras import ( PhysicalReplicationConnection, LogicalReplicationConnection, StopReplication) @@ -89,6 +88,20 @@ class ReplicationTest(ReplicationTestCase): cur.execute("IDENTIFY_SYSTEM") cur.fetchall() + @skip_before_postgres(9, 0) + def test_datestyle(self): + if testconfig.repl_dsn is None: + return self.skipTest("replication tests disabled by default") + + conn = self.repl_connect( + dsn=testconfig.repl_dsn, options='-cdatestyle=german', + connection_factory=PhysicalReplicationConnection) + if conn is None: + return + cur = conn.cursor() + cur.execute("IDENTIFY_SYSTEM") + cur.fetchall() + @skip_before_postgres(9, 4) def test_logical_replication_connection(self): conn = self.repl_connect(connection_factory=LogicalReplicationConnection) @@ -168,7 +181,7 @@ class AsyncReplicationTest(ReplicationTestCase): connection_factory=LogicalReplicationConnection, async=1) if conn is None: return - self.wait(conn) + cur = conn.cursor() self.create_replication_slot(cur, output_plugin='test_decoding') diff --git a/tests/testutils.py b/tests/testutils.py index 1dd0c999..93477357 100644 --- a/tests/testutils.py +++ b/tests/testutils.py @@ -130,8 +130,17 @@ class ConnectingTestCase(unittest.TestCase): import psycopg2 try: conn = self.connect(**kwargs) + if conn.async == 1: + self.wait(conn) except psycopg2.OperationalError, e: - return self.skipTest("replication db not configured: %s" % e) + # If pgcode is not set it is a genuine connection error + # Otherwise we tried to run some bad operation in the connection + # (e.g. bug #482) and we'd rather know that. 
+ if e.pgcode is None: + return self.skipTest("replication db not configured: %s" % e) + else: + raise + return conn def _get_conn(self): From 874705db429de5cc23a20c5e5cb85287c163f037 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 25 Dec 2016 17:49:58 +0100 Subject: [PATCH 145/151] Configure Travis to test replication --- scripts/travis_prepare.sh | 19 ++++++++++++++++++- scripts/travis_test.sh | 6 ++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/scripts/travis_prepare.sh b/scripts/travis_prepare.sh index f4e86118..2b1e12eb 100755 --- a/scripts/travis_prepare.sh +++ b/scripts/travis_prepare.sh @@ -23,10 +23,27 @@ create () { dbname=psycopg2_test pg_createcluster -p $port --start-conf manual $version psycopg + + # for two-phase commit testing set_param "$version" max_prepared_transactions 10 + + # for replication testing + set_param "$version" max_wal_senders 5 + set_param "$version" max_replication_slots 5 + if [ "$version" == "9.2" -o "$version" == "9.3" ] + then + set_param "$version" wal_level hot_standby + else + set_param "$version" wal_level logical + fi + + echo "local replication travis trust" \ + >> "/etc/postgresql/$version/psycopg/pg_hba.conf" + + pg_ctlcluster "$version" psycopg start - sudo -u postgres psql -c "create user travis" "port=$port" + sudo -u postgres psql -c "create user travis replication" "port=$port" sudo -u postgres psql -c "create database $dbname" "port=$port" sudo -u postgres psql -c "grant create on database $dbname to travis" "port=$port" sudo -u postgres psql -c "create extension hstore" "port=$port dbname=$dbname" diff --git a/scripts/travis_test.sh b/scripts/travis_test.sh index df9413a1..15783088 100755 --- a/scripts/travis_test.sh +++ b/scripts/travis_test.sh @@ -14,11 +14,13 @@ run_test () { export PSYCOPG2_TESTDB=$dbname export PSYCOPG2_TESTDB_PORT=$port export PSYCOPG2_TESTDB_USER=travis - make check + export PSYCOPG2_TEST_REPL_DSN= + + python -c "from psycopg2 import tests; tests.unittest.main(defaultTest='tests.test_suite')" --verbose printf "\n\nRunning tests against PostgreSQL $version (green mode)\n\n" export PSYCOPG2_TEST_GREEN=1 - make check + python -c "from psycopg2 import tests; tests.unittest.main(defaultTest='tests.test_suite')" --verbose } run_test 9.6 54396 From c22093ddd49ea6045e05b9eaafafc7a001bac1a5 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Sun, 25 Dec 2016 19:00:30 +0100 Subject: [PATCH 146/151] Skip replication tests in green mode --- tests/test_replication.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/test_replication.py b/tests/test_replication.py index 33a8065a..79d1295d 100755 --- a/tests/test_replication.py +++ b/tests/test_replication.py @@ -27,9 +27,10 @@ from psycopg2.extras import ( PhysicalReplicationConnection, LogicalReplicationConnection, StopReplication) import testconfig -from testutils import unittest -from testutils import skip_before_postgres -from testutils import ConnectingTestCase +from testutils import unittest, ConnectingTestCase +from testutils import skip_before_postgres, skip_if_green + +skip_repl_if_green = skip_if_green("replication not supported in green mode") class ReplicationTestCase(ConnectingTestCase): @@ -123,6 +124,7 @@ class ReplicationTest(ReplicationTestCase): psycopg2.ProgrammingError, self.create_replication_slot, cur) @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green def test_start_on_missing_replication_slot(self): conn = 
self.repl_connect(connection_factory=PhysicalReplicationConnection) if conn is None: @@ -136,6 +138,7 @@ class ReplicationTest(ReplicationTestCase): cur.start_replication(self.slot) @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green def test_start_and_recover_from_error(self): conn = self.repl_connect(connection_factory=LogicalReplicationConnection) if conn is None: @@ -157,6 +160,7 @@ class ReplicationTest(ReplicationTestCase): cur.start_replication(slot_name=self.slot) @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green def test_stop_replication(self): conn = self.repl_connect(connection_factory=LogicalReplicationConnection) if conn is None: @@ -176,6 +180,7 @@ class ReplicationTest(ReplicationTestCase): class AsyncReplicationTest(ReplicationTestCase): @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green def test_async_replication(self): conn = self.repl_connect( connection_factory=LogicalReplicationConnection, async=1) From d48d4bab0520d047da1522e12ddaa51702e231d0 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 7 Jul 2016 12:05:29 +0100 Subject: [PATCH 147/151] Added empty options in setup.cfg Setuptools removes them from the sdist, see #453 --- setup.cfg | 15 +++++++-------- setup.py | 7 ++++++- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/setup.cfg b/setup.cfg index 90a47dd4..0d41934f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -7,24 +7,23 @@ define= # "pg_config" is required to locate PostgreSQL headers and libraries needed to # build psycopg2. If pg_config is not in the path or is installed under a -# different name uncomment the following option and set it to the pg_config -# full path. -#pg_config= +# different name set the following option to the pg_config full path. +pg_config= # Set to 1 to use Python datetime objects for default date/time representation. use_pydatetime=1 # If the build system does not find the mx.DateTime headers, try -# uncommenting the following line and setting its value to the right path. -#mx_include_dir= +# setting its value to the right path. +mx_include_dir= # For Windows only: # Set to 1 if the PostgreSQL library was built with OpenSSL. # Required to link in OpenSSL libraries and dependencies. have_ssl=0 -# Statically link against the postgresql client library. -#static_libpq=1 +# Set to 1 to statically link against the postgresql client library. +static_libpq=0 # Add here eventual extra libraries required to link the module. 
-#libraries= +libraries= diff --git a/setup.py b/setup.py index 3f021830..c1065258 100644 --- a/setup.py +++ b/setup.py @@ -381,6 +381,11 @@ class psycopg_build_ext(build_ext): def finalize_options(self): """Complete the build system configuration.""" + # An empty option in the setup.cfg causes self.libraries to include + # an empty string in the list of libraries + if self.libraries is not None and not self.libraries.strip(): + self.libraries = None + build_ext.finalize_options(self) pg_config_helper = PostgresConfig(self) @@ -521,7 +526,7 @@ if parser.has_option('build_ext', 'mx_include_dir'): mxincludedir = parser.get('build_ext', 'mx_include_dir') else: mxincludedir = os.path.join(get_python_inc(plat_specific=1), "mx") -if os.path.exists(mxincludedir): +if mxincludedir.strip() and os.path.exists(mxincludedir): # Build the support for mx: we will check at runtime if it can be imported include_dirs.append(mxincludedir) define_macros.append(('HAVE_MXDATETIME', '1')) From 35b4a01b6d941d7d6f1b8040db6037c53f713aa1 Mon Sep 17 00:00:00 2001 From: Tim Graham Date: Sat, 17 Sep 2016 15:36:19 -0400 Subject: [PATCH 148/151] Fix "invalid escape sequence" warning in Python 3.6 http://bugs.python.org/issue27364 --- lib/extras.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/extras.py b/lib/extras.py index 5c4f5d2a..7fc853a6 100644 --- a/lib/extras.py +++ b/lib/extras.py @@ -908,7 +908,7 @@ WHERE typname = 'hstore'; def register_hstore(conn_or_curs, globally=False, unicode=False, oid=None, array_oid=None): - """Register adapter and typecaster for `!dict`\-\ |hstore| conversions. + r"""Register adapter and typecaster for `!dict`\-\ |hstore| conversions. :param conn_or_curs: a connection or cursor: the typecaster will be registered only on this object unless *globally* is set to `!True` From dcb198e8b7b15b7f3c6f39d9879cb0a2474bc41d Mon Sep 17 00:00:00 2001 From: Luke Nezda Date: Mon, 12 Sep 2016 09:19:20 -0500 Subject: [PATCH 149/151] fix wait_select sample to be `extras` not `extensions` --- doc/src/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/faq.rst b/doc/src/faq.rst index d0636669..89d8a639 100644 --- a/doc/src/faq.rst +++ b/doc/src/faq.rst @@ -241,7 +241,7 @@ How do I interrupt a long-running query in an interactive shell? .. 
code-block:: pycon

-    >>> psycopg2.extensions.set_wait_callback(psycopg2.extensions.wait_select)
+    >>> psycopg2.extensions.set_wait_callback(psycopg2.extras.wait_select)
     >>> cnn = psycopg2.connect('')
     >>> cur = cnn.cursor()
     >>> cur.execute("select pg_sleep(10)")

From 4c99cadabe176ef46573378e7cb8bd71b63bd0f3 Mon Sep 17 00:00:00 2001
From: Daniele Varrazzo
Date: Sun, 25 Dec 2016 20:55:01 +0100
Subject: [PATCH 150/151] Fixed intersphinx links to Python docs

---
 doc/src/conf.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/src/conf.py b/doc/src/conf.py
index 22c5c46f..a918c08c 100644
--- a/doc/src/conf.py
+++ b/doc/src/conf.py
@@ -61,8 +61,8 @@ except ImportError:
     release = version

 intersphinx_mapping = {
-    'py': ('http://docs.python.org/', None),
-    'py3': ('http://docs.python.org/3.4', None),
+    'py': ('http://docs.python.org/2', None),
+    'py3': ('http://docs.python.org/3', None),
 }

 # Pattern to generate links to the bug tracker

From 17698c481566c0e8c1d5d65fe88e0cb7e4505957 Mon Sep 17 00:00:00 2001
From: Daniele Varrazzo
Date: Sun, 25 Dec 2016 20:55:15 +0100
Subject: [PATCH 151/151] Fixed ReST error in newsfile

---
 NEWS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/NEWS b/NEWS
index b7efef29..67883d74 100644
--- a/NEWS
+++ b/NEWS
@@ -36,7 +36,7 @@ What's new in psycopg 2.6.3
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^

 - Throw an exception trying to pass ``NULL`` chars as parameters
-  (:ticket:`#420).
+  (:ticket:`#420`).
 - Make `~psycopg2.extras.Range` objects picklable (:ticket:`#462`).
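
A minimal usage sketch of the `ipaddress` support introduced in this series,
for reference only (it is not part of any patch above): it assumes Python 3
and a reachable database, the DSN below is a placeholder, and the expected
values mirror those exercised in tests/test_ipaddress.py::

    import ipaddress

    import psycopg2
    import psycopg2.extras

    # Placeholder DSN: point it at any database you can reach.
    conn = psycopg2.connect("dbname=psycopg2_test")
    psycopg2.extras.register_ipaddress(conn)

    cur = conn.cursor()

    # inet values come back as IPv4Interface/IPv6Interface objects,
    # cidr values as IPv4Network/IPv6Network objects.
    cur.execute("select '127.0.0.1/24'::inet, '10.0.0.0/8'::cidr")
    iface, net = cur.fetchone()
    print(iface)  # 127.0.0.1/24 (an ipaddress.IPv4Interface)
    print(net)    # 10.0.0.0/8 (an ipaddress.IPv4Network)

    # ipaddress objects are adapted when passed as query parameters.
    cur.execute("select %s", [ipaddress.ip_network('192.168.0.0/24')])
    print(cur.fetchone()[0])  # 192.168.0.0/24

On Python 2 the same sketch works as long as unicode strings are passed to
the `ipaddress` module, which is why cast_interface() and cast_network()
above convert the incoming value explicitly.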