Fixed problem with large writes in large objects code

This commit is contained in:
Federico Di Gregorio 2009-08-09 17:05:16 +02:00
parent a2af059e6c
commit 3a6911216b
3 changed files with 24 additions and 0 deletions

View File

@@ -1,5 +1,9 @@
2009-08-08 Federico Di Gregorio <fog@initd.org>
* psycopg/lobject_int.c: fixed problem with writing large data using
lo_write: apparently the large objects code does not like non-blocking
connections.
* setup.py: fixed version detection for PostgreSQL rc, as
suggested by Sok Ann Yap.

View File

@@ -201,12 +201,17 @@ lobject_write(lobjectObject *self, const char *buf, size_t len)
PGresult *pgres = NULL;
char *error = NULL;
Dprintf("lobject_writing: fd = %d, len = %d",
self->fd, len);
Py_BEGIN_ALLOW_THREADS;
pthread_mutex_lock(&(self->conn->lock));
PQsetnonblocking(self->conn->pgconn, 0);
written = lo_write(self->conn->pgconn, self->fd, buf, len);
if (written < 0)
collect_error(self->conn, &error);
PQsetnonblocking(self->conn->pgconn, 1);
pthread_mutex_unlock(&(self->conn->lock));
Py_END_ALLOW_THREADS;

View File

@@ -98,6 +98,11 @@ class LargeObjectTests(unittest.TestCase):
lo = self.conn.lobject()
self.assertEqual(lo.write("some data"), len("some data"))
def test_write_large(self):
    """A multi-megabyte write must report the full payload length.

    Regression check for lo_write failing on large buffers when the
    connection is in non-blocking mode.
    """
    lo = self.conn.lobject()
    payload = "data" * 1000000
    self.assertEqual(lo.write(payload), len(payload))
def test_read(self):
lo = self.conn.lobject()
length = lo.write("some data")
@@ -107,6 +112,16 @@ class LargeObjectTests(unittest.TestCase):
self.assertEqual(lo.read(4), "some")
self.assertEqual(lo.read(), " data")
def test_read_large(self):
    """A multi-megabyte large object reads back intact after reopening.

    Writes ~4 MB, closes the lobject, reopens it by OID to force a
    server round-trip, then verifies the content in two reads.
    """
    lo = self.conn.lobject()
    data = "data" * 1000000
    # The original discarded the write's return value in an unused
    # `length` variable; assert it instead so a short write fails loudly.
    self.assertEqual(lo.write("some" + data), len("some" + data))
    lo.close()
    # Reopen by OID so the read goes through a fresh large-object handle.
    lo = self.conn.lobject(lo.oid)
    self.assertEqual(lo.read(4), "some")
    self.assertEqual(lo.read(), data)
def test_seek_tell(self):
lo = self.conn.lobject()
length = lo.write("some data")