Compare commits


No commits in common. "master" and "2_8" have entirely different histories.

189 changed files with 4171 additions and 4846 deletions

.appveyor.yml Normal file

@ -0,0 +1,246 @@
version : 2.x.{build}
clone_folder: C:\Project
environment:
global:
# MSVC Express 2008's setenv.cmd fails if /E:ON and /V:ON are not
# enabled in the batch script interpreter
#
# WITH_COMPILER: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_compiler.cmd"
CMD_IN_ENV: cmd /E:ON /V:ON /C .\appveyor\run_with_env.cmd
matrix:
# For Python versions available on Appveyor, see
# https://www.appveyor.com/docs/build-environment/
- {PYVER: "27", PYTHON_ARCH: "32"}
- {PYVER: "27", PYTHON_ARCH: "64"}
- {PYVER: "37", PYTHON_ARCH: "32"}
- {PYVER: "37", PYTHON_ARCH: "64"}
- {PYVER: "36", PYTHON_ARCH: "32"}
- {PYVER: "36", PYTHON_ARCH: "64"}
- {PYVER: "35", PYTHON_ARCH: "32"}
- {PYVER: "35", PYTHON_ARCH: "64"}
- {PYVER: "34", PYTHON_ARCH: "32"}
- {PYVER: "34", PYTHON_ARCH: "64"}
OPENSSL_VERSION: "1_0_2r"
POSTGRES_VERSION: "11_2"
PSYCOPG2_TESTDB: psycopg2_test
PSYCOPG2_TESTDB_USER: postgres
PSYCOPG2_TESTDB_PASSWORD: Password12!
PSYCOPG2_TESTDB_HOST: localhost
PSYCOPG2_TESTDB_PORT: 5432
PGUSER: postgres
PGPASSWORD: Password12!
matrix:
fast_finish: false
services:
# Note: if you change this service also change the paths to match
# (see where Program Files\Postgres\9.6 is used)
- postgresql96
cache:
# Rebuild cache if following file changes
# (See the file to zap the cache manually)
- C:\Others -> scripts\appveyor.cache_rebuild
# Script called before repo cloning
init:
# Uncomment next line to get RDP access during the build.
#- ps: iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
# Set env variable according to the build environment
- SET PYTHON=C:\Python%PYVER%
- IF "%PYTHON_ARCH%"=="64" SET PYTHON=%PYTHON%-x64
# Py 2.7 = VS Ver. 9.0 (VS 2008)
# Py 3.4 = VS Ver. 10.0 (VS 2010)
# Py 3.5, 3.6, 3.7 = VS Ver. 14.0 (VS 2015)
- IF "%PYVER%"=="27" SET VS_VER=9.0
- IF "%PYVER%"=="34" SET VS_VER=10.0
- IF "%PYVER%"=="35" SET VS_VER=14.0
- IF "%PYVER%"=="36" SET VS_VER=14.0
- IF "%PYVER%"=="37" SET VS_VER=14.0
- IF "%VS_VER%"=="10.0" IF "%PYTHON_ARCH%"=="64" SET DISTUTILS_USE_SDK=1
# Set Python to the path
- SET PATH=%PYTHON%;%PYTHON%\Scripts;C:\Program Files\Git\mingw64\bin;%PATH%
# Verify Python version and architecture
- ECHO *******************************************************************
- ECHO Python Information
- ECHO *******************************************************************
- "%PYTHON%\\python --version"
- "%PYTHON%\\python -c \"import sys; print('64bit: ' + str(sys.maxsize > 2**32))\""
# Get & Install NASM
#- curl -L -o nasminst.exe https://www.nasm.us/pub/nasm/releasebuilds/2.12.02/win64/nasm-2.12.02-installer-x64.exe && start /wait nasminst.exe /S
#- SET PATH="C:\Program Files (x86)\nasm;%PATH%"
# Fix problem with VS2008 Express and 64bit builds
- ECHO Fixing VS2008 Express and 64bit builds
- COPY "C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\bin\\vcvars64.bat" "C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\bin\\amd64\\vcvarsamd64.bat"
# Fix problem with VS2010 Express 64bit missing vcvars64.bat
# Note: repository not cloned at this point, so need to fetch
# file another way
- ECHO Fixing VS2010 Express and 64bit builds
- curl -fsSL -o "C:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\bin\\amd64\\vcvars64.bat" https://raw.githubusercontent.com/psycopg/psycopg2/master/scripts/vcvars64-vs2010.bat
# Setup the compiler based upon version and architecture
- ECHO Configuring Compiler
- IF "%PYTHON_ARCH%"=="32" (CALL "C:\\Program Files (x86)\\Microsoft Visual Studio %VS_VER%\\VC\\vcvarsall.bat" x86)
- IF "%PYTHON_ARCH%"=="64" (CALL "C:\\Program Files (x86)\\Microsoft Visual Studio %VS_VER%\\VC\\vcvarsall.bat" amd64)
# The program rc.exe on 64bit with some versions looks in the wrong path
# location when building postgresql. This cheats by copying the x64
# files to that location.
- IF "%PYTHON_ARCH%"=="64" (COPY /Y "C:\\Program Files\\Microsoft SDKs\\Windows\\v7.0\\Bin\\x64\\rc*" "C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\Bin")
# Change PostgreSQL config before service starts to allow > 1 prepared
# transactions for test cases
- ECHO max_prepared_transactions = 10 >> "C:\\Program Files\\PostgreSQL\\9.6\\data\\postgresql.conf"
# Repository gets cloned, Cache is restored
install:
# We start off CD'ed to the cloned folder
- SET BASE_DIR=C:\Others\%PYTHON_ARCH%\%VS_VER%
- SET BUILD_DIR=%BASE_DIR%\Builds
- IF NOT EXIST %BUILD_DIR% MKDIR %BUILD_DIR%
- ECHO *******************************************************************
- ECHO Initialized variables specific for this build
- ECHO *******************************************************************
- ECHO %BASE_DIR%
- ECHO %BUILD_DIR%
- ECHO *******************************************************************
# Setup directories for building OpenSSL libraries
- ECHO *******************************************************************
- ECHO Preparing for building OpenSSL
- ECHO *******************************************************************
- SET OPENSSLTOP=%BASE_DIR%\openssl
- IF NOT EXIST %OPENSSLTOP%\include\openssl MKDIR %OPENSSLTOP%\include\openssl
- IF NOT EXIST %OPENSSLTOP%\lib MKDIR %OPENSSLTOP%\lib
# Setup OpenSSL Environment Variables based on processor architecture
- ps: >-
If ($env:PYTHON_ARCH -Match "32" ) {
$env:VCVARS_PLATFORM="x86"
$env:TARGET="VC-WIN32"
$env:DO="do_ms"
} Else {
$env:VCVARS_PLATFORM="amd64"
$env:TARGET="VC-WIN64A"
$env:DO="do_win64a"
$env:CPU="AMD64"
}
# Download OpenSSL source
- CD C:\Others
- IF NOT EXIST OpenSSL_%OPENSSL_VERSION%.zip (
curl -fsSL -o OpenSSL_%OPENSSL_VERSION%.zip https://github.com/openssl/openssl/archive/OpenSSL_%OPENSSL_VERSION%.zip
)
# To use OpenSSL >= 1.1.0, both libpq and psycopg build environments have
# to support the new library names. Below are commands to build OpenSSL
# 1.1.0:
# - mkdir _build
# - cd _build
# - perl ..\Configure %TARGET% no-asm no-shared --prefix=%BASE_DIR%\openssl --openssldir=%BASE_DIR%\openssl
# - nmake build_libs install_dev
- IF NOT EXIST %OPENSSLTOP%\lib\ssleay32.lib (
CD %BUILD_DIR% &&
7z x C:\Others\OpenSSL_%OPENSSL_VERSION%.zip &&
CD openssl-OpenSSL_%OPENSSL_VERSION% &&
perl Configure %TARGET% no-asm no-shared no-zlib --prefix=%OPENSSLTOP% --openssldir=%OPENSSLTOP% &&
CALL ms\%DO% &&
nmake -f ms\nt.mak init headers lib &&
COPY inc32\openssl\*.h %OPENSSLTOP%\include\openssl &&
COPY out32\*.lib %OPENSSLTOP%\lib &&
CD %BASE_DIR% &&
RMDIR /S /Q %BUILD_DIR%\openssl-OpenSSL_%OPENSSL_VERSION%
)
# Setup directories for building PostgreSQL libraries
- ECHO *******************************************************************
- ECHO Preparing for building PostgreSQL libraries
- ECHO *******************************************************************
- SET PGTOP=%BASE_DIR%\postgresql
- IF NOT EXIST %PGTOP%\include MKDIR %PGTOP%\include
- IF NOT EXIST %PGTOP%\lib MKDIR %PGTOP%\lib
- IF NOT EXIST %PGTOP%\bin MKDIR %PGTOP%\bin
# Download PostgreSQL source
- CD C:\Others
- IF NOT EXIST postgres-REL_%POSTGRES_VERSION%.zip (
curl -fsSL -o postgres-REL_%POSTGRES_VERSION%.zip https://github.com/postgres/postgres/archive/REL_%POSTGRES_VERSION%.zip
)
# Setup build config file (config.pl)
# Hack the Mkvcbuild.pm file so we build the lib version of libpq
# Build libpgport, libpgcommon, libpq
# Install includes
# Copy over built libraries
# Prepare local include directory for building from
# Build pg_config in place
# NOTE: Cannot set and use the same variable inside an IF
- SET PGBUILD=%BUILD_DIR%\postgres-REL_%POSTGRES_VERSION%
- IF NOT EXIST %PGTOP%\lib\libpq.lib (
CD %BUILD_DIR% &&
7z x C:\Others\postgres-REL_%POSTGRES_VERSION%.zip &&
CD postgres-REL_%POSTGRES_VERSION%\src\tools\msvc &&
ECHO $config-^>{ldap} = 0; > config.pl &&
ECHO $config-^>{openssl} = "%OPENSSLTOP:\=\\%"; >> config.pl &&
ECHO.>> config.pl &&
ECHO 1;>> config.pl &&
perl -pi.bak -e "s/'libpq', 'dll'/'libpq', 'lib'/g" Mkvcbuild.pm &&
build libpgport &&
build libpgcommon &&
build libpq &&
ECHO "" > %PGBUILD%\src\backend\parser\gram.h &&
perl -pi.bak -e "s/qw\(Install\)/qw\(Install CopyIncludeFiles\)/g" Install.pm &&
perl -MInstall=CopyIncludeFiles -e"chdir('../../..'); CopyIncludeFiles('%PGTOP%')" &&
COPY %PGBUILD%\Release\libpgport\libpgport.lib %PGTOP%\lib &&
COPY %PGBUILD%\Release\libpgcommon\libpgcommon.lib %PGTOP%\lib &&
COPY %PGBUILD%\Release\libpq\libpq.lib %PGTOP%\lib &&
XCOPY /Y /S %PGBUILD%\src\include\port\win32\* %PGBUILD%\src\include &&
XCOPY /Y /S %PGBUILD%\src\include\port\win32_msvc\* %PGBUILD%\src\include &&
CD %PGBUILD%\src\bin\pg_config &&
cl pg_config.c /MT /nologo /I%PGBUILD%\src\include /link /LIBPATH:%PGTOP%\lib libpgcommon.lib libpgport.lib advapi32.lib /NODEFAULTLIB:libcmt.lib /OUT:%PGTOP%\bin\pg_config.exe &&
CD %BASE_DIR% &&
RMDIR /S /Q %PGBUILD%
)
build: off
#before_build:
build_script:
# Add PostgreSQL binaries to the path
- PATH=C:\Program Files\PostgreSQL\9.6\bin\;%PATH%
- CD C:\Project
- "%PYTHON%\\python.exe setup.py build_ext --have-ssl --pg-config %PGTOP%\\bin\\pg_config.exe -l libpgcommon -l libpgport -L %OPENSSLTOP%\\lib -I %OPENSSLTOP%\\include"
- "%PYTHON%\\python.exe setup.py build"
- "%PYTHON%\\python.exe setup.py install"
- RD /S /Q psycopg2.egg-info
#after_build:
before_test:
# Create and setup PostgreSQL database for the tests
- createdb %PSYCOPG2_TESTDB%
- psql -d %PSYCOPG2_TESTDB% -c "CREATE EXTENSION HSTORE;"
test_script:
# Print psycopg and libpq versions
- "%PYTHON%\\python.exe -c \"import psycopg2; print(psycopg2.__version__)\""
- "%PYTHON%\\python.exe -c \"import psycopg2; print(psycopg2.__libpq_version__)\""
- "%PYTHON%\\python.exe -c \"import psycopg2; print(psycopg2.extensions.libpq_version())\""
- "%PYTHON%\\python.exe -c \"import tests; tests.unittest.main(defaultTest='tests.test_suite')\" --verbose"

.github/FUNDING.yml vendored

@ -1,4 +0,0 @@
github:
- dvarrazzo
custom:
- "https://www.paypal.me/dvarrazzo"

View File

@ -1,23 +0,0 @@
---
name: Problem installing psycopg2
about: Report a case in which psycopg2 failed to install on your platform
title: ''
labels: ''
assignees: ''
---
**This is a bug tracker**
If you have a question, such as "how do you do X with Python/PostgreSQL/psycopg2", please [write to the mailing list](https://lists.postgresql.org/manage/) or [open a question](https://github.com/psycopg/psycopg2/discussions) instead.
**Before opening this ticket, please confirm that:**
- [ ] I am running the latest version of pip, i.e. typing ``pip --version`` you get [this version](https://pypi.org/project/pip/).
- [ ] I have read the [installation documentation](https://www.psycopg.org/docs/install.html) and the [frequently asked questions](https://www.psycopg.org/docs/faq.html)
- [ ] If install failed, I typed `pg_config` on the command line and I obtained an output instead of an error.
**Please complete the following information:**
- OS:
- Psycopg version:
- Python version:
- PostgreSQL version:
- pip version

View File

@ -1,27 +0,0 @@
---
name: Problem using psycopg2
about: Report a case in which psycopg2 is not working as expected
title: ''
labels: ''
assignees: ''
---
**This is a bug tracker**
If you have a question, such as "how do you do X with Python/PostgreSQL/psycopg2", please [write to the mailing list](https://lists.postgresql.org/manage/) or [open a question](https://github.com/psycopg/psycopg2/discussions) instead.
**Please complete the following information:**
- OS:
- Psycopg version:
- Python version:
- PostgreSQL version:
- pip version
**Describe the bug**
Please let us know:
1: what you did
2: what you expected to happen
3: what happened instead
If possible, provide a script reproducing the issue.

View File

@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "monthly"

View File

@ -1,18 +0,0 @@
name: Build documentation
on:
push:
branches:
# This should match the DOC_BRANCH value in the psycopg-website Makefile
- master
jobs:
docs:
runs-on: ubuntu-latest
steps:
- name: Trigger docs build
uses: peter-evans/repository-dispatch@v3
with:
repository: psycopg/psycopg-website
event-type: psycopg2-commit
token: ${{ secrets.ACCESS_TOKEN }}

View File

@ -1,266 +0,0 @@
---
name: Build packages
on:
- workflow_dispatch
env:
PIP_BREAK_SYSTEM_PACKAGES: "1"
LIBPQ_VERSION: "16.0"
OPENSSL_VERSION: "1.1.1w"
jobs:
sdist: # {{{
if: true
strategy:
fail-fast: false
matrix:
include:
- package_name: psycopg2
- package_name: psycopg2-binary
runs-on: ubuntu-latest
steps:
- name: Checkout repos
uses: actions/checkout@v4
- name: Build sdist
run: ./scripts/build/build_sdist.sh
env:
PACKAGE_NAME: ${{ matrix.package_name }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: sdist-${{ matrix.package_name }}
path: |
dist/*.tar.gz
env:
PSYCOPG2_TESTDB: postgres
PSYCOPG2_TESTDB_HOST: 172.17.0.1
PSYCOPG2_TESTDB_USER: postgres
PSYCOPG2_TESTDB_PASSWORD: password
PSYCOPG2_TEST_FAST: 1
services:
postgresql:
image: postgres:16
env:
POSTGRES_PASSWORD: password
ports:
- 5432:5432
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
# }}}
linux: # {{{
if: true
strategy:
fail-fast: false
matrix:
platform: [manylinux, musllinux]
arch: [x86_64, i686, aarch64, ppc64le]
pyver: [cp38, cp39, cp310, cp311, cp312, cp313]
runs-on: ubuntu-latest
steps:
- name: Checkout repos
uses: actions/checkout@v4
- name: Set up QEMU for multi-arch build
uses: docker/setup-qemu-action@v3
- name: Cache libpq build
uses: actions/cache@v4
with:
path: /tmp/libpq.build
key: libpq-${{ env.LIBPQ_VERSION }}-${{ matrix.platform }}-${{ matrix.arch }}
- name: Build wheels
uses: pypa/cibuildwheel@v2.23.3
env:
CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014
CIBW_MANYLINUX_I686_IMAGE: manylinux2014
CIBW_MANYLINUX_AARCH64_IMAGE: manylinux2014
CIBW_MANYLINUX_PPC64LE_IMAGE: manylinux2014
CIBW_BUILD: ${{matrix.pyver}}-${{matrix.platform}}_${{matrix.arch}}
CIBW_ARCHS_LINUX: auto aarch64 ppc64le
CIBW_BEFORE_ALL_LINUX: ./scripts/build/wheel_linux_before_all.sh
CIBW_REPAIR_WHEEL_COMMAND: >-
./scripts/build/strip_wheel.sh {wheel}
&& auditwheel repair -w {dest_dir} {wheel}
CIBW_TEST_COMMAND: >-
export PYTHONPATH={project} &&
python -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')"
CIBW_ENVIRONMENT_PASS_LINUX: LIBPQ_VERSION OPENSSL_VERSION
CIBW_ENVIRONMENT: >-
PACKAGE_NAME=psycopg2-binary
LIBPQ_BUILD_PREFIX=/host/tmp/libpq.build
PATH="$LIBPQ_BUILD_PREFIX/bin:$PATH"
LD_LIBRARY_PATH="$LIBPQ_BUILD_PREFIX/lib:$LIBPQ_BUILD_PREFIX/lib64"
PSYCOPG2_TESTDB=postgres
PSYCOPG2_TESTDB_HOST=172.17.0.1
PSYCOPG2_TESTDB_USER=postgres
PSYCOPG2_TESTDB_PASSWORD=password
PSYCOPG2_TEST_FAST=1
- uses: actions/upload-artifact@v4
with:
name: linux-${{matrix.pyver}}-${{matrix.platform}}_${{matrix.arch}}
path: ./wheelhouse/*.whl
services:
postgresql:
image: postgres:16
env:
POSTGRES_PASSWORD: password
ports:
- 5432:5432
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
# }}}
macos: # {{{
runs-on: macos-latest
if: true
strategy:
fail-fast: false
matrix:
# These archs require an Apple M1 runner: [arm64, universal2]
arch: [x86_64, arm64]
pyver: [cp39, cp310, cp311, cp312, cp313]
steps:
- name: Checkout repos
uses: actions/checkout@v4
- name: Cache libpq build
uses: actions/cache@v4
with:
path: /tmp/libpq.build
key: libpq-${{ env.LIBPQ_VERSION }}-macos-${{ matrix.arch }}
- name: Build wheels
uses: pypa/cibuildwheel@v2.23.3
env:
CIBW_BUILD: ${{matrix.pyver}}-macosx_${{matrix.arch}}
CIBW_ARCHS_MACOS: ${{matrix.arch}}
MACOSX_ARCHITECTURE: ${{matrix.arch}}
CIBW_BEFORE_ALL_MACOS: ./scripts/build/wheel_macos_before_all.sh
CIBW_TEST_COMMAND: >-
export PYTHONPATH={project} &&
python -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')"
CIBW_ENVIRONMENT: >-
PG_VERSION=16
PACKAGE_NAME=psycopg2-binary
PSYCOPG2_TESTDB=postgres
PATH="/tmp/libpq.build/bin:$PATH"
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: macos-${{matrix.pyver}}-macos-${{matrix.arch}}
path: ./wheelhouse/*.whl
# }}}
windows: # {{{
runs-on: windows-latest
if: true
strategy:
fail-fast: false
matrix:
arch: [win_amd64]
pyver: [cp38, cp39, cp310, cp311, cp312, cp313]
package_name: [psycopg2, psycopg2-binary]
defaults:
run:
shell: bash
steps:
# there are some other libpq in PATH
- name: Drop spurious libpq in the path
run: rm -rf c:/tools/php C:/Strawberry/c/bin
- name: Checkout repo
uses: actions/checkout@v4
- name: Start PostgreSQL service for test
run: |
$PgSvc = Get-Service "postgresql*"
Set-Service $PgSvc.Name -StartupType manual
$PgSvc.Start()
shell: powershell
- name: Export GitHub Actions cache environment variables
uses: actions/github-script@v7
with:
script: |
const path = require('path')
core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
core.addPath(path.join(process.env.VCPKG_INSTALLATION_ROOT, 'installed/x64-windows-release/lib'));
core.addPath(path.join(process.env.VCPKG_INSTALLATION_ROOT, 'installed/x64-windows-release/bin'));
- name: Create the binary package source tree
run: >-
sed -i 's/^setup(name="psycopg2"/setup(name="${{matrix.package_name}}"/'
setup.py
if: ${{ matrix.package_name != 'psycopg2' }}
- name: Build wheels
uses: pypa/cibuildwheel@v2.23.3
env:
VCPKG_BINARY_SOURCES: "clear;x-gha,readwrite" # cache vcpkg
CIBW_BUILD: ${{matrix.pyver}}-${{matrix.arch}}
CIBW_ARCHS_WINDOWS: AMD64 x86
CIBW_BEFORE_BUILD_WINDOWS: '.\scripts\build\wheel_win32_before_build.bat'
CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: >-
delvewheel repair -w {dest_dir}
--no-mangle "libiconv-2.dll;libwinpthread-1.dll" {wheel}
CIBW_TEST_COMMAND: >-
set PYTHONPATH={project} &&
python -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')"
# Note: no fast test because we don't run Windows tests
CIBW_ENVIRONMENT_WINDOWS: >-
PSYCOPG2_TESTDB=postgres
PSYCOPG2_TESTDB_USER=postgres
PSYCOPG2_TESTDB_HOST=localhost
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: windows-${{ matrix.package_name }}-${{matrix.pyver}}-${{matrix.arch}}
path: ./wheelhouse/*.whl
# }}}
merge: # {{{
runs-on: ubuntu-latest
needs:
- sdist
- linux
- macos
- windows
steps:
- name: Merge Artifacts
uses: actions/upload-artifact/merge@v4
with:
name: psycopg2-artifacts
delete-merged: true
# }}}

View File

@ -1,79 +0,0 @@
name: Tests
env:
PIP_BREAK_SYSTEM_PACKAGES: "1"
on:
push:
pull_request:
jobs:
linux:
runs-on: ubuntu-latest
if: true
strategy:
fail-fast: false
matrix:
include:
- {python: "3.8", postgres: "12"}
- {python: "3.9", postgres: "13"}
- {python: "3.10", postgres: "14"}
- {python: "3.11", postgres: "15"}
- {python: "3.12", postgres: "16"}
- {python: "3.13", postgres: "17"}
# Opposite extremes of the supported Py/PG range, other architecture
- {python: "3.8", postgres: "17", architecture: "x86"}
- {python: "3.9", postgres: "16", architecture: "x86"}
- {python: "3.10", postgres: "15", architecture: "x86"}
- {python: "3.11", postgres: "14", architecture: "x86"}
- {python: "3.12", postgres: "13", architecture: "x86"}
- {python: "3.13", postgres: "12", architecture: "x86"}
env:
PSYCOPG2_TESTDB: postgres
PSYCOPG2_TESTDB_HOST: 127.0.0.1
PSYCOPG2_TESTDB_USER: postgres
PSYCOPG2_TESTDB_PASSWORD: password
services:
postgresql:
image: postgres:${{ matrix.postgres }}
env:
POSTGRES_PASSWORD: password
ports:
- 5432:5432
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v4
# Can enable to test an unreleased libpq version.
- name: install libpq 16
if: false
run: |
set -x
rel=$(lsb_release -c -s)
echo "deb http://apt.postgresql.org/pub/repos/apt ${rel}-pgdg main 16" \
| sudo tee -a /etc/apt/sources.list.d/pgdg.list
sudo apt-get -qq update
pqver=$(apt-cache show libpq5 | grep ^Version: | head -1 \
| awk '{print $2}')
sudo apt-get -qq -y install "libpq-dev=${pqver}" "libpq5=${pqver}"
- name: Install tox
run: pip install "tox < 4"
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python }}
- name: Run tests
env:
MATRIX_PYTHON: ${{ matrix.python }}
run: tox -e ${MATRIX_PYTHON%-dev}
timeout-minutes: 5

.gitignore vendored

@ -6,7 +6,7 @@ MANIFEST
*.sw[po]
*.egg-info/
dist/*
/build
build/*
env
env?
.idea
@ -14,5 +14,3 @@ env?
.vscode/
/rel
/wheels
/packages
/wheelhouse

.travis.yml Normal file

@ -0,0 +1,26 @@
# Travis CI configuration file for psycopg2
dist: xenial
sudo: required
language: python
matrix:
include:
- python: 2.7
- python: 3.7
- python: 3.6
- python: 3.5
- python: 3.4
dist: trusty
install:
- pip install -U pip setuptools wheel
- pip install .
- rm -rf psycopg2.egg-info
- sudo scripts/travis_prepare.sh
script:
- scripts/travis_test.sh
notifications:
email: false

View File

@ -1,4 +1,4 @@
Installation instructions are included in the docs.
Please check the 'doc/src/install.rst' file or online at
<https://www.psycopg.org/docs/install.html>.
<http://initd.org/psycopg/docs/install.html>.

View File

@ -42,7 +42,7 @@ endif
VERSION := $(shell grep PSYCOPG_VERSION setup.py | head -1 | sed -e "s/.*'\(.*\)'/\1/")
SDIST := dist/psycopg2-$(VERSION).tar.gz
.PHONY: check clean
.PHONY: env check clean
default: package

NEWS

@ -1,204 +1,6 @@
Current release
---------------
What's new in psycopg 2.9.10
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Add support for Python 3.13.
- Receive notifications on commit (:ticket:`#1728`).
- `~psycopg2.errorcodes` map and `~psycopg2.errors` classes updated to
PostgreSQL 17.
- Drop support for Python 3.7.
What's new in psycopg 2.9.9
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Add support for Python 3.12.
- Drop support for Python 3.6.
What's new in psycopg 2.9.8
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Wheel package bundled with PostgreSQL 16 libpq in order to add support for
recent features, such as ``sslcertmode``.
What's new in psycopg 2.9.7
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fix propagation of exceptions raised during module initialization
(:ticket:`#1598`).
- Fix building when pg_config returns an empty string (:ticket:`#1599`).
- Wheel package bundled with OpenSSL 1.1.1v.
What's new in psycopg 2.9.6
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Package manylinux 2014 for aarch64 and ppc64le platforms, in order to
include libpq 15 in the binary package (:ticket:`#1396`).
- Wheel package bundled with OpenSSL 1.1.1t.
What's new in psycopg 2.9.5
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Add support for Python 3.11.
- Add support for rowcount in MERGE statements in binary packages
(:ticket:`#1497`).
- Wheel package bundled with OpenSSL 1.1.1r and PostgreSQL 15 libpq.
What's new in psycopg 2.9.4
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fix `~psycopg2.extras.register_composite()`,
`~psycopg2.extras.register_range()` with customized :sql:`search_path`
(:ticket:`#1487`).
- Handle correctly composite types with names or in schemas requiring escape.
- Find ``pg_service.conf`` file in the ``/etc/postgresql-common`` directory in
binary packages (:ticket:`#1365`).
- `~psycopg2.errorcodes` map and `~psycopg2.errors` classes updated to
PostgreSQL 15.
- Wheel package bundled with OpenSSL 1.1.1q and PostgreSQL 14.4 libpq.
What's new in psycopg 2.9.3
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Alpine (musl) wheels now available (:ticket:`#1392`).
- macOS arm64 (Apple M1) wheels now available (:ticket:`#1482`).
What's new in psycopg 2.9.2
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Raise `ValueError` for dates >= Y10k (:ticket:`#1307`).
- `~psycopg2.errorcodes` map and `~psycopg2.errors` classes updated to
PostgreSQL 14.
- Add preliminary support for Python 3.11 (:tickets:`#1376, #1386`).
- Wheel package bundled with OpenSSL 1.1.1l and PostgreSQL 14.1 libpq
(:ticket:`#1388`).
What's new in psycopg 2.9.1
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fix regression with named `~psycopg2.sql.Placeholder` (:ticket:`#1291`).
What's new in psycopg 2.9
-------------------------
- ``with connection`` starts a transaction on autocommit transactions too
(:ticket:`#941`).
- Timezones with fractional minutes are supported on Python 3.7 and following
(:ticket:`#1272`).
- Escape table and column names in `~cursor.copy_from()` and
`~cursor.copy_to()`.
- Connection exceptions with sqlstate ``08XXX`` reclassified as
`~psycopg2.OperationalError` (a subclass of the previously used
`~psycopg2.DatabaseError`) (:ticket:`#1148`).
- Include library dirs required from libpq to work around MacOS build problems
(:ticket:`#1200`).
Other changes:
- Dropped support for Python 2.7, 3.4, 3.5 (:tickets:`#1198, #1000, #1197`).
- Dropped support for mx.DateTime.
- Use `datetime.timezone` objects by default in datetime objects instead of
`~psycopg2.tz.FixedOffsetTimezone`.
- The `psycopg2.tz` module is deprecated and scheduled to be dropped in the
next major release.
- Provide :pep:`599` wheels packages (manylinux2014 tag) for i686 and x86_64
platforms.
- Provide :pep:`600` wheels packages (manylinux_2_24 tag) for aarch64 and
ppc64le platforms.
- Wheel package bundled with OpenSSL 1.1.1k and PostgreSQL 13.3 libpq.
- Build system for Linux/MacOS binary packages moved to GitHub Actions.
What's new in psycopg 2.8.7
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Accept empty params as `~psycopg2.connect()` (:ticket:`#1250`).
- Fix attributes refcount in `Column` initialisation (:ticket:`#1252`).
- Allow re-initialisation of static variables in the C module (:ticket:`#1267`).
What's new in psycopg 2.8.6
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fixed memory leak changing connection encoding to the current one
(:ticket:`#1101`).
- Fixed search of mxDateTime headers in virtualenvs (:ticket:`#996`).
- Added missing values from errorcodes (:ticket:`#1133`).
- `cursor.query` reports the query of the last :sql:`COPY` operation too
(:ticket:`#1141`).
- `~psycopg2.errorcodes` map and `~psycopg2.errors` classes updated to
PostgreSQL 13.
- Added wheel packages for ARM architecture (:ticket:`#1125`).
- Wheel package bundled with OpenSSL 1.1.1g.
What's new in psycopg 2.8.5
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fixed use of `!connection_factory` and `!cursor_factory` together
(:ticket:`#1019`).
- Added support for `~logging.LoggerAdapter` in
`~psycopg2.extras.LoggingConnection` (:ticket:`#1026`).
- `~psycopg2.extensions.Column` objects in `cursor.description` can be sliced
(:ticket:`#1034`).
- Added AIX support (:ticket:`#1061`).
- Fixed `~copy.copy()` of `~psycopg2.extras.DictCursor` rows (:ticket:`#1073`).
What's new in psycopg 2.8.4
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fixed building with Python 3.8 (:ticket:`#854`).
- Don't swallow keyboard interrupts on connect when a password is specified
in the connection string (:ticket:`#898`).
- Don't advance replication cursor when the message wasn't confirmed
(:ticket:`#940`).
- Fixed inclusion of ``time.h`` on linux (:ticket:`#951`).
- Fixed int overflow for large values in `~psycopg2.extensions.Column.table_oid`
and `~psycopg2.extensions.Column.type_code` (:ticket:`#961`).
- `~psycopg2.errorcodes` map and `~psycopg2.errors` classes updated to
PostgreSQL 12.
- Wheel package bundled with OpenSSL 1.1.1d and PostgreSQL at least 11.4.
What's new in psycopg 2.8.3
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Added *interval_status* parameter to
`~psycopg2.extras.ReplicationCursor.start_replication()` method and other
facilities to send automatic replication keepalives at periodic intervals
(:ticket:`#913`).
- Fixed namedtuples caching introduced in 2.8 (:ticket:`#928`).
What's new in psycopg 2.8.2
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fixed `~psycopg2.extras.RealDictCursor` when there are repeated columns
(:ticket:`#884`).
- Binary packages built with openssl 1.1.1b. Should fix concurrency problems
(:tickets:`#543, #836`).
What's new in psycopg 2.8.1
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fixed `~psycopg2.extras.RealDictRow` modifiability (:ticket:`#886`).
- Fixed "there's no async cursor" error polling a connection with no cursor
(:ticket:`#887`).
What's new in psycopg 2.8
-------------------------
@ -258,7 +60,7 @@ Other changes:
source files are now compatible with Python 2 & 3 as is.
- The `!psycopg2.test` package is no longer installed by ``python setup.py
install``.
- Wheel package bundled with OpenSSL 1.0.2r and PostgreSQL 11.2 libpq.
- Wheel package compiled against OpenSSL 1.0.2r and PostgreSQL 11.2 libpq.
What's new in psycopg 2.7.7
@ -266,14 +68,14 @@ What's new in psycopg 2.7.7
- Cleanup of the cursor results assignment code, which might have solved
double free and inconsistencies in concurrent usage (:tickets:`#346, #384`).
- Wheel package bundled with OpenSSL 1.0.2q.
- Wheel package compiled against OpenSSL 1.0.2q.
What's new in psycopg 2.7.6.1
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fixed binary package broken on OS X 10.12 (:ticket:`#807`).
- Wheel package bundled with PostgreSQL 11.1 libpq.
- Wheel package compiled against PostgreSQL 11.1 libpq.
What's new in psycopg 2.7.6
@ -290,7 +92,7 @@ What's new in psycopg 2.7.6
- `~psycopg2.extras.execute_values()` accepts `~psycopg2.sql.Composable`
objects (:ticket:`#794`).
- `~psycopg2.errorcodes` map updated to PostgreSQL 11.
- Wheel package bundled with PostgreSQL 10.5 libpq and OpenSSL 1.0.2p.
- Wheel package compiled against PostgreSQL 10.5 libpq and OpenSSL 1.0.2p.
What's new in psycopg 2.7.5
@ -304,7 +106,7 @@ What's new in psycopg 2.7.5
- Maybe fixed building on MSYS2 (as reported in :ticket:`#658`).
- Allow string subclasses in connection and other places (:ticket:`#679`).
- Don't raise an exception closing an unused named cursor (:ticket:`#716`).
- Wheel package bundled with PostgreSQL 10.4 libpq and OpenSSL 1.0.2o.
- Wheel package compiled against PostgreSQL 10.4 libpq and OpenSSL 1.0.2o.
What's new in psycopg 2.7.4
@ -326,7 +128,7 @@ What's new in psycopg 2.7.4
- Fixed `~cursor.rowcount` after `~cursor.executemany()` with :sql:`RETURNING`
statements (:ticket:`#633`).
- Fixed compatibility problem with pypy3 (:ticket:`#649`).
- Wheel packages bundled with PostgreSQL 10.1 libpq and OpenSSL 1.0.2n.
- Wheel packages compiled against PostgreSQL 10.1 libpq and OpenSSL 1.0.2n.
- Wheel packages for Python 2.6 no longer available (support dropped from
wheel building infrastructure).
@ -334,7 +136,7 @@ What's new in psycopg 2.7.4
What's new in psycopg 2.7.3.2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Wheel package bundled with PostgreSQL 10.0 libpq and OpenSSL 1.0.2l
- Wheel package compiled against PostgreSQL 10.0 libpq and OpenSSL 1.0.2l
(:tickets:`#601, #602`).
@ -407,7 +209,7 @@ New features:
them together.
- Added `~psycopg2.__libpq_version__` and
`~psycopg2.extensions.libpq_version()` to inspect the version of the
``libpq`` library the module was bundled with
``libpq`` library the module was compiled/loaded with
(:tickets:`#35, #323`).
- The attributes `~connection.notices` and `~connection.notifies` can be
customized replacing them with any object exposing an `!append()` method

View File

@ -17,31 +17,17 @@ flexible objects adaptation system.
Psycopg 2 is both Unicode and Python 3 friendly.
.. Note::
The psycopg2 package is still widely used and actively maintained, but it
is not expected to receive new features.
`Psycopg 3`__ is the evolution of psycopg2 and is where `new features are
being developed`__: if you are starting a new project you should probably
start from 3!
.. __: https://pypi.org/project/psycopg/
.. __: https://www.psycopg.org/psycopg3/docs/index.html
Documentation
-------------
Documentation is included in the ``doc`` directory and is `available online`__.
.. __: https://www.psycopg.org/docs/
.. __: http://initd.org/psycopg/docs/
For any other resource (source code repository, bug tracker, mailing list)
please check the `project homepage`__.
.. __: https://psycopg.org/
Installation
------------
@ -70,11 +56,19 @@ production it is advised to use the package built from sources.
.. _PyPI: https://pypi.org/project/psycopg2/
.. _psycopg2-binary: https://pypi.org/project/psycopg2-binary/
.. _install: https://www.psycopg.org/docs/install.html#install-from-source
.. _faq: https://www.psycopg.org/docs/faq.html#faq-compile
.. _install: http://initd.org/psycopg/docs/install.html#install-from-source
.. _faq: http://initd.org/psycopg/docs/faq.html#faq-compile
:Build status: |gh-actions|
.. __: http://initd.org/psycopg/
.. |gh-actions| image:: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml/badge.svg
:target: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml
:alt: Build status
:Linux/OSX: |travis|
:Windows: |appveyor|
.. |travis| image:: https://travis-ci.org/psycopg/psycopg2.svg?branch=master
:target: https://travis-ci.org/psycopg/psycopg2
:alt: Linux and OSX build status
.. |appveyor| image:: https://ci.appveyor.com/api/projects/status/github/psycopg/psycopg2?branch=master&svg=true
:target: https://ci.appveyor.com/project/psycopg/psycopg2/branch/master
:alt: Windows build status

doc/.gitignore vendored

@ -3,6 +3,3 @@ src/_build/*
html/*
psycopg2.txt
src/sqlstate_errors.rst
# Added by psycopg-website to customize published docs
src/_templates/layout.html

View File

@ -1,7 +1,7 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

View File

@ -1,4 +1,4 @@
.PHONY: env help clean html package doctest
.PHONY: env help clean html doctest
docs: html
@ -8,32 +8,33 @@ check: doctest
# It is not cleaned by 'make clean'
PYTHON := python$(PYTHON_VERSION)
PYTHON_VERSION ?= $(shell $(PYTHON) -c 'import sys; print("%d.%d" % sys.version_info[:2])')
BUILD_DIR = $(shell pwd)/../build/lib.$(PYTHON_VERSION)
PYTHON_VERSION ?= $(shell $(PYTHON) -c 'import sys; print ("%d.%d" % sys.version_info[:2])')
SPHINXBUILD ?= $$(pwd)/env/bin/sphinx-build
SPHOPTS = SPHINXBUILD=$(SPHINXBUILD)
html: package src/sqlstate_errors.rst
html: src/sqlstate_errors.rst
$(MAKE) PYTHON=$(PYTHON) -C .. package
$(MAKE) $(SPHOPTS) -C src $@
cp -r src/_build/html .
src/sqlstate_errors.rst: ../psycopg/sqlstate_errors.h $(BUILD_DIR)
./env/bin/python src/tools/make_sqlstate_docs.py $< > $@
$(BUILD_DIR):
$(MAKE) PYTHON=$(PYTHON) -C .. package
src/sqlstate_errors.rst: ../psycopg/sqlstate_errors.h
env/bin/python src/tools/make_sqlstate_docs.py $< > $@
doctest:
$(MAKE) PYTHON=$(PYTHON) -C .. package
$(MAKE) $(SPHOPTS) -C src $@
upload:
# this command requires ssh configured to the proper target
tar czf - -C html . | ssh psycoweb tar xzvf - -C docs/current
clean:
$(MAKE) $(SPHOPTS) -C src $@
rm -rf html src/sqlstate_errors.rst
env: requirements.txt
$(PYTHON) -m venv env
virtualenv -p $(PYTHON) env
./env/bin/pip install -r requirements.txt
echo "$$(pwd)/../build/lib.$(PYTHON_VERSION)" \
> env/lib/python$(PYTHON_VERSION)/site-packages/psycopg.pth

View File

@ -6,7 +6,7 @@ introspection, so you will need the same prerequisites_. The only extra
prerequisite is virtualenv_: the packages needed to build the docs will be
installed when building the env.
.. _prerequisites: https://www.psycopg.org/docs/install.html#install-from-source
.. _prerequisites: http://initd.org/psycopg/docs/install.html#install-from-source
.. _virtualenv: https://virtualenv.pypa.io/en/latest/
Build the env once with::

View File

@ -13,46 +13,82 @@ How to make a psycopg2 release
In the rest of this document we assume you have exported the version number
into an environment variable, e.g.::
$ export VERSION=2.8.4
$ export VERSION=2.7
- Push psycopg2 to master or to the maint branch. Make sure tests on `GitHub
Actions`__ pass.
- In the `Travis settings`__ you may want to be sure that the variables
``TEST_PAST`` and ``TEST_FUTURE`` are set to a nonzero string to check all
the supported postgres version.
.. __: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml
.. __: https://travis-ci.org/psycopg/psycopg2/settings
- Push psycopg2 to master or to the maint branch. Make sure tests on Travis__
and AppVeyor__ pass.
.. __: https://travis-ci.org/psycopg/psycopg2
.. __: https://ci.appveyor.com/project/psycopg/psycopg2
- For an extra test merge or rebase the `test_i686`__ branch on the commit to
release and push it too: this will test with Python 32 bits and debug
versions.
.. __: https://github.com/psycopg/psycopg2/tree/test_i686
- Create a signed tag with the content of the relevant NEWS bit and push it.
E.g.::
# Tag name will be 2_8_4
$ git tag -a -s ${VERSION//\./_}
$ git tag -a -s 2_7
Psycopg 2.8.4 released
Psycopg 2.7 released
What's new in psycopg 2.8.4
---------------------------
What's new in psycopg 2.7
-------------------------
New features:
- Fixed bug blah (:ticket:`#42`).
- Added `~psycopg2.sql` module to generate SQL dynamically (:ticket:`#308`).
...
- Create the packages:
- Update the `psycopg2-wheels`_ submodule to the tag version and push. This
will build the packages on `Travis CI`__ and `AppVeyor`__ and upload them to
the `initd.org upload`__ dir.
- On GitHub Actions run manually a `package build workflow`__.
.. _psycopg2-wheels: https://github.com/psycopg/psycopg2-wheels
.. __: https://travis-ci.org/psycopg/psycopg2-wheels
.. __: https://ci.appveyor.com/project/psycopg/psycopg2-wheels
.. __: http://initd.org/psycopg/upload/
.. __: https://github.com/psycopg/psycopg2/actions/workflows/packages.yml
- Download the packages generated (this assumes ssh configured properly)::
- When the workflows have finished download the packages from the job
artifacts.
$ rsync -arv initd-upload:psycopg2-${VERSION} .
- Only for stable packages: upload the signed packages on PyPI::
- Sign the packages and upload the signatures back::
$ twine upload -s wheelhouse/psycopg2-${VERSION}/*
$ for f in psycopg2-${VERSION}/*.{exe,tar.gz,whl}; do \
gpg --armor --detach-sign $f;
done
$ rsync -arv psycopg2-${VERSION} initd-upload:
- Run the ``copy-tarball.sh`` script on the server to copy the uploaded files
in the `tarballs`__ dir::
$ ssh psycoweb@initd.org copy-tarball.sh ${VERSION}
.. __: http://initd.org/psycopg/tarballs/
- Remove the ``.exe`` from the dir, because we don't want to upload them on
PyPI::
$ rm -v psycopg2-${VERSION}/*.exe{,.asc}
- Only for stable packages: upload the packages and signatures on PyPI::
$ twine upload psycopg2-${VERSION}/*
- Create a release and release notes in the psycopg website, announce to
psycopg and pgsql-announce mailing lists.
- Edit ``setup.py`` changing the version again (e.g. go to ``2.8.5.dev0``).
- Edit ``setup.py`` changing the version again (e.g. go to ``2.7.1.dev0``).
Releasing test packages
@ -60,7 +96,7 @@ Releasing test packages
Test packages may be uploaded on the `PyPI testing site`__ using::
$ twine upload -s -r testpypi wheelhouse/psycopg2-${VERSION}/*
$ twine upload -r testpypi psycopg2-${VERSION}/*
assuming `proper configuration`__ of ``~/.pypirc``.

View File

@ -1,2 +0,0 @@
Sphinx
sphinx-better-theme

View File

@ -1,50 +1,3 @@
#
# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
# pip-compile requirements.in
#
alabaster==0.7.13
# via sphinx
babel==2.12.1
# via sphinx
certifi>=2023.7.22
# via requests
charset-normalizer==3.1.0
# via requests
docutils==0.19
# via sphinx
idna==3.4
# via requests
imagesize==1.4.1
# via sphinx
jinja2==3.1.2
# via sphinx
markupsafe==2.1.2
# via jinja2
packaging==23.1
# via sphinx
pygments==2.15.0
# via sphinx
requests==2.31.0
# via sphinx
snowballstemmer==2.2.0
# via sphinx
sphinx==6.1.3
# via -r requirements.in
sphinx-better-theme==0.1.5
# via -r requirements.in
sphinxcontrib-applehelp==1.0.4
# via sphinx
sphinxcontrib-devhelp==1.0.2
# via sphinx
sphinxcontrib-htmlhelp==2.0.1
# via sphinx
sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-qthelp==1.0.3
# via sphinx
sphinxcontrib-serializinghtml==1.1.5
# via sphinx
urllib3==1.26.17
# via requests
# Packages only needed to build the docs
Pygments>=2.2,<2.3
Sphinx>=1.6,<=1.7

View File

@ -1,3 +1,5 @@
@import url("classic.css");
blockquote {
font-style: italic;
}
@ -35,102 +37,3 @@ dl.faq dt {
table.data-types div.line-block {
margin-bottom: 0;
}
/* better theme customisation */
body {
background-color: #216464;
}
header, .related, .document, footer {
background-color: white;
}
header h1 {
font-size: 150%;
margin-bottom: 0;
padding: 0.5rem 10px 0.5rem 10px;
}
h1, h2, h3 {
font-weight: normal;
}
.body h1, .body h2, .body h3 {
color: #074848;
}
h1 {
font-size: 200%;
}
h2 {
font-size: 160%;
}
h3 {
font-size: 140%;
}
footer#pagefooter {
margin-bottom: 1rem;
font-size: 85%;
color: #444;
}
#rellinks, #breadcrumbs {
padding-right: 10px;
padding-left: 10px;
}
.sphinxsidebar {
padding-left: 10px;
}
.bodywrapper {
padding-right: 10px;
}
div.body h1, div.body h2, div.body h3 {
background-color: #f2f2f2;
border-bottom: 1px solid #d0d0d0;
}
div.body p.rubric {
border-bottom: 1px solid #d0d0d0;
}
body .sphinxsidebar .search {
margin-top: 0;
}
html pre {
background-color: #efc;
border: 1px solid #ac9;
border-left: none;
border-right: none;
}
a, a:visited {
color: #0b6868;
}
th {
background-color: #ede;
}
code.xref, a code {
font-weight: bold;
}
code.descname {
font-weight: bold;
font-size: 120%;
}
@media (max-width: 820px) {
body {
background-color: white;
}
}

View File

@ -1,6 +0,0 @@
{# Add a title over the search box #}
{%- if pagename != "search" %}
<h3>Quick search</h3>
{%- include "!searchbox.html" %}
{%- endif %}

View File

@ -226,7 +226,7 @@ read:
>>> cur.execute("SELECT '(10.2,20.3)'::point")
>>> point = cur.fetchone()[0]
>>> print(type(point), point.x, point.y)
>>> print type(point), point.x, point.y
<class 'Point'> 10.2 20.3
A typecaster created by `!new_type()` can be also used with
@ -284,15 +284,15 @@ something to read::
curs = conn.cursor()
curs.execute("LISTEN test;")
print("Waiting for notifications on channel 'test'")
print "Waiting for notifications on channel 'test'"
while True:
if select.select([conn],[],[],5) == ([],[],[]):
print("Timeout")
print "Timeout"
else:
conn.poll()
while conn.notifies:
notify = conn.notifies.pop(0)
print("Got NOTIFY:", notify.pid, notify.channel, notify.payload)
print "Got NOTIFY:", notify.pid, notify.channel, notify.payload
Running the script and executing a command such as :sql:`NOTIFY test, 'hello'`
in a separate :program:`psql` shell, the output may look similar to:
@ -490,7 +490,7 @@ resources about the topic.
.. _Eventlet: https://eventlet.net/
.. _gevent: http://www.gevent.org/
.. _SQLAlchemy: https://www.sqlalchemy.org/
.. _psycogreen: https://github.com/psycopg/psycogreen/
.. _psycogreen: http://bitbucket.org/dvarrazzo/psycogreen/
.. __: https://www.postgresql.org/docs/current/static/libpq-async.html
.. warning::
@ -552,7 +552,8 @@ value greater than zero in ``postgresql.conf`` (these changes require a server
restart). Create a database ``psycopg2_test``.
Then run the following code to quickly try the replication support out. This
is not production code -- it's only intended as a simple demo of logical
is not production code -- it has no error handling, it sends feedback too
often, etc. -- and it's only intended as a simple demo of logical
replication::
from __future__ import print_function

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
#
# Psycopg documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 7 13:48:41 2010.
@ -10,9 +11,7 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from better import better_theme_path
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@ -23,16 +22,11 @@ sys.path.append(os.path.abspath('tools/lib'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.ifconfig',
'sphinx.ext.doctest', 'sphinx.ext.intersphinx' ]
# Specific extensions for Psycopg documentation.
extensions += ['dbapi_extension', 'sql_role', 'ticket_role']
extensions += [ 'dbapi_extension', 'sql_role', 'ticket_role' ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@ -41,16 +35,14 @@ templates_path = ['_templates']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Psycopg'
copyright = (
'2001-2021, Federico Di Gregorio, Daniele Varrazzo, The Psycopg Team'
)
project = u'Psycopg'
copyright = u'2001-2019, Federico Di Gregorio, Daniele Varrazzo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@ -62,14 +54,15 @@ version = '2.0'
# The full version, including alpha/beta/rc tags.
try:
import psycopg2
release = psycopg2.__version__.split()[0]
version = '.'.join(release.split('.')[:2])
except ImportError:
print("WARNING: couldn't import psycopg to read version.")
release = version
else:
release = psycopg2.__version__.split()[0]
version = '.'.join(release.split('.')[:2])
intersphinx_mapping = {'py': ('https://docs.python.org/3', None)}
intersphinx_mapping = {
'py': ('https://docs.python.org/3', None),
}
# Pattern to generate links to the bug tracker
ticket_url = 'https://github.com/psycopg/psycopg2/issues/%s'
@ -78,16 +71,16 @@ ticket_remap_offset = 230
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
#today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
@ -97,15 +90,15 @@ exclude_trees = ['_build', 'html']
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
#show_authors = False
# Using 'python' instead of the default gives warnings if parsing an example
# fails, instead of defaulting to none
@ -115,7 +108,7 @@ highlight_language = 'python'
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
#modindex_common_prefix = []
# Include TODO items in the documentation
todo_include_todos = False
@ -128,6 +121,8 @@ rst_epilog = """
.. _transaction isolation level:
https://www.postgresql.org/docs/current/static/transaction-iso.html
.. _mx.DateTime: https://www.egenix.com/products/python/mxBase/mxDateTime/
.. |MVCC| replace:: :abbr:`MVCC (Multiversion concurrency control)`
"""
@ -135,41 +130,35 @@ rst_epilog = """
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'better'
html_theme = 'classic'
# The stylesheet to use with HTML output: this will include the original one
# adding a few classes.
# html_style = 'psycopg.css'
# Hide the sphinx footer
html_show_sphinx = False
html_style = 'psycopg.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'linktotheme': False,
'cssfiles': ['_static/psycopg.css'],
}
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [better_theme_path]
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Home'
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@ -178,41 +167,38 @@ html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# no need for the prev/next topic link using better theme: they are on top
html_sidebars = {
'**': ['localtoc.html', 'searchbox.html'],
}
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
#html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
#html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
#html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'psycopgdoc'
@ -221,41 +207,35 @@ htmlhelp_basename = 'psycopgdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
'index',
'psycopg.tex',
'Psycopg Documentation',
'Federico Di Gregorio',
'manual',
)
('index', 'psycopg.tex', u'Psycopg Documentation',
u'Federico Di Gregorio', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
#latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
#latex_use_modindex = True
toc_object_entries = False
doctest_global_setup = """

View File

@ -21,28 +21,6 @@ The ``connection`` class
Connections are thread safe and can be shared among many threads. See
:ref:`thread-safety` for details.
Connections can be used as context managers. Note that a context wraps a
transaction: if the context exits with success the transaction is
committed, if it exits with an exception the transaction is rolled back.
Note that the connection is not closed by the context and it can be used
for several contexts.
.. code:: python
conn = psycopg2.connect(DSN)
with conn:
with conn.cursor() as curs:
curs.execute(SQL1)
with conn:
with conn.cursor() as curs:
curs.execute(SQL2)
# leaving contexts doesn't close the connection
conn.close()
.. method:: cursor(name=None, cursor_factory=None, scrollable=None, withhold=False)
Return a new `cursor` object using the connection.
@ -139,7 +117,7 @@ The ``connection`` class
with a `~connection.commit()`/`~connection.rollback()` before
closing.
.. _PgBouncer: http://www.pgbouncer.org/
.. _PgBouncer: http://pgbouncer.projects.postgresql.org/
.. index::
@ -661,7 +639,7 @@ The ``connection`` class
:param new_oid: Create a new object using the specified OID. The
function raises `~psycopg2.OperationalError` if the OID is already
in use. Default is 0, meaning assign a new one automatically.
:param new_file: The name of a file to be imported in the database
:param new_file: The name of a file to be imported in the the database
(using the |lo_import|_ function)
:param lobject_factory: Subclass of
`~psycopg2.extensions.lobject` to be instantiated.
@ -746,7 +724,6 @@ The ``connection`` class
raw connection structure to C functions, e.g. via `ctypes`::
>>> import ctypes
>>> import ctypes.util
>>> libpq = ctypes.pydll.LoadLibrary(ctypes.util.find_library('pq'))
>>> libpq.PQserverVersion.argtypes = [ctypes.c_void_p]
>>> libpq.PQserverVersion.restype = ctypes.c_int
@ -772,7 +749,7 @@ The ``connection`` class
.. rubric:: informative methods of the native connection
.. note::
.. note::
These methods are better accessed using the `~connection.info`
attributes and may be dropped in future versions.
@ -845,10 +822,8 @@ The ``connection`` class
Also available as `~connection.info`\ `!.`\
`~psycopg2.extensions.ConnectionInfo.backend_pid`.
Returns the process ID (PID) of the backend server process *you
connected to*. Note that if you use a connection pool service such as
PgBouncer_ this value will not be updated if your connection is
switched to a different backend.
Returns the process ID (PID) of the backend server process handling
this connection.
Note that the PID belongs to a process executing on the database
server host, not the local host!
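A minimal sketch of reading the backend PID from an open connection (the DSN below is only a placeholder):

.. code:: python

    import psycopg2

    conn = psycopg2.connect("dbname=test user=postgres")  # placeholder DSN

    # PID of the server process handling this connection
    pid = conn.get_backend_pid()

    # Same value through the info accessor available since psycopg2 2.8
    assert conn.info.backend_pid == pid

    print("backend PID:", pid)
    conn.close()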

View File

@ -34,16 +34,6 @@ The ``cursor`` class
many cursors from the same connection and should use each cursor from
a single thread. See :ref:`thread-safety` for details.
Cursors can be used as context managers: leaving the context will close
the cursor.
.. code:: python
with conn.cursor() as curs:
curs.execute(SQL)
# the cursor is now closed
.. attribute:: description
@ -124,7 +114,7 @@ The ``cursor`` class
.. attribute:: name
Read-only attribute containing the name of the cursor if it was
created as named cursor by `connection.cursor()`, or `!None` if
creates as named cursor by `connection.cursor()`, or `!None` if
it is a client side cursor. See :ref:`server-side-cursors`.
.. extension::
@ -208,14 +198,6 @@ The ``cursor`` class
Parameters are bound to the query using the same rules described in
the `~cursor.execute()` method.
.. code:: python
>>> nums = ((1,), (5,), (10,))
>>> cur.executemany("INSERT INTO test (num) VALUES (%s)", nums)
>>> tuples = ((123, "foo"), (42, "bar"), (23, "baz"))
>>> cur.executemany("INSERT INTO test (num, data) VALUES (%s, %s)", tuples)
.. warning::
In its current implementation this method is not faster than
executing `~cursor.execute()` in a loop. For better performance
@ -240,16 +222,6 @@ The ``cursor`` class
.. versionchanged:: 2.7
added support for named arguments.
.. note::
`!callproc()` can only be used with PostgreSQL functions__, not
with the procedures__ introduced in PostgreSQL 11, which require
the :sql:`CALL` statement to run. Please use a normal
`execute()` to run them.
.. __: https://www.postgresql.org/docs/current/sql-createfunction.html
.. __: https://www.postgresql.org/docs/current/sql-createprocedure.html
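A minimal sketch of the distinction (the function and procedure names are hypothetical):

.. code:: python

    # A PostgreSQL function can be invoked with callproc(); its result
    # is then read from the cursor like a regular result set.
    cur.callproc('my_function', (42,))
    row = cur.fetchone()

    # A PostgreSQL 11+ procedure must instead be run with CALL through
    # a normal execute().
    cur.execute("CALL my_procedure(%s)", (42,))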
.. method:: mogrify(operation [, parameters])
Return a query string after arguments binding. The string returned is
@ -292,7 +264,7 @@ The ``cursor`` class
>>> cur.execute("SELECT * FROM test;")
>>> for record in cur:
... print(record)
... print record
...
(1, 100, "abc'def")
(2, None, 'dada')
@ -516,10 +488,8 @@ The ``cursor`` class
The time zone factory used to handle data types such as
:sql:`TIMESTAMP WITH TIME ZONE`. It should be a `~datetime.tzinfo`
object. Default is `datetime.timezone`.
.. versionchanged:: 2.9
previously the default factory was `psycopg2.tz.FixedOffsetTimezone`.
object. A few implementations are available in the `psycopg2.tz`
module.
.. method:: nextset()
@ -570,6 +540,13 @@ The ``cursor`` class
>>> cur.fetchall()
[(6, 42, 'foo'), (7, 74, 'bar')]
.. note:: the name of the table is not quoted: if the table name
contains uppercase letters or special characters it must be quoted
with double quotes::
cur.copy_from(f, '"TABLE"')
.. versionchanged:: 2.0.6
added the *columns* parameter.
@ -578,11 +555,6 @@ The ``cursor`` class
are encoded in the connection `~connection.encoding` when sent to
the backend.
.. versionchanged:: 2.9
the table and fields names are now quoted. If you need to specify
a schema-qualified table please use `copy_expert()`.
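A minimal sketch of the schema-qualified case, assuming ``myschema.mytable``
exists and ``f`` is a file-like object containing tab-separated data:

.. code:: python

    cur.copy_expert("COPY myschema.mytable FROM STDIN", f)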
.. method:: copy_to(file, table, sep='\\t', null='\\\\N', columns=None)
Write the content of the table named *table* *to* the file-like
@ -604,6 +576,12 @@ The ``cursor`` class
2|\N|dada
...
.. note:: the name of the table is not quoted: if the table name
contains uppercase letters or special characters it must be quoted
with double quotes::
cur.copy_to(f, '"TABLE"')
.. versionchanged:: 2.0.6
added the *columns* parameter.
@ -612,10 +590,6 @@ The ``cursor`` class
are decoded in the connection `~connection.encoding` when read
from the backend.
.. versionchanged:: 2.9
the table and fields names are now quoted. If you need to specify
a schema-qualified table please use `copy_expert()`.
.. method:: copy_expert(sql, file, size=8192)

@ -50,7 +50,7 @@ An example of the available constants defined in the module:
'42P01'
Constants representing all the error values defined by PostgreSQL versions
between 8.1 and 15 are included in the module.
between 8.1 and 11 are included in the module.
.. autofunction:: lookup(code)
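For instance, an error code can be translated back to its symbolic name::

    >>> from psycopg2 import errorcodes
    >>> errorcodes.lookup('42P01')
    'UNDEFINED_TABLE'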

@ -10,21 +10,11 @@
.. versionadded:: 2.8
.. versionchanged:: 2.8.4 added errors introduced in PostgreSQL 12
.. versionchanged:: 2.8.6 added errors introduced in PostgreSQL 13
.. versionchanged:: 2.9.2 added errors introduced in PostgreSQL 14
.. versionchanged:: 2.9.4 added errors introduced in PostgreSQL 15
.. versionchanged:: 2.9.10 added errors introduced in PostgreSQL 17
This module exposes the classes psycopg raises upon receiving an error from
the database with a :sql:`SQLSTATE` value attached (available in the
`~psycopg2.Error.pgcode` attribute). The content of the module is generated
from the PostgreSQL source code and includes classes for every error defined
by PostgreSQL in versions between 9.1 and 15.
by PostgreSQL in versions between 9.1 and 11.
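For instance, a statement referencing a missing table can be trapped by its
specific class and its code inspected; a sketch, assuming the table does not
exist:

.. code:: python

    from psycopg2 import errors

    try:
        cur.execute("SELECT * FROM nonexistent_table")
    except errors.UndefinedTable as e:
        print(e.pgcode)   # '42P01'
        conn.rollback()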
Every class in the module is named after what is referred to as "condition name" `in
the documentation`__, converted to CamelCase: e.g. the error 22012,

@ -413,9 +413,9 @@ deal with Python objects adaptation:
.. method:: getquoted()
Return the string enclosed in single quotes. Any single quote appearing
in the string is escaped by doubling it according to SQL string
constants syntax. Backslashes are escaped too.
Return the string enclosed in single quotes. Any single quote
appearing in the the string is escaped by doubling it according to SQL
string constants syntax. Backslashes are escaped too.
>>> QuotedString(r"O'Reilly").getquoted()
"'O''Reilly'"
@ -453,6 +453,13 @@ deal with Python objects adaptation:
Specialized adapters for Python datetime objects.
.. class:: DateFromMx
TimeFromMx
TimestampFromMx
IntervalFromMx
Specialized adapters for `mx.DateTime`_ objects.
.. data:: adapters
Dictionary of the currently registered object adapters. Use
@ -751,8 +758,8 @@ methods. The level can be set to one of the following constants:
.. data:: ISOLATION_LEVEL_READ_COMMITTED
This is usually the default PostgreSQL value, but a different default may
be set in the database configuration.
This is usually the the default PostgreSQL value, but a different default
may be set in the database configuration.
A new transaction is started at the first `~cursor.execute()` command on a
cursor and at each new `!execute()` after a `~connection.commit()` or a
@ -997,6 +1004,20 @@ from the database. See :ref:`unicode-handling` for details.
Typecasters to convert time-related data types to Python `!datetime`
objects.
.. data:: MXDATE
MXDATETIME
MXDATETIMETZ
MXINTERVAL
MXTIME
MXDATEARRAY
MXDATETIMEARRAY
MXDATETIMETZARRAY
MXINTERVALARRAY
MXTIMEARRAY
Typecasters to convert time-related data types to `mx.DateTime`_ objects.
Only available if Psycopg was compiled with `!mx` support.
.. versionchanged:: 2.2
previously the `DECIMAL` typecaster and the specific time-related
typecasters (`!PY*` and `!MX*`) were not exposed by the `extensions`

@ -41,8 +41,8 @@ If you want to use a `!connection` subclass you can pass it as the
Dictionary-like cursor
^^^^^^^^^^^^^^^^^^^^^^
The dict cursors allow to access to the attributes of retrieved records
using an interface similar to the Python dictionaries instead of the tuples.
The dict cursors allow to access to the retrieved records using an interface
similar to the Python dictionaries instead of the tuples.
>>> dict_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
>>> dict_cur.execute("INSERT INTO test (num, data) VALUES(%s, %s)",
@ -270,7 +270,7 @@ The individual messages in the replication stream are represented by
Replication slots are a feature of PostgreSQL server starting with
version 9.4.
.. method:: start_replication(slot_name=None, slot_type=None, start_lsn=0, timeline=0, options=None, decode=False, status_interval=10)
.. method:: start_replication(slot_name=None, slot_type=None, start_lsn=0, timeline=0, options=None, decode=False)
Start replication on the connection.
@ -288,7 +288,6 @@ The individual messages in the replication stream are represented by
slot (not allowed with physical replication)
:param decode: a flag indicating that unicode conversion should be
performed on messages received from the server
:param status_interval: time between feedback packets sent to the server
If a *slot_name* is specified, the slot must exist on the server and
its type must match the replication type used.
@ -329,14 +328,6 @@ The individual messages in the replication stream are represented by
*This parameter should not be set with physical replication or with
logical replication plugins that produce binary output.*
Replication stream should periodically send feedback to the database
to prevent disconnect via timeout. Feedback is automatically sent when
`read_message()` is called or during run of the `consume_stream()`.
To specify the feedback interval use *status_interval* parameter.
The value of this parameter must be set to at least 1 second, but
it can have a fractional part.
This function constructs a |START_REPLICATION|_ command and calls
`start_replication_expert()` internally.
@ -345,13 +336,10 @@ The individual messages in the replication stream are represented by
`read_message()` in case of :ref:`asynchronous connection
<async-support>`.
.. versionchanged:: 2.8.3
added the *status_interval* parameter.
.. |START_REPLICATION| replace:: :sql:`START_REPLICATION`
.. _START_REPLICATION: https://www.postgresql.org/docs/current/static/protocol-replication.html
.. method:: start_replication_expert(command, decode=False, status_interval=10)
.. method:: start_replication_expert(command, decode=False)
Start replication on the connection using provided
|START_REPLICATION|_ command.
@ -360,13 +348,9 @@ The individual messages in the replication stream are represented by
`~psycopg2.sql.Composable` instance for dynamic generation.
:param decode: a flag indicating that unicode conversion should be
performed on messages received from the server.
:param status_interval: time between feedback packets sent to the server
.. versionchanged:: 2.8.3
added the *status_interval* parameter.
.. method:: consume_stream(consume, keepalive_interval=None)
.. method:: consume_stream(consume, keepalive_interval=10)
:param consume: a callable object with signature :samp:`consume({msg})`
:param keepalive_interval: interval (in seconds) to send keepalive
@ -389,15 +373,14 @@ The individual messages in the replication stream are represented by
`ReplicationMessage` class. See `read_message()` for details about
message decoding.
This method also sends feedback messages to the server every
*keepalive_interval* (in seconds). The value of this parameter must
This method also sends keepalive messages to the server in case there
were no new data from the server for the duration of
*keepalive_interval* (in seconds). The value of this parameter must
be set to at least 1 second, but it can have a fractional part.
If the *keepalive_interval* is not specified, the value of
*status_interval* specified in the `start_replication()` or
`start_replication_expert()` will be used.
The client must confirm every processed message by calling
`send_feedback()` method on the corresponding replication cursor. A
After processing certain amount of messages the client should send a
confirmation message to the server. This should be done by calling
`send_feedback()` method on the corresponding replication cursor. A
reference to the cursor is provided in the `ReplicationMessage` as an
attribute.
@ -410,7 +393,9 @@ The individual messages in the replication stream are represented by
def __call__(self, msg):
self.process_message(msg.payload)
msg.cursor.send_feedback(flush_lsn=msg.data_start)
if self.should_send_feedback(msg):
msg.cursor.send_feedback(flush_lsn=msg.data_start)
consumer = LogicalStreamConsumer()
cur.consume_stream(consumer)
@ -423,10 +408,12 @@ The individual messages in the replication stream are represented by
retains all the WAL segments that might be needed to stream the
changes via all of the currently open replication slots.
.. versionchanged:: 2.8.3
changed the default value of the *keepalive_interval* parameter to `!None`.
On the other hand, it is not recommended to send a confirmation
after *every* processed message, since that will put an
unnecessary load on the network and the server. A possible strategy
is to confirm after every COMMIT message.
.. method:: send_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False, force=False)
.. method:: send_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False)
:param write_lsn: a LSN position up to which the client has written the data locally
:param flush_lsn: a LSN position up to which the client has processed the
@ -436,21 +423,13 @@ The individual messages in the replication stream are represented by
has applied the changes (physical replication
master-slave protocol only)
:param reply: request the server to send back a keepalive message immediately
:param force: force sending a feedback message regardless of status_interval timeout
Use this method to report to the server that all messages up to a
certain LSN position have been processed on the client and may be
discarded on the server.
If the *reply* or *force* parameters are not set, this method will
just update internal structures without sending the feedback message
to the server. The library sends feedback message automatically
when *status_interval* timeout is reached. For this to work, you must
call `send_feedback()` on the same Cursor that you called `start_replication()`
on (the one in `message.cursor`) or your feedback will be lost.
.. versionchanged:: 2.8.3
added the *force* parameter.
This method can also be called with all parameters left to their default
values, just to send a keepalive message to the server.
Low-level replication cursor methods for :ref:`asynchronous connection
<async-support>` operation.
@ -484,9 +463,9 @@ The individual messages in the replication stream are represented by
corresponding connection to block the process until there is more data
from the server.
Last, but not least, this method sends feedback messages when
*status_interval* timeout is reached or when keepalive message with
reply request arrived from the server.
The server can send keepalive messages to the client periodically.
Such messages are silently consumed by this method and are never
reported to the caller.
.. method:: fileno()
@ -502,13 +481,6 @@ The individual messages in the replication stream are represented by
communication with the server (a data or keepalive message in either
direction).
.. attribute:: feedback_timestamp
A `~datetime` object representing the timestamp at the moment when
the last feedback message was sent to the server.
.. versionadded:: 2.8.3
.. attribute:: wal_end
LSN position of the current end of WAL on the server at the
@ -524,21 +496,33 @@ The individual messages in the replication stream are represented by
def consume(msg):
# ...
msg.cursor.send_feedback(flush_lsn=msg.data_start)
status_interval = 10.0
keepalive_interval = 10.0
while True:
msg = cur.read_message()
if msg:
consume(msg)
else:
now = datetime.now()
timeout = status_interval - (now - cur.feedback_timestamp).total_seconds()
timeout = keepalive_interval - (now - cur.io_timestamp).total_seconds()
try:
sel = select([cur], [], [], max(0, timeout))
if not any(sel):
cur.send_feedback() # timed out, send keepalive message
except InterruptedError:
pass # recalculate timeout and continue
.. warning::
The :samp:`consume({msg})` function will only be called when there are new
database writes on the server e.g. any DML or DDL statement. Depending on
your Postgres cluster configuration this might cause the server to run out
of disk space if the writes are too far apart. To prevent this from
happening you can use `~ReplicationCursor.wal_end` value to periodically
send feedback to the server to notify that your replication client has
received and processed all the messages.
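A possible sketch of such periodic feedback inside the consumer
(``process()`` and ``time_for_feedback()`` are hypothetical placeholders):

.. code:: python

    def consume(msg):
        process(msg.payload)              # hypothetical processing step
        if time_for_feedback():           # hypothetical periodicity check
            # wal_end is the server-reported end of WAL: confirming it tells
            # the server everything received so far has been handled
            msg.cursor.send_feedback(flush_lsn=msg.cursor.wal_end)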
.. index::
pair: Cursor; Replication
@ -645,13 +629,6 @@ want to convert the float values from :sql:`json` into
loads = lambda x: json.loads(x, parse_float=Decimal)
psycopg2.extras.register_json(conn, loads=loads)
Or, if you want to use an alternative JSON module implementation, such as the
faster UltraJSON_, you can use::
psycopg2.extras.register_default_json(loads=ujson.loads, globally=True)
psycopg2.extras.register_default_jsonb(loads=ujson.loads, globally=True)
.. _UltraJSON: https://pypi.org/project/ujson/
.. autoclass:: Json
@ -1020,7 +997,7 @@ Fast execution helpers
The current implementation of `~cursor.executemany()` is (using an extremely
charitable understatement) not particularly performing. These functions can
be used to speed up the repeated execution of a statement against a set of
be used to speed up the repeated execution of a statement againts a set of
parameters. By reducing the number of server roundtrips the performance can be
`orders of magnitude better`__ than using `!executemany()`.
@ -1029,14 +1006,6 @@ parameters. By reducing the number of server roundtrips the performance can be
.. autofunction:: execute_batch
.. code:: python
>>> nums = ((1,), (5,), (10,))
>>> execute_batch(cur, "INSERT INTO test (num) VALUES (%s)", nums)
>>> tuples = ((123, "foo"), (42, "bar"), (23, "baz"))
>>> execute_batch(cur, "INSERT INTO test (num, data) VALUES (%s, %s)", tuples)
.. versionadded:: 2.7
.. note::

@ -7,30 +7,6 @@ Here are a few gotchas you may encounter using `psycopg2`. Feel free to
suggest new entries!
Meta
----
.. _faq-question:
.. cssclass:: faq
How do I ask a question?
- Have you first checked if your question is answered already in the
documentation?
- If your question is about installing psycopg, have you checked the
:ref:`install FAQ <faq-compile>` and the :ref:`install docs
<installation>`?
- Have you googled for your error message?
- If you haven't found an answer yet, please write to the `Mailing List`_.
- If you haven't found a bug, DO NOT write to the bug tracker to ask
questions. You will only get piro grumpy.
.. _mailing list: https://www.postgresql.org/list/psycopg/
.. _faq-transactions:
Problems with transactions handling
@ -180,7 +156,7 @@ Psycopg automatically converts PostgreSQL :sql:`json` data into Python objects.
Psycopg converts :sql:`json` values into Python objects but :sql:`jsonb` values are returned as strings. Can :sql:`jsonb` be converted automatically?
Automatic conversion of :sql:`jsonb` values is supported from Psycopg
release 2.5.4. For previous versions you can register the :sql:`json`
typecaster on the :sql:`jsonb` oids (which are known and not supposed to
typecaster on the :sql:`jsonb` oids (which are known and not suppsed to
change in future PostgreSQL versions)::
psycopg2.extras.register_json(oid=3802, array_oid=3807, globally=True)
@ -271,7 +247,7 @@ When should I save and re-use a connection as opposed to creating a new one as n
What are the advantages or disadvantages of using named cursors?
The only disadvantage is that they use up resources on the server and
that there is a little overhead because at least two queries (one to
that there is a little overhead because a at least two queries (one to
create the cursor and one to fetch the initial result set) are issued to
the backend. The advantage is that data is fetched one chunk at a time:
using small `~cursor.fetchmany()` values it is possible to use very
@ -292,7 +268,7 @@ How do I interrupt a long-running query in an interactive shell?
can handle a :kbd:`Ctrl-C` correctly. For previous versions, you can use
`this implementation`__.
.. __: https://www.psycopg.org/articles/2014/07/20/cancelling-postgresql-statements-python/
.. __: http://initd.org/psycopg/articles/2014/07/20/cancelling-postgresql-statements-python/
.. code-block:: pycon
@ -311,24 +287,15 @@ How do I interrupt a long-running query in an interactive shell?
.. _faq-compile:
Problems compiling and installing psycopg2
------------------------------------------
.. _faq-wheels:
.. cssclass:: faq
Psycopg 2.8 fails to install, Psycopg 2.7 was working fine.
With Psycopg 2.7 you were installing binary packages, but they have proven
unreliable so now you have to install them explicitly using the
``psycopg2-binary`` package. See :ref:`binary-packages` for all the
details.
Problems compiling and deploying psycopg2
-----------------------------------------
.. _faq-python-h:
.. cssclass:: faq
I can't compile `!psycopg2`: the compiler says *error: Python.h: No such file or directory*. What am I missing?
You need to install a Python development package: it is usually called
``python-dev`` or ``python3-dev`` according to your Python version.
``python-dev``.
.. _faq-libpq-fe-h:

@ -23,7 +23,7 @@ extended and customized thanks to a flexible :ref:`objects adaptation system
Psycopg 2 is both Unicode and Python 3 friendly.
.. _Psycopg: https://psycopg.org/
.. _Psycopg: http://initd.org/psycopg/
.. _PostgreSQL: https://www.postgresql.org/
.. _Python: https://www.python.org/
.. _libpq: https://www.postgresql.org/docs/current/static/libpq.html
@ -57,7 +57,6 @@ Psycopg 2 is both Unicode and Python 3 friendly.
.. rubric:: Indices and tables
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

@ -1,6 +1,4 @@
.. _installation:
Installation
Introduction
============
.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
@ -8,117 +6,20 @@ Installation
Psycopg is a PostgreSQL_ adapter for the Python_ programming language. It is a
wrapper for the libpq_, the official PostgreSQL client library.
The `psycopg2` package is the current mature implementation of the adapter: it
is a C extension and as such it is only compatible with CPython_. If you want
to use Psycopg on a different Python implementation (PyPy, Jython, IronPython)
there is an experimental `porting of Psycopg for Ctypes`__, but it is not as
mature as the C implementation yet.
.. _PostgreSQL: https://www.postgresql.org/
.. _Python: https://www.python.org/
.. _libpq: https://www.postgresql.org/docs/current/static/libpq.html
.. _CPython: https://en.wikipedia.org/wiki/CPython
.. _Ctypes: https://docs.python.org/library/ctypes.html
.. __: https://github.com/mvantellingen/psycopg2-ctypes
.. index::
single: Install; from PyPI
single: Install; wheel
single: Wheel
.. _binary-packages:
Quick Install
-------------
For most operating systems, the quickest way to install Psycopg is using the
wheel_ package available on PyPI_:
.. code-block:: console
$ pip install psycopg2-binary
This will install a pre-compiled binary version of the module which does not
require the build or runtime prerequisites described below. Make sure to use
an up-to-date version of :program:`pip` (you can upgrade it using something
like ``pip install -U pip``).
You may then import the ``psycopg2`` package, as usual:
.. code-block:: python
import psycopg2
# Connect to your postgres DB
conn = psycopg2.connect("dbname=test user=postgres")
# Open a cursor to perform database operations
cur = conn.cursor()
# Execute a query
cur.execute("SELECT * FROM my_data")
# Retrieve query results
records = cur.fetchall()
.. _PyPI: https://pypi.org/project/psycopg2-binary/
.. _wheel: https://pythonwheels.com/
psycopg vs psycopg-binary
^^^^^^^^^^^^^^^^^^^^^^^^^
The ``psycopg2-binary`` package is meant for beginners to start playing
with Python and PostgreSQL without the need to meet the build
requirements.
If you are the maintainer of a published package depending on `!psycopg2`
you shouldn't use ``psycopg2-binary`` as a module dependency. **For
production use you are advised to use the source distribution.**
The binary packages come with their own versions of a few C libraries,
among which ``libpq`` and ``libssl``, which will be used regardless of other
libraries available on the client: upgrading the system libraries will not
upgrade the libraries used by `!psycopg2`. Please build `!psycopg2` from
source if you want to maintain binary upgradeability.
.. warning::
The `!psycopg2` wheel package comes packaged, among the others, with its
own ``libssl`` binary. This may create conflicts with other extension
modules binding with ``libssl`` as well, for instance with the Python
`ssl` module: in some cases, under concurrency, the interaction between
the two libraries may result in a segfault. In case of doubts you are
advised to use a package built from source.
.. index::
single: Install; disable wheel
single: Wheel; disable
.. _disable-wheel:
Change in binary packages between Psycopg 2.7 and 2.8
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In version 2.7.x, :command:`pip install psycopg2` would have tried to install
automatically the binary package of Psycopg. Because of concurrency problems
binary packages have displayed, ``psycopg2-binary`` has become a separate
package, and from 2.8 it has become the only way to install the binary
package.
If you are using Psycopg 2.7 and you want to disable the use of wheel binary
packages, relying on the system libraries available on your client, you
can use the :command:`pip` |--no-binary option|__, e.g.:
.. code-block:: console
$ pip install --no-binary :all: psycopg2
.. |--no-binary option| replace:: ``--no-binary`` option
.. __: https://pip.pypa.io/en/stable/reference/pip_install/#install-no-binary
which can be specified in your :file:`requirements.txt` files too, e.g. use:
.. code-block:: none
psycopg2>=2.7,<2.8 --no-binary psycopg2
to use the last bugfix release of the `!psycopg2` 2.7 package, specifying to
always compile it from source. Of course in this case you will have to meet
the :ref:`build prerequisites <build-prerequisites>`.
.. index::
single: Prerequisites
@ -131,17 +32,11 @@ The current `!psycopg2` implementation supports:
..
NOTE: keep consistent with setup.py and the /features/ page.
- Python versions from 3.8 to 3.13
- PostgreSQL server versions from 7.4 to 17
- Python version 2.7
- Python 3 versions from 3.4 to 3.7
- PostgreSQL server versions from 7.4 to 11
- PostgreSQL client library version from 9.1
.. note::
Not all the psycopg2 versions support all the supported Python versions.
Please see the :ref:`release notes <news>` to verify when the support for
a new Python version was added and when the support for an old Python
version was removed.
.. _build-prerequisites:
@ -160,9 +55,8 @@ it from sources you will need:
- A C compiler.
- The Python header files. They are usually installed in a package such as
**python-dev** or **python3-dev**. A message such as *error: Python.h: No
such file or directory* is an indication that the Python headers are
missing.
**python-dev**. A message such as *error: Python.h: No such file or
directory* is an indication that the Python headers are missing.
- The libpq header files. They are usually installed in a package such as
**libpq-dev**. If you get an *error: libpq-fe.h: No such file or directory*
@ -205,7 +99,7 @@ self-contained wheel package, it will need the libpq_ library at runtime
(usually distributed in a ``libpq.so`` or ``libpq.dll`` file). `!psycopg2`
relies on the host OS to find the library: if the library is installed in a
standard location there is usually no problem; if the library is in a
non-standard location you will have to tell Psycopg how to find it,
non-standard location you will have to tell somehow Psycopg how to find it,
which is OS-dependent (for instance setting a suitable
:envvar:`LD_LIBRARY_PATH` on Linux).
@ -226,6 +120,96 @@ which is OS-dependent (for instance setting a suitable
to connect to.
.. index::
single: Install; from PyPI
single: Install; wheel
single: Wheel
Binary install from PyPI
------------------------
`!psycopg2` is also `available on PyPI`__ in the form of wheel_ packages for
the most common platforms (Linux, OSX, Windows): this should allow you to
install a binary version of the module without requiring the above build or
runtime prerequisites.
.. note::
The ``-binary`` package is meant for beginners to start playing with
Python and PostgreSQL without the need to meet the build requirements.
If you are the maintainer of a published package depending on `!psycopg2`
you shouldn't use ``psycopg2-binary`` as a module dependency. For
production use you are advised to use the source distribution.
Make sure to use an up-to-date version of :program:`pip` (you can upgrade it
using something like ``pip install -U pip``), then you can run:
.. code-block:: console
$ pip install psycopg2-binary
.. __: PyPI-binary_
.. _PyPI-binary: https://pypi.org/project/psycopg2-binary/
.. _wheel: https://pythonwheels.com/
.. note::
The binary packages come with their own versions of a few C libraries,
among which ``libpq`` and ``libssl``, which will be used regardless of other
libraries available on the client: upgrading the system libraries will not
upgrade the libraries used by `!psycopg2`. Please build `!psycopg2` from
source if you want to maintain binary upgradeability.
.. warning::
The `!psycopg2` wheel package comes packaged, among the others, with its
own ``libssl`` binary. This may create conflicts with other extension
modules binding with ``libssl`` as well, for instance with the Python
`ssl` module: in some cases, under concurrency, the interaction between
the two libraries may result in a segfault. In case of doubts you are
advised to use a package built from source.
.. index::
single: Install; disable wheel
single: Wheel; disable
.. _disable-wheel:
Disabling wheel packages for Psycopg 2.7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In version 2.7.x, `pip install psycopg2` would have tried to install the wheel
binary package of Psycopg. Because of the problems the wheel packages have
displayed, `psycopg2-binary` has become a separate package, and from 2.8 it
has become the only way to install the binary package.
If you are using psycopg 2.7 and you want to disable the use of wheel binary
packages, relying on the system libraries available on your client, you
can use the :command:`pip` |--no-binary option|__, e.g.:
.. code-block:: console
$ pip install --no-binary :all: psycopg2
.. |--no-binary option| replace:: ``--no-binary`` option
.. __: https://pip.pypa.io/en/stable/reference/pip_install/#install-no-binary
which can be specified in your :file:`requirements.txt` files too, e.g. use:
.. code-block:: none
psycopg2>=2.7,<2.8 --no-binary psycopg2
to use the last bugfix release of the `!psycopg2` 2.7 package, specifying to
always compile it from source. Of course in this case you will have to meet
the :ref:`build prerequisites <build-prerequisites>`.
.. index::
single: setup.py
single: setup.cfg
@ -237,6 +221,7 @@ If you have less standard requirements such as:
- creating a :ref:`debug build <debug-build>`,
- using :program:`pg_config` not in the :envvar:`PATH`,
- supporting ``mx.DateTime``,
then take a look at the ``setup.cfg`` file.
@ -265,8 +250,7 @@ In case of problems, Psycopg can be configured to emit detailed debug
messages, which can be very useful for diagnostics and to report a bug. In
order to create a debug package:
- `Download`__ and unpack the Psycopg *source package* (the ``.tar.gz``
package).
- `Download`__ and unpack the Psycopg source package.
- Edit the ``setup.cfg`` file adding the ``PSYCOPG_DEBUG`` flag to the
``define`` option.
@ -283,32 +267,9 @@ order to create a debug package:
one you just compiled and not e.g. the system one): you will have a copious
stream of information printed on stderr.
.. __: https://pypi.org/project/psycopg2/#files
.. __: http://initd.org/psycopg/download/
Non-standard Python Implementation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The `psycopg2` package is the current mature implementation of the adapter: it
is a C extension and as such it is only compatible with CPython_. If you want
to use Psycopg on a different Python implementation (PyPy, Jython, IronPython)
there are a couple of alternatives:
- a `Ctypes port`__, but it is not as mature as the C implementation yet
and it is not as feature-complete;
- a `CFFI port`__ which is currently more widely used and reported to be more
efficient on PyPy, but please be careful with its version numbers because they
are not aligned to the official psycopg2 ones and some features may differ.
.. _PostgreSQL: https://www.postgresql.org/
.. _Python: https://www.python.org/
.. _libpq: https://www.postgresql.org/docs/current/static/libpq.html
.. _CPython: https://en.wikipedia.org/wiki/CPython
.. _Ctypes: https://docs.python.org/library/ctypes.html
.. __: https://github.com/mvantellingen/psycopg2-ctypes
.. __: https://github.com/chtd/psycopg2cffi
.. index::
single: tests
@ -337,6 +298,7 @@ setting the environment variables:
The database should already exist before running the tests.
.. _other-problems:
If you still have problems
@ -360,5 +322,5 @@ Try the following. *In order:*
ever and about the quality time you have wasted figuring out the correct
:envvar:`ARCHFLAGS`. Especially useful from the Starbucks near you.
.. _mailing list: https://www.postgresql.org/list/psycopg/
.. _mailing list: https://lists.postgresql.org/mj/mj_wwwusr?func=lists-long-full&extra=psycopg
.. _bug tracker: https://github.com/psycopg/psycopg2/issues

View File

@ -168,7 +168,7 @@ available through the following exceptions:
>>> e.pgcode
'42P01'
>>> print(e.pgerror)
>>> print e.pgerror
ERROR: relation "barf" does not exist
LINE 1: SELECT * FROM barf
^
@ -184,7 +184,7 @@ available through the following exceptions:
>>> try:
... cur.execute("SELECT * FROM barf")
... except psycopg2.Error as e:
... except psycopg2.Error, e:
... pass
>>> e.diag.severity

@ -2,8 +2,6 @@
single: Release notes
single: News
.. _news:
Release notes
=============

@ -33,7 +33,7 @@ name should be escaped using `~psycopg2.extensions.quote_ident()`::
# This works, but it is not optimal
table_name = 'my_table'
cur.execute(
"insert into %s values (%%s, %%s)" % ext.quote_ident(table_name, cur),
"insert into %s values (%%s, %%s)" % ext.quote_ident(table_name),
[10, 20])
This is now safe, but it is somewhat ad-hoc. In case, for some reason, it is
@ -55,53 +55,10 @@ from the query parameters::
.format(sql.Identifier('my_table')),
[10, 20])
Module usage
------------
Usually you should express the template of your query as an `SQL` instance
with `{}`\-style placeholders and use `~SQL.format()` to merge the variable
parts into them, all of which must be `Composable` subclasses. You can still
have `%s`\ -style placeholders in your query and pass values to
`~cursor.execute()`: such value placeholders will be untouched by
`!format()`::
query = sql.SQL("select {field} from {table} where {pkey} = %s").format(
field=sql.Identifier('my_name'),
table=sql.Identifier('some_table'),
pkey=sql.Identifier('id'))
The resulting object is meant to be passed directly to cursor methods such as
`~cursor.execute()`, `~cursor.executemany()`, `~cursor.copy_expert()`, but can
also be used to compose a query as a Python string, using the
`~Composable.as_string()` method::
cur.execute(query, (42,))
If part of your query is a variable sequence of arguments, such as a
comma-separated list of field names, you can use the `SQL.join()` method to
pass them to the query::
query = sql.SQL("select {fields} from {table}").format(
fields=sql.SQL(',').join([
sql.Identifier('field1'),
sql.Identifier('field2'),
sql.Identifier('field3'),
]),
table=sql.Identifier('some_table'))
`!sql` objects
--------------
The `!sql` objects are in the following inheritance hierarchy:
| `Composable`: the base class exposing the common interface
| ``|__`` `SQL`: a literal snippet of an SQL query
| ``|__`` `Identifier`: a PostgreSQL identifier or dot-separated sequence of identifiers
| ``|__`` `Literal`: a value hardcoded into a query
| ``|__`` `Placeholder`: a `%s`\ -style placeholder whose value will be added later e.g. by `~cursor.execute()`
| ``|__`` `Composed`: a sequence of `!Composable` instances.
The objects exposed by the `!sql` module can be used to compose a query as a
Python string (using the `~Composable.as_string()` method) or passed directly
to cursor methods such as `~cursor.execute()`, `~cursor.executemany()`,
`~cursor.copy_expert()`.
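A composed query can also be previewed as a string before execution; a small
sketch (rendering requires a connection or cursor as context):

.. code:: python

    from psycopg2 import sql

    query = sql.SQL("select {field} from {table}").format(
        field=sql.Identifier('my_name'),
        table=sql.Identifier('some_table'))

    print(query.as_string(conn))   # select "my_name" from "some_table"
    cur.execute(query)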
.. autoclass:: Composable

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
"""
extension
~~~~~~~~~

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
"""
sql role
~~~~~~~~

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
"""
ticket role
~~~~~~~~~~~

@ -2,6 +2,7 @@
"""Create the docs table of the sqlstate errors.
"""
from __future__ import print_function
import re
import sys
@ -25,8 +26,8 @@ def main():
for k in sorted(sqlstate_errors):
exc = sqlstate_errors[k]
lines.append(Line(
f"``{k}``", f"`!{exc.__name__}`",
f"`!{get_base_exception(exc).__name__}`", k))
"``%s``" % k, "`!%s`" % exc.__name__,
"`!%s`" % get_base_exception(exc).__name__, k))
widths = [max(len(l[c]) for l in lines) for c in range(3)]
h = Line(*(['=' * w for w in widths] + [None]))
@ -39,7 +40,7 @@ def main():
for l in lines:
cls = l.sqlstate[:2] if l.sqlstate else None
if cls and cls != sqlclass:
print(f"**Class {cls}**: {sqlclasses[cls]}")
print("**Class %s**: %s" % (cls, sqlclasses[cls]))
print(h1)
sqlclass = cls

@ -5,10 +5,6 @@
.. module:: psycopg2.tz
.. deprecated:: 2.9
The module will be dropped in psycopg 2.10. Use `datetime.timezone`
instead.
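A sketch of the replacement: a fixed offset previously expressed with
`!FixedOffsetTimezone` can be built with the standard library directly:

.. code:: python

    from datetime import timezone, timedelta

    # roughly equivalent to psycopg2.tz.FixedOffsetTimezone(offset=60)
    tzinfo = timezone(timedelta(minutes=60))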
This module holds two different tzinfo implementations that can be used as the
`tzinfo` argument to `~datetime.datetime` constructors, directly passed to
Psycopg functions or used to set the `cursor.tzinfo_factory` attribute in

@ -407,7 +407,7 @@ defined on the database connection (the `PostgreSQL encoding`__, available in
`connection.encoding`, is translated into a `Python encoding`__ using the
`~psycopg2.extensions.encodings` mapping)::
>>> print(u, type(u))
>>> print u, type(u)
àèìòù€ <type 'unicode'>
>>> cur.execute("INSERT INTO test (num, data) VALUES (%s,%s);", (74, u))
@ -418,19 +418,19 @@ defined on the database connection (the `PostgreSQL encoding`__, available in
When reading data from the database, in Python 2 the strings returned are
usually 8 bit `!str` objects encoded in the database client encoding::
>>> print(conn.encoding)
>>> print conn.encoding
UTF8
>>> cur.execute("SELECT data FROM test WHERE num = 74")
>>> x = cur.fetchone()[0]
>>> print(x, type(x), repr(x))
>>> print x, type(x), repr(x)
àèìòù€ <type 'str'> '\xc3\xa0\xc3\xa8\xc3\xac\xc3\xb2\xc3\xb9\xe2\x82\xac'
>>> conn.set_client_encoding('LATIN9')
>>> cur.execute("SELECT data FROM test WHERE num = 74")
>>> x = cur.fetchone()[0]
>>> print(type(x), repr(x))
>>> print type(x), repr(x)
<type 'str'> '\xe0\xe8\xec\xf2\xf9\xa4'
In Python 3 instead the strings are automatically *decoded* in the connection
@ -442,7 +442,7 @@ In Python 2 you must register a :ref:`typecaster
>>> cur.execute("SELECT data FROM test WHERE num = 74")
>>> x = cur.fetchone()[0]
>>> print(x, type(x), repr(x))
>>> print x, type(x), repr(x)
àèìòù€ <type 'unicode'> u'\xe0\xe8\xec\xf2\xf9\u20ac'
In the above example, the `~psycopg2.extensions.UNICODE` typecaster is
@ -540,6 +540,7 @@ or `!memoryview` (in Python 3).
single: Date objects; Adaptation
single: Time objects; Adaptation
single: Interval objects; Adaptation
single: mx.DateTime; Adaptation
.. _adapt-date:
@ -549,7 +550,8 @@ Date/Time objects adaptation
Python builtin `~datetime.datetime`, `~datetime.date`,
`~datetime.time`, `~datetime.timedelta` are converted into PostgreSQL's
:sql:`timestamp[tz]`, :sql:`date`, :sql:`time[tz]`, :sql:`interval` data types.
Time zones are supported too.
Time zones are supported too. The Egenix `mx.DateTime`_ objects are adapted
the same way::
>>> dt = datetime.datetime.now()
>>> dt
@ -574,39 +576,29 @@ Time zones handling
'''''''''''''''''''
The PostgreSQL type :sql:`timestamp with time zone` (a.k.a.
:sql:`timestamptz`) is converted into Python `~datetime.datetime` objects.
:sql:`timestamptz`) is converted into Python `~datetime.datetime` objects with
a `~datetime.datetime.tzinfo` attribute set to a
`~psycopg2.tz.FixedOffsetTimezone` instance.
>>> cur.execute("SET TIME ZONE 'Europe/Rome'") # UTC + 1 hour
>>> cur.execute("SELECT '2010-01-01 10:30:45'::timestamptz")
>>> cur.fetchone()[0]
datetime.datetime(2010, 1, 1, 10, 30, 45,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)))
>>> cur.execute("SET TIME ZONE 'Europe/Rome';") # UTC + 1 hour
>>> cur.execute("SELECT '2010-01-01 10:30:45'::timestamptz;")
>>> cur.fetchone()[0].tzinfo
psycopg2.tz.FixedOffsetTimezone(offset=60, name=None)
.. note::
Note that only time zones with an integer number of minutes are supported:
this is a limitation of the Python `datetime` module. A few historical time
zones had seconds in the UTC offset: these time zones will have the offset
rounded to the nearest minute, with an error of up to 30 seconds.
Before Python 3.7, the `datetime` module only supported timezones with an
integer number of minutes. A few historical time zones had seconds in the
UTC offset: these time zones will have the offset rounded to the nearest
minute, with an error of up to 30 seconds, on Python versions before 3.7.
>>> cur.execute("SET TIME ZONE 'Asia/Calcutta'") # offset was +5:21:10
>>> cur.execute("SELECT '1900-01-01 10:30:45'::timestamptz")
>>> cur.fetchone()[0].tzinfo
# On Python 3.6: 5h, 21m
datetime.timezone(datetime.timedelta(0, 19260))
# On Python 3.7 and following: 5h, 21m, 10s
datetime.timezone(datetime.timedelta(seconds=19270))
>>> cur.execute("SET TIME ZONE 'Asia/Calcutta';") # offset was +5:53:20
>>> cur.execute("SELECT '1930-01-01 10:30:45'::timestamptz;")
>>> cur.fetchone()[0].tzinfo
psycopg2.tz.FixedOffsetTimezone(offset=353, name=None)
.. versionchanged:: 2.2.2
timezones with seconds are supported (with rounding). Previously such
timezones raised an error.
.. versionchanged:: 2.9
timezones with seconds are supported without rounding.
.. versionchanged:: 2.9
use `datetime.timezone` as default tzinfo object instead of
`~psycopg2.tz.FixedOffsetTimezone`.
.. index::
double: Date objects; Infinite
@ -758,25 +750,18 @@ until a call to the `~connection.rollback()` method.
The connection is responsible for terminating its transaction, calling either
the `~connection.commit()` or `~connection.rollback()` method. Committed
changes are immediately made persistent in the database. If the connection
is closed (using the `~connection.close()` method) or destroyed (using `!del`
or by letting it fall out of scope) while a transaction is in progress, the
server will discard the transaction. However doing so is not advisable:
middleware such as PgBouncer_ may see the connection closed uncleanly and
dispose of it.
.. _PgBouncer: http://www.pgbouncer.org/
changes are immediately made persistent into the database. Closing the
connection using the `~connection.close()` method or destroying the
connection object (using `!del` or letting it fall out of scope)
will result in an implicit rollback.
It is possible to set the connection in *autocommit* mode: this way all the
commands executed will be immediately committed and no rollback is possible. A
few commands (e.g. :sql:`CREATE DATABASE`, :sql:`VACUUM`, :sql:`CALL` on
`stored procedures`__ using transaction control...) require to be run
few commands (e.g. :sql:`CREATE DATABASE`, :sql:`VACUUM`...) require to be run
outside any transaction: in order to be able to run these commands from
Psycopg, the connection must be in autocommit mode: you can use the
`~connection.autocommit` property.
.. __: https://www.postgresql.org/docs/current/xproc.html
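A minimal sketch:

.. code:: python

    conn = psycopg2.connect(DSN)
    conn.autocommit = True
    cur = conn.cursor()
    cur.execute("VACUUM")   # would fail inside a transaction block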
.. warning::
By default even a simple :sql:`SELECT` will start a transaction: in
@ -795,8 +780,6 @@ the details.
.. index::
single: with statement
.. _with:
``with`` statement
^^^^^^^^^^^^^^^^^^
@ -814,7 +797,9 @@ is rolled back.
When a cursor exits the ``with`` block it is closed, releasing any resource
eventually associated with it. The state of the transaction is not affected.
A connection can be used in more than one ``with`` statement
Note that, unlike file objects or other resources, exiting the connection's
``with`` block *doesn't close the connection* but only the transaction
associated with it: a connection can be used in more than a ``with`` statement
and each ``with`` block is effectively wrapped in a separate transaction::
conn = psycopg2.connect(DSN)
@ -829,21 +814,6 @@ and each ``with`` block is effectively wrapped in a separate transaction::
conn.close()
.. warning::
Unlike file objects or other resources, exiting the connection's
``with`` block **doesn't close the connection**, but only the transaction
associated to it. If you want to make sure the connection is closed after
a certain point, you should still use a try-catch block::
conn = psycopg2.connect(DSN)
try:
# connection usage
finally:
conn.close()
.. versionchanged:: 2.9
``with connection`` starts a transaction also on autocommit connections.
.. index::
@ -860,7 +830,7 @@ Server side cursors
When a database query is executed, the Psycopg `cursor` usually fetches
all the records returned by the backend, transferring them to the client
process. If the query returns a huge amount of data, a proportionally large
process. If the query returned an huge amount of data, a proportionally large
amount of memory will be allocated by the client.
If the dataset is too large to be practically handled on the client side, it is
@ -1043,7 +1013,7 @@ using the |lo_import|_ and |lo_export|_ libpq functions.
.. _lo_export: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-EXPORT
.. versionchanged:: 2.6
added support for large objects greater than 2GB. Note that the support is
added support for large objects greated than 2GB. Note that the support is
enabled only if all the following conditions are verified:
- the Python build is 64 bits;
@ -1052,8 +1022,8 @@ using the |lo_import|_ and |lo_export|_ libpq functions.
(`~connection.server_version` must be >= ``90300``).
If Psycopg was built with 64 bits large objects support (i.e. the first
two conditions above are verified), the `psycopg2.__version__` constant
will contain the ``lo64`` flag. If any of the condition is not met
two contidions above are verified), the `psycopg2.__version__` constant
will contain the ``lo64`` flag. If any of the contition is not met
several `!lobject` methods will fail if the arguments exceed 2GB.

@ -6,7 +6,7 @@ provide new-style classes for connection and cursor objects and other sweet
candies. Like the original, psycopg 2 was written with the aim of being very
small and fast, and stable as a rock.
Homepage: https://psycopg.org/
Homepage: http://initd.org/projects/psycopg2
.. _PostgreSQL: https://www.postgresql.org/
.. _Python: https://www.python.org/
@ -19,7 +19,6 @@ Homepage: https://psycopg.org/
# psycopg/__init__.py - initialization of the psycopg module
#
# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -61,6 +60,8 @@ from psycopg2._psycopg import ( # noqa
__version__, __libpq_version__,
)
from psycopg2 import tz # noqa
# Register default adapters.
@ -118,6 +119,9 @@ def connect(dsn=None, connection_factory=None, cursor_factory=None, **kwargs):
if 'async_' in kwargs:
kwasync['async_'] = kwargs.pop('async_')
if dsn is None and not kwargs:
raise TypeError('missing dsn and no parameters')
dsn = _ext.make_dsn(dsn, **kwargs)
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
if cursor_factory is not None:

@ -4,7 +4,6 @@
# psycopg/_ipaddress.py - ipaddress-based network types adaptation
#
# Copyright (C) 2016-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -26,6 +25,7 @@
from psycopg2.extensions import (
new_type, new_array_type, register_type, register_adapter, QuotedString)
from psycopg2.compat import text_type
# The module is imported on register_ipaddress
ipaddress = None
@ -77,13 +77,13 @@ def cast_interface(s, cur=None):
if s is None:
return None
# Py2 version force the use of unicode. meh.
return ipaddress.ip_interface(str(s))
return ipaddress.ip_interface(text_type(s))
def cast_network(s, cur=None):
if s is None:
return None
return ipaddress.ip_network(str(s))
return ipaddress.ip_network(text_type(s))
def adapt_ipaddress(obj):

@ -8,7 +8,6 @@ extensions importing register_json from extras.
# psycopg/_json.py - Implementation of the JSON adaptation objects
#
# Copyright (C) 2012-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -32,6 +31,7 @@ import json
from psycopg2._psycopg import ISQLQuote, QuotedString
from psycopg2._psycopg import new_type, new_array_type, register_type
from psycopg2.compat import PY2
# oids from PostgreSQL 9.2
@ -43,7 +43,7 @@ JSONB_OID = 3802
JSONBARRAY_OID = 3807
class Json:
class Json(object):
"""
An `~psycopg2.extensions.ISQLQuote` wrapper to adapt a Python object to
:sql:`json` data type.
@ -81,9 +81,13 @@ class Json:
qs.prepare(self._conn)
return qs.getquoted()
def __str__(self):
# getquoted is binary
return self.getquoted().decode('ascii', 'replace')
if PY2:
def __str__(self):
return self.getquoted()
else:
def __str__(self):
# getquoted is binary in Py3
return self.getquoted().decode('ascii', 'replace')
def register_json(conn_or_curs=None, globally=False, loads=None,
@ -163,7 +167,7 @@ def _create_json_typecasters(oid, array_oid, loads=None, name='JSON'):
JSON = new_type((oid, ), name, typecast_json)
if array_oid is not None:
JSONARRAY = new_array_type((array_oid, ), f"{name}ARRAY", JSON)
JSONARRAY = new_array_type((array_oid, ), "%sARRAY" % name, JSON)
else:
JSONARRAY = None
@ -194,6 +198,6 @@ def _get_json_oids(conn_or_curs, name='json'):
conn.rollback()
if not r:
raise conn.ProgrammingError(f"{name} data type not found")
raise conn.ProgrammingError("%s data type not found" % name)
return r

lib/_lru_cache.py
@ -0,0 +1,104 @@
"""
LRU cache implementation for Python 2.7
Ported from http://code.activestate.com/recipes/578078/ and simplified for our
use (only support maxsize > 0 and positional arguments).
"""
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
def lru_cache(maxsize=100):
"""Least-recently-used cache decorator.
Arguments to the cached function must be hashable.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
assert maxsize and maxsize > 0, "maxsize %s not supported" % maxsize
def wrapper(*args):
# size limited caching that tracks accesses by recency
key = args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the
# front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
# oldvalue = root[RESULT]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
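# A small usage sketch of the decorator defined above (illustrative only):
if __name__ == '__main__':
    @lru_cache(maxsize=2)
    def square(x):
        return x * x

    square(2); square(3); square(2)
    print(square.cache_info())  # CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)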

@ -5,7 +5,6 @@
# psycopg/_range.py - Implementation of the Range type and adaptation
#
# Copyright (C) 2012-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -30,9 +29,10 @@ import re
from psycopg2._psycopg import ProgrammingError, InterfaceError
from psycopg2.extensions import ISQLQuote, adapt, register_adapter
from psycopg2.extensions import new_type, new_array_type, register_type
from psycopg2.compat import string_types
class Range:
class Range(object):
"""Python representation for a PostgreSQL |range|_ type.
:param lower: lower bound for the range. `!None` means unbound
@ -47,7 +47,7 @@ class Range:
def __init__(self, lower=None, upper=None, bounds='[)', empty=False):
if not empty:
if bounds not in ('[)', '(]', '()', '[]'):
raise ValueError(f"bound flags not valid: {bounds!r}")
raise ValueError("bound flags not valid: %r" % bounds)
self._lower = lower
self._upper = upper
@ -57,9 +57,9 @@ class Range:
def __repr__(self):
if self._bounds is None:
return f"{self.__class__.__name__}(empty=True)"
return "%s(empty=True)" % self.__class__.__name__
else:
return "{}({!r}, {!r}, {!r})".format(self.__class__.__name__,
return "%s(%r, %r, %r)" % (self.__class__.__name__,
self._lower, self._upper, self._bounds)
def __str__(self):
@ -143,6 +143,10 @@ class Range:
def __bool__(self):
return self._bounds is not None
def __nonzero__(self):
# Python 2 compatibility
return type(self).__bool__(self)
def __eq__(self, other):
if not isinstance(other, Range):
return False
@ -234,7 +238,7 @@ def register_range(pgrange, pyrange, conn_or_curs, globally=False):
return caster
class RangeAdapter:
class RangeAdapter(object):
"""`ISQLQuote` adapter for `Range` subclasses.
This is an abstract class: concrete classes must set a `name` class
@ -282,7 +286,7 @@ class RangeAdapter:
+ b", '" + r._bounds.encode('utf8') + b"')"
class RangeCaster:
class RangeCaster(object):
"""Helper class to convert between `Range` and PostgreSQL range types.
Objects of this class are usually created by `register_range()`. Manual
@ -310,7 +314,7 @@ class RangeCaster:
# an implementation detail and is not documented. It is currently used
# for the numeric ranges.
self.adapter = None
if isinstance(pgrange, str):
if isinstance(pgrange, string_types):
self.adapter = type(pgrange, (RangeAdapter,), {})
self.adapter.name = pgrange
else:
@ -327,7 +331,7 @@ class RangeCaster:
self.range = None
try:
if isinstance(pyrange, str):
if isinstance(pyrange, string_types):
self.range = type(pyrange, (Range,), {})
if issubclass(pyrange, Range) and pyrange is not Range:
self.range = pyrange
@ -363,54 +367,33 @@ class RangeCaster:
schema = 'public'
# get the type oid and attributes
curs.execute("""\
select rngtypid, rngsubtype, typarray
try:
curs.execute("""\
select rngtypid, rngsubtype,
(select typarray from pg_type where oid = rngtypid)
from pg_range r
join pg_type t on t.oid = rngtypid
join pg_namespace ns on ns.oid = typnamespace
where typname = %s and ns.nspname = %s;
""", (tname, schema))
rec = curs.fetchone()
if not rec:
# The above algorithm doesn't work for customized search_path
# (#1487) The implementation below works better, but, to guarantee
# backwards compatibility, use it only if the original one failed.
try:
savepoint = False
# Because we executed statements earlier, we are either INTRANS
# or we are IDLE only if the transaction is autocommit, in
# which case we don't need the savepoint anyway.
if conn.status == STATUS_IN_TRANSACTION:
curs.execute("SAVEPOINT register_type")
savepoint = True
except ProgrammingError:
if not conn.autocommit:
conn.rollback()
raise
else:
rec = curs.fetchone()
curs.execute("""\
SELECT rngtypid, rngsubtype, typarray, typname, nspname
from pg_range r
join pg_type t on t.oid = rngtypid
join pg_namespace ns on ns.oid = typnamespace
WHERE t.oid = %s::regtype
""", (name, ))
except ProgrammingError:
pass
else:
rec = curs.fetchone()
if rec:
tname, schema = rec[3:]
finally:
if savepoint:
curs.execute("ROLLBACK TO SAVEPOINT register_type")
# revert the status of the connection as before the command
if conn_status != STATUS_IN_TRANSACTION and not conn.autocommit:
conn.rollback()
# revert the status of the connection as before the command
if (conn_status != STATUS_IN_TRANSACTION
and not conn.autocommit):
conn.rollback()
if not rec:
raise ProgrammingError(
f"PostgreSQL range '{name}' not found")
"PostgreSQL type '%s' not found" % name)
type, subtype, array = rec[:3]
type, subtype, array = rec
return RangeCaster(name, pyrange,
oid=type, subtype_oid=subtype, array_oid=array)
@ -440,7 +423,7 @@ WHERE t.oid = %s::regtype
m = self._re_range.match(s)
if m is None:
raise InterfaceError(f"failed to parse range: '{s}'")
raise InterfaceError("failed to parse range: '%s'" % s)
lower = m.group(3)
if lower is None:
@ -520,7 +503,8 @@ class NumberRangeAdapter(RangeAdapter):
else:
upper = ''
return (f"'{r._bounds[0]}{lower},{upper}{r._bounds[1]}'").encode('ascii')
return ("'%s%s,%s%s'" % (
r._bounds[0], lower, upper, r._bounds[1])).encode('ascii')
# TODO: probably won't work with infs, nans and other tricky cases.

lib/compat.py
@ -0,0 +1,19 @@
import sys
__all__ = ['string_types', 'text_type', 'lru_cache']
if sys.version_info[0] == 2:
# Python 2
PY2 = True
PY3 = False
string_types = basestring,
text_type = unicode
from ._lru_cache import lru_cache
else:
# Python 3
PY2 = False
PY3 = True
string_types = str,
text_type = str
from functools import lru_cache
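# Illustrative use of the names above (a sketch, not part of the module):
if __name__ == '__main__':
    @lru_cache(maxsize=16)
    def normalize(name):
        # accept str on Python 3, both str and unicode on Python 2
        if not isinstance(name, string_types):
            raise TypeError("expected a string")
        return name.lower()

    print(normalize("ABC"))   # prints: abc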

@ -1,11 +1,10 @@
"""Error codes for PostgreSQL
"""Error codes for PostgresSQL
This module contains symbolic names for all PostgreSQL error codes.
"""
# psycopg2/errorcodes.py - PostgreSQL error codes
#
# Copyright (C) 2006-2019 Johan Dahlin <jdahlin@async.com.br>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -43,8 +42,7 @@ def lookup(code, _cache={}):
tmp = {}
for k, v in globals().items():
if isinstance(v, str) and len(v) in (2, 5):
# Strip trailing underscore used to disambiguate duplicate values
tmp[v] = k.rstrip("_")
tmp[v] = k
assert tmp
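On master the trailing underscore (used to disambiguate duplicated mnemonics such as STRING_DATA_RIGHT_TRUNCATION, which appears under both '01004' and '22001') is stripped before the value is cached, so lookup() keeps returning the plain names. Typical use, with the error code taken from a caught exception's pgcode attribute:

    from psycopg2 import errorcodes

    errorcodes.lookup('23505')      # 'UNIQUE_VIOLATION'
    errorcodes.lookup('25P02')      # 'IN_FAILED_SQL_TRANSACTION'

    # after a failed statement:
    # except psycopg2.Error as e:
    #     print(errorcodes.lookup(e.pgcode))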
@ -107,7 +105,7 @@ SUCCESSFUL_COMPLETION = '00000'
# Class 01 - Warning
WARNING = '01000'
NULL_VALUE_ELIMINATED_IN_SET_FUNCTION = '01003'
STRING_DATA_RIGHT_TRUNCATION_ = '01004'
STRING_DATA_RIGHT_TRUNCATION = '01004'
PRIVILEGE_NOT_REVOKED = '01006'
PRIVILEGE_NOT_GRANTED = '01007'
IMPLICIT_ZERO_BIT_PADDING = '01008'
@ -165,7 +163,7 @@ DATA_EXCEPTION = '22000'
STRING_DATA_RIGHT_TRUNCATION = '22001'
NULL_VALUE_NO_INDICATOR_PARAMETER = '22002'
NUMERIC_VALUE_OUT_OF_RANGE = '22003'
NULL_VALUE_NOT_ALLOWED_ = '22004'
NULL_VALUE_NOT_ALLOWED = '22004'
ERROR_IN_ASSIGNMENT = '22005'
INVALID_DATETIME_FORMAT = '22007'
DATETIME_FIELD_OVERFLOW = '22008'
@ -207,23 +205,6 @@ TRIM_ERROR = '22027'
ARRAY_SUBSCRIPT_ERROR = '2202E'
INVALID_TABLESAMPLE_REPEAT = '2202G'
INVALID_TABLESAMPLE_ARGUMENT = '2202H'
DUPLICATE_JSON_OBJECT_KEY_VALUE = '22030'
INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION = '22031'
INVALID_JSON_TEXT = '22032'
INVALID_SQL_JSON_SUBSCRIPT = '22033'
MORE_THAN_ONE_SQL_JSON_ITEM = '22034'
NO_SQL_JSON_ITEM = '22035'
NON_NUMERIC_SQL_JSON_ITEM = '22036'
NON_UNIQUE_KEYS_IN_A_JSON_OBJECT = '22037'
SINGLETON_SQL_JSON_ITEM_REQUIRED = '22038'
SQL_JSON_ARRAY_NOT_FOUND = '22039'
SQL_JSON_MEMBER_NOT_FOUND = '2203A'
SQL_JSON_NUMBER_NOT_FOUND = '2203B'
SQL_JSON_OBJECT_NOT_FOUND = '2203C'
TOO_MANY_JSON_ARRAY_ELEMENTS = '2203D'
TOO_MANY_JSON_OBJECT_MEMBERS = '2203E'
SQL_JSON_SCALAR_REQUIRED = '2203F'
SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE = '2203G'
FLOATING_POINT_EXCEPTION = '22P01'
INVALID_TEXT_REPRESENTATION = '22P02'
INVALID_BINARY_REPRESENTATION = '22P03'
@ -256,7 +237,6 @@ HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL = '25008'
NO_ACTIVE_SQL_TRANSACTION = '25P01'
IN_FAILED_SQL_TRANSACTION = '25P02'
IDLE_IN_TRANSACTION_SESSION_TIMEOUT = '25P03'
TRANSACTION_TIMEOUT = '25P04'
# Class 26 - Invalid SQL Statement Name
INVALID_SQL_STATEMENT_NAME = '26000'
@ -277,9 +257,9 @@ INVALID_TRANSACTION_TERMINATION = '2D000'
# Class 2F - SQL Routine Exception
SQL_ROUTINE_EXCEPTION = '2F000'
MODIFYING_SQL_DATA_NOT_PERMITTED_ = '2F002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED_ = '2F003'
READING_SQL_DATA_NOT_PERMITTED_ = '2F004'
MODIFYING_SQL_DATA_NOT_PERMITTED = '2F002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED = '2F003'
READING_SQL_DATA_NOT_PERMITTED = '2F004'
FUNCTION_EXECUTED_NO_RETURN_STATEMENT = '2F005'
# Class 34 - Invalid Cursor Name
@ -393,7 +373,6 @@ ADMIN_SHUTDOWN = '57P01'
CRASH_SHUTDOWN = '57P02'
CANNOT_CONNECT_NOW = '57P03'
DATABASE_DROPPED = '57P04'
IDLE_SESSION_TIMEOUT = '57P05'
# Class 58 - System Error (errors external to PostgreSQL itself)
SYSTEM_ERROR = '58000'


@ -4,7 +4,6 @@
# psycopg/errors.py - SQLSTATE and DB-API exceptions
#
# Copyright (C) 2018-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published


@ -13,7 +13,6 @@ This module holds all the extensions to the DBAPI-2.0 provided by psycopg.
# psycopg/extensions.py - DBAPI-2.0 extensions specific to psycopg
#
# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -42,6 +41,14 @@ from psycopg2._psycopg import ( # noqa
ROWIDARRAY, STRINGARRAY, TIME, TIMEARRAY, UNICODE, UNICODEARRAY,
AsIs, Binary, Boolean, Float, Int, QuotedString, )
try:
from psycopg2._psycopg import ( # noqa
MXDATE, MXDATETIME, MXDATETIMETZ, MXINTERVAL, MXTIME, MXDATEARRAY,
MXDATETIMEARRAY, MXDATETIMETZARRAY, MXINTERVALARRAY, MXTIMEARRAY,
DateFromMx, TimeFromMx, TimestampFromMx, IntervalFromMx, )
except ImportError:
pass
from psycopg2._psycopg import ( # noqa
PYDATE, PYDATETIME, PYDATETIMETZ, PYINTERVAL, PYTIME, PYDATEARRAY,
PYDATETIMEARRAY, PYDATETIMETZARRAY, PYINTERVALARRAY, PYTIMEARRAY,
@ -98,7 +105,7 @@ def register_adapter(typ, callable):
# The SQL_IN class is the official adapter for tuples starting from 2.0.6.
class SQL_IN:
class SQL_IN(object):
"""Adapt any iterable to an SQL quotable object."""
def __init__(self, seq):
self._seq = seq
@ -122,7 +129,7 @@ class SQL_IN:
return str(self.getquoted())
class NoneAdapter:
class NoneAdapter(object):
"""Adapt None to NULL.
This adapter is not used normally as a fast path in mogrify uses NULL,
@ -160,7 +167,7 @@ def make_dsn(dsn=None, **kwargs):
tmp.update(kwargs)
kwargs = tmp
dsn = " ".join(["{}={}".format(k, _param_escape(str(v)))
dsn = " ".join(["%s=%s" % (k, _param_escape(str(v)))
for (k, v) in kwargs.items()])
# verify that the returned dsn is valid
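A small sketch of make_dsn() together with its counterpart parse_dsn() (both live in psycopg2.extensions; the credentials are placeholders):

    from psycopg2.extensions import make_dsn, parse_dsn

    # Keyword arguments are merged into the base DSN and values are escaped.
    dsn = make_dsn('dbname=test', user='postgres', password='a secret')
    parse_dsn(dsn)   # {'dbname': 'test', 'user': 'postgres', 'password': 'a secret'}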


@ -6,7 +6,6 @@ and classes until a better place in the distribution is found.
# psycopg/extras.py - miscellaneous extra goodies for psycopg
#
# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -38,7 +37,7 @@ from psycopg2 import extensions as _ext
from .extensions import cursor as _cursor
from .extensions import connection as _connection
from .extensions import adapt as _A, quote_ident
from functools import lru_cache
from .compat import PY2, PY3, lru_cache
from psycopg2._psycopg import ( # noqa
REPLICATION_PHYSICAL, REPLICATION_LOGICAL,
@ -72,47 +71,47 @@ class DictCursorBase(_cursor):
else:
raise NotImplementedError(
"DictCursorBase can't be instantiated without a row factory.")
super().__init__(*args, **kwargs)
super(DictCursorBase, self).__init__(*args, **kwargs)
self._query_executed = False
self._prefetch = False
self.row_factory = row_factory
def fetchone(self):
if self._prefetch:
res = super().fetchone()
res = super(DictCursorBase, self).fetchone()
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super().fetchone()
res = super(DictCursorBase, self).fetchone()
return res
def fetchmany(self, size=None):
if self._prefetch:
res = super().fetchmany(size)
res = super(DictCursorBase, self).fetchmany(size)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super().fetchmany(size)
res = super(DictCursorBase, self).fetchmany(size)
return res
def fetchall(self):
if self._prefetch:
res = super().fetchall()
res = super(DictCursorBase, self).fetchall()
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super().fetchall()
res = super(DictCursorBase, self).fetchall()
return res
def __iter__(self):
try:
if self._prefetch:
res = super().__iter__()
res = super(DictCursorBase, self).__iter__()
first = next(res)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super().__iter__()
res = super(DictCursorBase, self).__iter__()
first = next(res)
yield first
@ -125,30 +124,27 @@ class DictCursorBase(_cursor):
class DictConnection(_connection):
"""A connection that uses `DictCursor` automatically."""
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory', self.cursor_factory or DictCursor)
return super().cursor(*args, **kwargs)
kwargs.setdefault('cursor_factory', DictCursor)
return super(DictConnection, self).cursor(*args, **kwargs)
class DictCursor(DictCursorBase):
"""A cursor that keeps a list of column name -> index mappings__.
.. __: https://docs.python.org/glossary.html#term-mapping
"""
"""A cursor that keeps a list of column name -> index mappings."""
def __init__(self, *args, **kwargs):
kwargs['row_factory'] = DictRow
super().__init__(*args, **kwargs)
super(DictCursor, self).__init__(*args, **kwargs)
self._prefetch = True
def execute(self, query, vars=None):
self.index = OrderedDict()
self._query_executed = True
return super().execute(query, vars)
return super(DictCursor, self).execute(query, vars)
def callproc(self, procname, vars=None):
self.index = OrderedDict()
self._query_executed = True
return super().callproc(procname, vars)
return super(DictCursor, self).callproc(procname, vars)
def _build_index(self):
if self._query_executed and self.description:
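A minimal DictCursor sketch, assuming a reachable test database (placeholder DSN). Rows support both column-name and positional access:

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect("dbname=psycopg2_test")   # placeholder DSN
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("SELECT 1 AS id, 'foo' AS label")
    row = cur.fetchone()
    row['label']    # 'foo'  -- access by column name
    row[0]          # 1      -- positional access still works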
@ -169,22 +165,22 @@ class DictRow(list):
def __getitem__(self, x):
if not isinstance(x, (int, slice)):
x = self._index[x]
return super().__getitem__(x)
return super(DictRow, self).__getitem__(x)
def __setitem__(self, x, v):
if not isinstance(x, (int, slice)):
x = self._index[x]
super().__setitem__(x, v)
super(DictRow, self).__setitem__(x, v)
def items(self):
g = super().__getitem__
g = super(DictRow, self).__getitem__
return ((n, g(self._index[n])) for n in self._index)
def keys(self):
return iter(self._index)
def values(self):
g = super().__getitem__
g = super(DictRow, self).__getitem__
return (g(self._index[n]) for n in self._index)
def get(self, x, default=None):
@ -199,10 +195,6 @@ class DictRow(list):
def __contains__(self, x):
return x in self._index
def __reduce__(self):
# this is apparently useless, but it fixes #1073
return super().__reduce__()
def __getstate__(self):
return self[:], self._index.copy()
@ -210,12 +202,27 @@ class DictRow(list):
self[:] = data[0]
self._index = data[1]
if PY2:
iterkeys = keys
itervalues = values
iteritems = items
has_key = __contains__
def keys(self):
return list(self.iterkeys())
def values(self):
return tuple(self.itervalues())
def items(self):
return list(self.iteritems())
class RealDictConnection(_connection):
"""A connection that uses `RealDictCursor` automatically."""
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory', self.cursor_factory or RealDictCursor)
return super().cursor(*args, **kwargs)
kwargs.setdefault('cursor_factory', RealDictCursor)
return super(RealDictConnection, self).cursor(*args, **kwargs)
class RealDictCursor(DictCursorBase):
@ -228,17 +235,17 @@ class RealDictCursor(DictCursorBase):
"""
def __init__(self, *args, **kwargs):
kwargs['row_factory'] = RealDictRow
super().__init__(*args, **kwargs)
super(RealDictCursor, self).__init__(*args, **kwargs)
def execute(self, query, vars=None):
self.column_mapping = []
self._query_executed = True
return super().execute(query, vars)
return super(RealDictCursor, self).execute(query, vars)
def callproc(self, procname, vars=None):
self.column_mapping = []
self._query_executed = True
return super().callproc(procname, vars)
return super(RealDictCursor, self).callproc(procname, vars)
def _build_index(self):
if self._query_executed and self.description:
@ -246,46 +253,63 @@ class RealDictCursor(DictCursorBase):
self._query_executed = False
class RealDictRow(OrderedDict):
class RealDictRow(dict):
"""A `!dict` subclass representing a data record."""
def __init__(self, *args, **kwargs):
if args and isinstance(args[0], _cursor):
cursor = args[0]
args = args[1:]
else:
cursor = None
__slots__ = ('_column_mapping',)
super().__init__(*args, **kwargs)
def __init__(self, cursor):
super(RealDictRow, self).__init__()
# Required for named cursors
if cursor.description and not cursor.column_mapping:
cursor._build_index()
if cursor is not None:
# Required for named cursors
if cursor.description and not cursor.column_mapping:
cursor._build_index()
self._column_mapping = cursor.column_mapping
# Store the cols mapping in the dict itself until the row is fully
# populated, so we don't need to add attributes to the class
# (hence keeping its maintenance, special pickle support, etc.)
self[RealDictRow] = cursor.column_mapping
def __setitem__(self, name, value):
if type(name) == int:
name = self._column_mapping[name]
super(RealDictRow, self).__setitem__(name, value)
def __setitem__(self, key, value):
if RealDictRow in self:
# We are in the row building phase
mapping = self[RealDictRow]
super().__setitem__(mapping[key], value)
if key == len(mapping) - 1:
# Row building finished
del self[RealDictRow]
return
def __getstate__(self):
return self.copy(), self._column_mapping[:]
super().__setitem__(key, value)
def __setstate__(self, data):
self.update(data[0])
self._column_mapping = data[1]
def __iter__(self):
return iter(self._column_mapping)
def keys(self):
return iter(self._column_mapping)
def values(self):
return (self[k] for k in self._column_mapping)
def items(self):
return ((k, self[k]) for k in self._column_mapping)
if PY2:
iterkeys = keys
itervalues = values
iteritems = items
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
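RealDictCursor differs from DictCursor in that rows behave as plain mappings keyed only by column name, with no positional access. A minimal sketch under the same placeholder-DSN assumption:

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect("dbname=psycopg2_test")   # placeholder DSN
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute("SELECT 1 AS id, 'foo' AS label")
    row = cur.fetchone()
    row['label']            # 'foo'
    list(row.keys())        # ['id', 'label'] -- column order is preserved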
class NamedTupleConnection(_connection):
"""A connection that uses `NamedTupleCursor` automatically."""
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory', self.cursor_factory or NamedTupleCursor)
return super().cursor(*args, **kwargs)
kwargs.setdefault('cursor_factory', NamedTupleCursor)
return super(NamedTupleConnection, self).cursor(*args, **kwargs)
class NamedTupleCursor(_cursor):
@ -309,18 +333,18 @@ class NamedTupleCursor(_cursor):
def execute(self, query, vars=None):
self.Record = None
return super().execute(query, vars)
return super(NamedTupleCursor, self).execute(query, vars)
def executemany(self, query, vars):
self.Record = None
return super().executemany(query, vars)
return super(NamedTupleCursor, self).executemany(query, vars)
def callproc(self, procname, vars=None):
self.Record = None
return super().callproc(procname, vars)
return super(NamedTupleCursor, self).callproc(procname, vars)
def fetchone(self):
t = super().fetchone()
t = super(NamedTupleCursor, self).fetchone()
if t is not None:
nt = self.Record
if nt is None:
@ -328,14 +352,14 @@ class NamedTupleCursor(_cursor):
return nt._make(t)
def fetchmany(self, size=None):
ts = super().fetchmany(size)
ts = super(NamedTupleCursor, self).fetchmany(size)
nt = self.Record
if nt is None:
nt = self.Record = self._make_nt()
return list(map(nt._make, ts))
def fetchall(self):
ts = super().fetchall()
ts = super(NamedTupleCursor, self).fetchall()
nt = self.Record
if nt is None:
nt = self.Record = self._make_nt()
@ -343,7 +367,7 @@ class NamedTupleCursor(_cursor):
def __iter__(self):
try:
it = super().__iter__()
it = super(NamedTupleCursor, self).__iter__()
t = next(it)
nt = self.Record
@ -357,15 +381,18 @@ class NamedTupleCursor(_cursor):
except StopIteration:
return
# ascii except alnum and underscore
_re_clean = _re.compile(
'[' + _re.escape(' !"#$%&\'()*+,-./:;<=>?@[\\]^`{|}~') + ']')
def _make_nt(self):
key = tuple(d[0] for d in self.description) if self.description else ()
return self._cached_make_nt(key)
@classmethod
def _do_make_nt(cls, key):
def _do_make_nt(self, key):
fields = []
for s in key:
s = _re_clean.sub('_', s)
s = self._re_clean.sub('_', s)
# Python identifier cannot start with numbers, namedtuple fields
# cannot start with underscore. So...
if s[0] == '_' or '0' <= s[0] <= '9':
@ -375,15 +402,9 @@ class NamedTupleCursor(_cursor):
nt = namedtuple("Record", fields)
return nt
@lru_cache(512)
def _cached_make_nt(cls, key):
return cls._do_make_nt(key)
# Exposed for testability, and if someone wants to monkeypatch to tweak
# the cache size.
NamedTupleCursor._cached_make_nt = classmethod(_cached_make_nt)
# Exposed for testability, and if someone wants to monkeypatch to tweak
# the cache size.
_cached_make_nt = lru_cache(512)(_do_make_nt)
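NamedTupleCursor returns records with attribute access; on both branches the generated Record classes are memoized with an LRU cache keyed on the tuple of column names. A minimal sketch (placeholder DSN):

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect("dbname=psycopg2_test")   # placeholder DSN
    cur = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
    cur.execute("SELECT 1 AS id, 'foo' AS label")
    rec = cur.fetchone()
    rec.id, rec.label       # (1, 'foo')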
class LoggingConnection(_connection):
@ -395,12 +416,11 @@ class LoggingConnection(_connection):
def initialize(self, logobj):
"""Initialize the connection to log to `!logobj`.
The `!logobj` parameter can be an open file object or a Logger/LoggerAdapter
The `!logobj` parameter can be an open file object or a Logger
instance from the standard logging module.
"""
self._logobj = logobj
if _logging and isinstance(
logobj, (_logging.Logger, _logging.LoggerAdapter)):
if _logging and isinstance(logobj, _logging.Logger):
self.log = self._logtologger
else:
self.log = self._logtofile
@ -417,7 +437,7 @@ class LoggingConnection(_connection):
def _logtofile(self, msg, curs):
msg = self.filter(msg, curs)
if msg:
if isinstance(msg, bytes):
if PY3 and isinstance(msg, bytes):
msg = msg.decode(_ext.encodings[self.encoding], 'replace')
self._logobj.write(msg + _os.linesep)
@ -433,8 +453,8 @@ class LoggingConnection(_connection):
def cursor(self, *args, **kwargs):
self._check()
kwargs.setdefault('cursor_factory', self.cursor_factory or LoggingCursor)
return super().cursor(*args, **kwargs)
kwargs.setdefault('cursor_factory', LoggingCursor)
return super(LoggingConnection, self).cursor(*args, **kwargs)
class LoggingCursor(_cursor):
@ -442,13 +462,13 @@ class LoggingCursor(_cursor):
def execute(self, query, vars=None):
try:
return super().execute(query, vars)
return super(LoggingCursor, self).execute(query, vars)
finally:
self.connection.log(self.query, self)
def callproc(self, procname, vars=None):
try:
return super().callproc(procname, vars)
return super(LoggingCursor, self).callproc(procname, vars)
finally:
self.connection.log(self.query, self)
@ -471,13 +491,12 @@ class MinTimeLoggingConnection(LoggingConnection):
def filter(self, msg, curs):
t = (_time.time() - curs.timestamp) * 1000
if t > self._mintime:
if isinstance(msg, bytes):
if PY3 and isinstance(msg, bytes):
msg = msg.decode(_ext.encodings[self.encoding], 'replace')
return f"{msg}{_os.linesep} (execution time: {t} ms)"
return msg + _os.linesep + " (execution time: %d ms)" % t
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory',
self.cursor_factory or MinTimeLoggingCursor)
kwargs.setdefault('cursor_factory', MinTimeLoggingCursor)
return LoggingConnection.cursor(self, *args, **kwargs)
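A sketch of the logging connections (placeholder DSN). LoggingConnection must be initialized with a file-like object or a logging.Logger before use; MinTimeLoggingConnection works the same way but only records statements slower than the threshold passed to its initialize():

    import sys
    import psycopg2
    from psycopg2.extras import LoggingConnection

    conn = psycopg2.connect("dbname=psycopg2_test",        # placeholder DSN
                            connection_factory=LoggingConnection)
    conn.initialize(sys.stderr)        # master also accepts a LoggerAdapter
    cur = conn.cursor()
    cur.execute("SELECT pg_sleep(0.1)")   # the statement is echoed to stderr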
@ -497,14 +516,14 @@ class LogicalReplicationConnection(_replicationConnection):
def __init__(self, *args, **kwargs):
kwargs['replication_type'] = REPLICATION_LOGICAL
super().__init__(*args, **kwargs)
super(LogicalReplicationConnection, self).__init__(*args, **kwargs)
class PhysicalReplicationConnection(_replicationConnection):
def __init__(self, *args, **kwargs):
kwargs['replication_type'] = REPLICATION_PHYSICAL
super().__init__(*args, **kwargs)
super(PhysicalReplicationConnection, self).__init__(*args, **kwargs)
class StopReplication(Exception):
@ -525,7 +544,7 @@ class ReplicationCursor(_replicationCursor):
def create_replication_slot(self, slot_name, slot_type=None, output_plugin=None):
"""Create streaming replication slot."""
command = f"CREATE_REPLICATION_SLOT {quote_ident(slot_name, self)} "
command = "CREATE_REPLICATION_SLOT %s " % quote_ident(slot_name, self)
if slot_type is None:
slot_type = self.connection.replication_type
@ -536,7 +555,7 @@ class ReplicationCursor(_replicationCursor):
"output plugin name is required to create "
"logical replication slot")
command += f"LOGICAL {quote_ident(output_plugin, self)}"
command += "LOGICAL %s" % quote_ident(output_plugin, self)
elif slot_type == REPLICATION_PHYSICAL:
if output_plugin is not None:
@ -548,19 +567,18 @@ class ReplicationCursor(_replicationCursor):
else:
raise psycopg2.ProgrammingError(
f"unrecognized replication type: {repr(slot_type)}")
"unrecognized replication type: %s" % repr(slot_type))
self.execute(command)
def drop_replication_slot(self, slot_name):
"""Drop streaming replication slot."""
command = f"DROP_REPLICATION_SLOT {quote_ident(slot_name, self)}"
command = "DROP_REPLICATION_SLOT %s" % quote_ident(slot_name, self)
self.execute(command)
def start_replication(
self, slot_name=None, slot_type=None, start_lsn=0,
timeline=0, options=None, decode=False, status_interval=10):
def start_replication(self, slot_name=None, slot_type=None, start_lsn=0,
timeline=0, options=None, decode=False):
"""Start replication stream."""
command = "START_REPLICATION "
@ -570,7 +588,7 @@ class ReplicationCursor(_replicationCursor):
if slot_type == REPLICATION_LOGICAL:
if slot_name:
command += f"SLOT {quote_ident(slot_name, self)} "
command += "SLOT %s " % quote_ident(slot_name, self)
else:
raise psycopg2.ProgrammingError(
"slot name is required for logical replication")
@ -579,18 +597,19 @@ class ReplicationCursor(_replicationCursor):
elif slot_type == REPLICATION_PHYSICAL:
if slot_name:
command += f"SLOT {quote_ident(slot_name, self)} "
command += "SLOT %s " % quote_ident(slot_name, self)
# don't add "PHYSICAL", before 9.4 it was just START_REPLICATION XXX/XXX
else:
raise psycopg2.ProgrammingError(
f"unrecognized replication type: {repr(slot_type)}")
"unrecognized replication type: %s" % repr(slot_type))
if type(start_lsn) is str:
lsn = start_lsn.split('/')
lsn = f"{int(lsn[0], 16):X}/{int(lsn[1], 16):08X}"
lsn = "%X/%08X" % (int(lsn[0], 16), int(lsn[1], 16))
else:
lsn = f"{start_lsn >> 32 & 4294967295:X}/{start_lsn & 4294967295:08X}"
lsn = "%X/%08X" % ((start_lsn >> 32) & 0xFFFFFFFF,
start_lsn & 0xFFFFFFFF)
command += lsn
@ -599,7 +618,7 @@ class ReplicationCursor(_replicationCursor):
raise psycopg2.ProgrammingError(
"cannot specify timeline for logical replication")
command += f" TIMELINE {timeline}"
command += " TIMELINE %d" % timeline
if options:
if slot_type == REPLICATION_PHYSICAL:
@ -610,11 +629,10 @@ class ReplicationCursor(_replicationCursor):
for k, v in options.items():
if not command.endswith('('):
command += ", "
command += f"{quote_ident(k, self)} {_A(str(v))}"
command += "%s %s" % (quote_ident(k, self), _A(str(v)))
command += ")"
self.start_replication_expert(
command, decode=decode, status_interval=status_interval)
self.start_replication_expert(command, decode=decode)
# allows replication cursors to be used in select.select() directly
def fileno(self):
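A hedged sketch of the logical replication API shown above. It assumes a server with wal_level=logical, a role allowed to replicate, and the test_decoding output plugin; the slot name and DSN are placeholders. The status_interval argument to start_replication() exists only on master:

    import psycopg2
    from psycopg2.extras import LogicalReplicationConnection

    conn = psycopg2.connect("dbname=psycopg2_test",        # placeholder DSN
                            connection_factory=LogicalReplicationConnection)
    cur = conn.cursor()
    cur.create_replication_slot('demo_slot', output_plugin='test_decoding')
    cur.start_replication(slot_name='demo_slot', decode=True)

    def consume(msg):
        print(msg.payload)
        msg.cursor.send_feedback(flush_lsn=msg.data_start)

    cur.consume_stream(consume)        # blocks, streaming changes as they arrive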
@ -623,7 +641,7 @@ class ReplicationCursor(_replicationCursor):
# a dbtype and adapter for Python UUID type
class UUID_adapter:
class UUID_adapter(object):
"""Adapt Python's uuid.UUID__ type to PostgreSQL's uuid__.
.. __: https://docs.python.org/library/uuid.html
@ -638,10 +656,10 @@ class UUID_adapter:
return self
def getquoted(self):
return (f"'{self._uuid}'::uuid").encode('utf8')
return ("'%s'::uuid" % self._uuid).encode('utf8')
def __str__(self):
return f"'{self._uuid}'::uuid"
return "'%s'::uuid" % self._uuid
def register_uuid(oids=None, conn_or_curs=None):
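A minimal register_uuid() sketch (placeholder DSN); calling it with no arguments registers the typecaster globally, so uuid columns round-trip as uuid.UUID objects:

    import uuid
    import psycopg2
    import psycopg2.extras

    psycopg2.extras.register_uuid()
    conn = psycopg2.connect("dbname=psycopg2_test")   # placeholder DSN
    cur = conn.cursor()
    cur.execute("SELECT %s::uuid", (uuid.uuid4(),))
    cur.fetchone()[0]                                 # a uuid.UUID instance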
@ -678,7 +696,7 @@ def register_uuid(oids=None, conn_or_curs=None):
# a type, dbtype and adapter for PostgreSQL inet type
class Inet:
class Inet(object):
"""Wrap a string to allow for correct SQL-quoting of inet values.
Note that this adapter does NOT check the passed value to make
@ -690,7 +708,7 @@ class Inet:
self.addr = addr
def __repr__(self):
return f"{self.__class__.__name__}({self.addr!r})"
return "%s(%r)" % (self.__class__.__name__, self.addr)
def prepare(self, conn):
self._conn = conn
@ -763,7 +781,7 @@ def wait_select(conn):
elif state == POLL_WRITE:
select.select([], [conn.fileno()], [])
else:
raise conn.OperationalError(f"bad state from poll: {state}")
raise conn.OperationalError("bad state from poll: %s" % state)
except KeyboardInterrupt:
conn.cancel()
# the loop will be broken by a server error
@ -785,7 +803,7 @@ def _solve_conn_curs(conn_or_curs):
return conn, curs
class HstoreAdapter:
class HstoreAdapter(object):
"""Adapt a Python dict to the hstore syntax."""
def __init__(self, wrapped):
self.wrapped = wrapped
@ -865,7 +883,7 @@ class HstoreAdapter:
for m in self._re_hstore.finditer(s):
if m is None or m.start() != start:
raise psycopg2.InterfaceError(
f"error parsing hstore pair at char {start}")
"error parsing hstore pair at char %d" % start)
k = _bsdec.sub(r'\1', m.group(1))
v = m.group(2)
if v is not None:
@ -876,7 +894,7 @@ class HstoreAdapter:
if start < len(s):
raise psycopg2.InterfaceError(
f"error parsing hstore: unparsed data after char {start}")
"error parsing hstore: unparsed data after char %d" % start)
return rv
@ -904,11 +922,12 @@ class HstoreAdapter:
rv0, rv1 = [], []
# get the oid for the hstore
curs.execute(f"""SELECT t.oid, {typarray}
curs.execute("""\
SELECT t.oid, %s
FROM pg_type t JOIN pg_namespace ns
ON typnamespace = ns.oid
WHERE typname = 'hstore';
""")
""" % typarray)
for oids in curs:
rv0.append(oids[0])
rv1.append(oids[1])
@ -972,7 +991,12 @@ def register_hstore(conn_or_curs, globally=False, unicode=False,
array_oid = tuple([x for x in array_oid if x])
# create and register the typecaster
HSTORE = _ext.new_type(oid, "HSTORE", HstoreAdapter.parse)
if PY2 and unicode:
cast = HstoreAdapter.parse_unicode
else:
cast = HstoreAdapter.parse
HSTORE = _ext.new_type(oid, "HSTORE", cast)
_ext.register_type(HSTORE, not globally and conn_or_curs or None)
_ext.register_adapter(dict, HstoreAdapter)
@ -981,7 +1005,7 @@ def register_hstore(conn_or_curs, globally=False, unicode=False,
_ext.register_type(HSTOREARRAY, not globally and conn_or_curs or None)
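A minimal register_hstore() sketch; it assumes the hstore extension is installed in the target database and the DSN is a placeholder:

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect("dbname=psycopg2_test")   # placeholder DSN
    psycopg2.extras.register_hstore(conn)
    cur = conn.cursor()
    cur.execute("SELECT %s", ({'a': '1', 'b': None},))
    cur.fetchone()[0]                                 # {'a': '1', 'b': None}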
class CompositeCaster:
class CompositeCaster(object):
"""Helps conversion of a PostgreSQL composite type into a Python object.
The class is usually created by the `register_composite()` function.
@ -1002,7 +1026,7 @@ class CompositeCaster:
self.typecaster = _ext.new_type((oid,), name, self.parse)
if array_oid:
self.array_typecaster = _ext.new_array_type(
(array_oid,), f"{name}ARRAY", self.typecaster)
(array_oid,), "%sARRAY" % name, self.typecaster)
else:
self.array_typecaster = None
@ -1046,7 +1070,7 @@ class CompositeCaster:
rv = []
for m in self._re_tokenize.finditer(s):
if m is None:
raise psycopg2.InterfaceError(f"can't parse type: {s!r}")
raise psycopg2.InterfaceError("can't parse type: %r" % s)
if m.group(1) is not None:
rv.append(None)
elif m.group(2) is not None:
@ -1057,7 +1081,6 @@ class CompositeCaster:
return rv
def _create_type(self, name, attnames):
name = _re_clean.sub('_', name)
self.type = namedtuple(name, attnames)
self._ctor = self.type._make
@ -1095,46 +1118,14 @@ ORDER BY attnum;
recs = curs.fetchall()
if not recs:
# The above algorithm doesn't work for customized search_path
# (#1487) The implementation below works better, but, to guarantee
# backwards compatibility, use it only if the original one failed.
try:
savepoint = False
# Because we executed statements earlier, we are either INTRANS
# or we are IDLE only if the transaction is autocommit, in
# which case we don't need the savepoint anyway.
if conn.status == _ext.STATUS_IN_TRANSACTION:
curs.execute("SAVEPOINT register_type")
savepoint = True
curs.execute("""\
SELECT t.oid, %s, attname, atttypid, typname, nspname
FROM pg_type t
JOIN pg_namespace ns ON typnamespace = ns.oid
JOIN pg_attribute a ON attrelid = typrelid
WHERE t.oid = %%s::regtype
AND attnum > 0 AND NOT attisdropped
ORDER BY attnum;
""" % typarray, (name, ))
except psycopg2.ProgrammingError:
pass
else:
recs = curs.fetchall()
if recs:
tname = recs[0][4]
schema = recs[0][5]
finally:
if savepoint:
curs.execute("ROLLBACK TO SAVEPOINT register_type")
# revert the status of the connection as before the command
if conn_status != _ext.STATUS_IN_TRANSACTION and not conn.autocommit:
if (conn_status != _ext.STATUS_IN_TRANSACTION
and not conn.autocommit):
conn.rollback()
if not recs:
raise psycopg2.ProgrammingError(
f"PostgreSQL type '{name}' not found")
"PostgreSQL type '%s' not found" % name)
type_oid = recs[0][0]
array_oid = recs[0][1]
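A minimal register_composite() sketch (placeholder DSN; the card type is created only for the example). The attributes resolved above drive both the typecaster and the namedtuple factory:

    import psycopg2
    import psycopg2.extras

    conn = psycopg2.connect("dbname=psycopg2_test")   # placeholder DSN
    cur = conn.cursor()
    cur.execute("CREATE TYPE card AS (value int, suit text)")
    psycopg2.extras.register_composite('card', cur)
    cur.execute("SELECT (8, 'hearts')::card")
    cur.fetchone()[0]       # card(value=8, suit='hearts'), a namedtuple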
@ -1333,8 +1324,3 @@ def _split_sql(sql):
raise ValueError("the query doesn't contain any '%s' placeholder")
return pre, post
# ascii except alnum and underscore
_re_clean = _re.compile(
'[' + _re.escape(' !"#$%&\'()*+,-./:;<=>?@[\\]^`{|}~') + ']')


@ -5,7 +5,6 @@ This module implements thread-safe (and not) connection pools.
# psycopg/pool.py - pooling code for psycopg
#
# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -33,7 +32,7 @@ class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool:
class AbstractConnectionPool(object):
"""Generic key-based pooling code."""
def __init__(self, minconn, maxconn, *args, **kwargs):
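A minimal pooling sketch using the threaded implementation built on this base class (placeholder DSN):

    from psycopg2.pool import ThreadedConnectionPool

    pool = ThreadedConnectionPool(1, 5, "dbname=psycopg2_test")  # minconn, maxconn, placeholder DSN
    conn = pool.getconn()
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT 1")
    finally:
        pool.putconn(conn)
    pool.closeall()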


@ -4,7 +4,6 @@
# psycopg/sql.py - SQL composition utility module
#
# Copyright (C) 2016-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -27,12 +26,13 @@
import string
from psycopg2 import extensions as ext
from psycopg2.compat import PY3, string_types
_formatter = string.Formatter()
class Composable:
class Composable(object):
"""
Abstract base class for objects that can be used to compose an SQL string.
@ -50,7 +50,7 @@ class Composable:
self._wrapped = wrapped
def __repr__(self):
return f"{self.__class__.__name__}({self._wrapped!r})"
return "%s(%r)" % (self.__class__.__name__, self._wrapped)
def as_string(self, context):
"""
@ -106,10 +106,10 @@ class Composed(Composable):
for i in seq:
if not isinstance(i, Composable):
raise TypeError(
f"Composed elements must be Composable, got {i!r} instead")
"Composed elements must be Composable, got %r instead" % i)
wrapped.append(i)
super().__init__(wrapped)
super(Composed, self).__init__(wrapped)
@property
def seq(self):
@ -147,7 +147,7 @@ class Composed(Composable):
"foo", "bar"
"""
if isinstance(joiner, str):
if isinstance(joiner, string_types):
joiner = SQL(joiner)
elif not isinstance(joiner, SQL):
raise TypeError(
@ -179,9 +179,9 @@ class SQL(Composable):
select "foo", "bar" from "table"
"""
def __init__(self, string):
if not isinstance(string, str):
if not isinstance(string, string_types):
raise TypeError("SQL values must be strings")
super().__init__(string)
super(SQL, self).__init__(string)
@property
def string(self):
@ -323,10 +323,10 @@ class Identifier(Composable):
raise TypeError("Identifier cannot be empty")
for s in strings:
if not isinstance(s, str):
if not isinstance(s, string_types):
raise TypeError("SQL identifier parts must be strings")
super().__init__(strings)
super(Identifier, self).__init__(strings)
@property
def strings(self):
@ -344,7 +344,9 @@ class Identifier(Composable):
"the Identifier wraps more than one than one string")
def __repr__(self):
return f"{self.__class__.__name__}({', '.join(map(repr, self._wrapped))})"
return "%s(%s)" % (
self.__class__.__name__,
', '.join(map(repr, self._wrapped)))
def as_string(self, context):
return '.'.join(ext.quote_ident(s, context) for s in self._wrapped)
@ -389,7 +391,7 @@ class Literal(Composable):
a.prepare(conn)
rv = a.getquoted()
if isinstance(rv, bytes):
if PY3 and isinstance(rv, bytes):
rv = rv.decode(ext.encodings[conn.encoding])
return rv
@ -423,14 +425,14 @@ class Placeholder(Composable):
"""
def __init__(self, name=None):
if isinstance(name, str):
if isinstance(name, string_types):
if ')' in name:
raise ValueError(f"invalid name: {name!r}")
raise ValueError("invalid name: %r" % name)
elif name is not None:
raise TypeError(f"expected string or None as name, got {name!r}")
raise TypeError("expected string or None as name, got %r" % name)
super().__init__(name)
super(Placeholder, self).__init__(name)
@property
def name(self):
@ -438,14 +440,12 @@ class Placeholder(Composable):
return self._wrapped
def __repr__(self):
if self._wrapped is None:
return f"{self.__class__.__name__}()"
else:
return f"{self.__class__.__name__}({self._wrapped!r})"
return "Placeholder(%r)" % (
self._wrapped if self._wrapped is not None else '',)
def as_string(self, context):
if self._wrapped is not None:
return f"%({self._wrapped})s"
return "%%(%s)s" % self._wrapped
else:
return "%s"
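A small composition sketch tying the pieces above together; as_string() needs a connection or cursor for quoting, and execution goes through the normal cursor API:

    from psycopg2 import sql

    query = sql.SQL("insert into {table} ({fields}) values ({values})").format(
        table=sql.Identifier('my_table'),
        fields=sql.SQL(', ').join(map(sql.Identifier, ['id', 'label'])),
        values=sql.SQL(', ').join([sql.Placeholder()] * 2))

    # query.as_string(conn) -> 'insert into "my_table" ("id", "label") values (%s, %s)'
    # cur.execute(query, (42, 'foo'))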


@ -7,7 +7,6 @@ functions or used to set the .tzinfo_factory attribute in cursors.
# psycopg/tz.py - tzinfo implementation
#
# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
@ -45,11 +44,6 @@ class FixedOffsetTimezone(datetime.tzinfo):
offset and name that instance will be returned. This saves memory and
improves comparability.
.. versionchanged:: 2.9
The constructor can take either a timedelta or a number of minutes of
offset. Previously only minutes were supported.
.. __: https://docs.python.org/library/datetime.html
"""
_name = None
@ -59,9 +53,7 @@ class FixedOffsetTimezone(datetime.tzinfo):
def __init__(self, offset=None, name=None):
if offset is not None:
if not isinstance(offset, datetime.timedelta):
offset = datetime.timedelta(minutes=offset)
self._offset = offset
self._offset = datetime.timedelta(minutes=offset)
if name is not None:
self._name = name
@ -72,28 +64,18 @@ class FixedOffsetTimezone(datetime.tzinfo):
try:
return cls._cache[key]
except KeyError:
tz = super().__new__(cls, offset, name)
tz = super(FixedOffsetTimezone, cls).__new__(cls, offset, name)
cls._cache[key] = tz
return tz
def __repr__(self):
offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
return "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=%r)" \
% (self._offset, self._name)
def __eq__(self, other):
if isinstance(other, FixedOffsetTimezone):
return self._offset == other._offset
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, FixedOffsetTimezone):
return self._offset != other._offset
else:
return NotImplemented
% (offset_mins, self._name)
def __getinitargs__(self):
return self._offset, self._name
offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
return offset_mins, self._name
def utcoffset(self, dt):
return self._offset
@ -101,16 +83,14 @@ class FixedOffsetTimezone(datetime.tzinfo):
def tzname(self, dt):
if self._name is not None:
return self._name
minutes, seconds = divmod(self._offset.total_seconds(), 60)
hours, minutes = divmod(minutes, 60)
rv = "%+03d" % hours
if minutes or seconds:
rv += ":%02d" % minutes
if seconds:
rv += ":%02d" % seconds
return rv
else:
seconds = self._offset.seconds + self._offset.days * 86400
hours, seconds = divmod(seconds, 3600)
minutes = seconds / 60
if minutes:
return "%+03d:%d" % (hours, minutes)
else:
return "%+03d" % hours
def dst(self, dt):
return ZERO
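A minimal FixedOffsetTimezone sketch; on the 2_8 branch the offset must be given in minutes, while master also accepts a datetime.timedelta:

    import datetime
    from psycopg2.tz import FixedOffsetTimezone

    tz = FixedOffsetTimezone(offset=-300, name=None)   # -300 minutes = UTC-5
    datetime.datetime(2019, 5, 1, 12, 0, tzinfo=tz).isoformat()
    # '2019-05-01T12:00:00-05:00'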


@ -1,7 +1,6 @@
/* adapter_asis.c - adapt types as they are
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -45,12 +44,14 @@ asis_getquoted(asisObject *self, PyObject *args)
}
else {
rv = PyObject_Str(self->wrapped);
/* unicode to bytes */
#if PY_3
/* unicode to bytes in Py3 */
if (rv) {
PyObject *tmp = PyUnicode_AsUTF8String(rv);
Py_DECREF(rv);
rv = tmp;
}
#endif
}
return rv;


@ -1,7 +1,6 @@
/* adapter_asis.h - definition for the psycopg AsIs type wrapper
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_binary.c - Binary objects
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -76,6 +75,15 @@ binary_quote(binaryObject *self)
buffer_len = view.len;
}
#if PY_2
if (!buffer && (Bytes_Check(self->wrapped) || PyBuffer_Check(self->wrapped))) {
if (PyObject_AsReadBuffer(self->wrapped, (const void **)&buffer,
&buffer_len) < 0) {
goto exit;
}
}
#endif
if (!buffer) {
goto exit;
}


@ -1,7 +1,6 @@
/* adapter_binary.h - definition for the Binary type
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_datetime.c - python date/time objects
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -423,8 +422,8 @@ psyco_TimeFromTicks(PyObject *self, PyObject *args)
PyObject *
psyco_TimestampFromTicks(PyObject *self, PyObject *args)
{
pydatetimeObject *wrapper = NULL;
PyObject *dt_aware = NULL;
PyObject *m = NULL;
PyObject *tz = NULL;
PyObject *res = NULL;
struct tm tm;
time_t t;
@ -433,6 +432,10 @@ psyco_TimestampFromTicks(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "d", &ticks))
return NULL;
/* get psycopg2.tz.LOCAL from pythonland */
if (!(m = PyImport_ImportModule("psycopg2.tz"))) { goto exit; }
if (!(tz = PyObject_GetAttrString(m, "LOCAL"))) { goto exit; }
t = (time_t)floor(ticks);
ticks -= (double)t;
if (!localtime_r(&t, &tm)) {
@ -440,29 +443,14 @@ psyco_TimestampFromTicks(PyObject *self, PyObject *args)
goto exit;
}
/* Convert the tm to a wrapper containing a naive datetime.datetime */
if (!(wrapper = (pydatetimeObject *)_psyco_Timestamp(
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, (double)tm.tm_sec + ticks, NULL))) {
goto exit;
}
/* Localize the datetime and assign it back to the wrapper */
if (!(dt_aware = PyObject_CallMethod(
wrapper->wrapped, "astimezone", NULL))) {
goto exit;
}
Py_CLEAR(wrapper->wrapped);
wrapper->wrapped = dt_aware;
dt_aware = NULL;
/* the wrapper is ready to be returned */
res = (PyObject *)wrapper;
wrapper = NULL;
res = _psyco_Timestamp(
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, (double)tm.tm_sec + ticks,
tz);
exit:
Py_XDECREF(dt_aware);
Py_XDECREF(wrapper);
Py_XDECREF(tz);
Py_XDECREF(m);
return res;
}


@ -1,7 +1,6 @@
/* adapter_datetime.h - definition for the python date/time types
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_list.c - python list objects
*
* Copyright (C) 2004-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_list.h - definition for the python list types
*
* Copyright (C) 2004-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -0,0 +1,301 @@
/* adapter_mxdatetime.c - mx date/time objects
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
*
* This file is part of psycopg.
*
* psycopg2 is free software: you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In addition, as a special exception, the copyright holders give
* permission to link this program with the OpenSSL library (or with
* modified versions of OpenSSL that use the same license as OpenSSL),
* and distribute linked combinations including the two.
*
* You must obey the GNU Lesser General Public License in all respects for
* all of the code used other than OpenSSL.
*
* psycopg2 is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*/
#define PSYCOPG_MODULE
#include "psycopg/psycopg.h"
#include "psycopg/adapter_mxdatetime.h"
#include "psycopg/microprotocols_proto.h"
#include <mxDateTime.h>
#include <string.h>
/* Return 0 on success, -1 on failure, but don't set an exception */
int
psyco_adapter_mxdatetime_init(void)
{
if (mxDateTime_ImportModuleAndAPI()) {
Dprintf("psyco_adapter_mxdatetime_init: mx.DateTime initialization failed");
PyErr_Clear();
return -1;
}
return 0;
}
/* mxdatetime_str, mxdatetime_getquoted - return result of quoting */
static PyObject *
mxdatetime_str(mxdatetimeObject *self)
{
mxDateTimeObject *dt;
mxDateTimeDeltaObject *dtd;
char buf[128] = { 0, };
switch (self->type) {
case PSYCO_MXDATETIME_DATE:
dt = (mxDateTimeObject *)self->wrapped;
if (dt->year >= 1)
PyOS_snprintf(buf, sizeof(buf) - 1, "'%04ld-%02d-%02d'::date",
dt->year, (int)dt->month, (int)dt->day);
else
PyOS_snprintf(buf, sizeof(buf) - 1, "'%04ld-%02d-%02d BC'::date",
1 - dt->year, (int)dt->month, (int)dt->day);
break;
case PSYCO_MXDATETIME_TIMESTAMP:
dt = (mxDateTimeObject *)self->wrapped;
if (dt->year >= 1)
PyOS_snprintf(buf, sizeof(buf) - 1,
"'%04ld-%02d-%02dT%02d:%02d:%09.6f'::timestamp",
dt->year, (int)dt->month, (int)dt->day,
(int)dt->hour, (int)dt->minute, dt->second);
else
PyOS_snprintf(buf, sizeof(buf) - 1,
"'%04ld-%02d-%02dT%02d:%02d:%09.6f BC'::timestamp",
1 - dt->year, (int)dt->month, (int)dt->day,
(int)dt->hour, (int)dt->minute, dt->second);
break;
case PSYCO_MXDATETIME_TIME:
case PSYCO_MXDATETIME_INTERVAL:
/* given the limitation of the mx.DateTime module that uses the same
type for both time and delta values we need to do some black magic
and make sure we're not using an adapt()ed interval as a simple
time */
dtd = (mxDateTimeDeltaObject *)self->wrapped;
if (0 <= dtd->seconds && dtd->seconds < 24*3600) {
PyOS_snprintf(buf, sizeof(buf) - 1, "'%02d:%02d:%09.6f'::time",
(int)dtd->hour, (int)dtd->minute, dtd->second);
} else {
double ss = dtd->hour*3600.0 + dtd->minute*60.0 + dtd->second;
if (dtd->seconds >= 0)
PyOS_snprintf(buf, sizeof(buf) - 1, "'%ld days %.6f seconds'::interval",
dtd->day, ss);
else
PyOS_snprintf(buf, sizeof(buf) - 1, "'-%ld days -%.6f seconds'::interval",
dtd->day, ss);
}
break;
}
return PyString_FromString(buf);
}
static PyObject *
mxdatetime_getquoted(mxdatetimeObject *self, PyObject *args)
{
return mxdatetime_str(self);
}
static PyObject *
mxdatetime_conform(mxdatetimeObject *self, PyObject *args)
{
PyObject *res, *proto;
if (!PyArg_ParseTuple(args, "O", &proto)) return NULL;
if (proto == (PyObject*)&isqlquoteType)
res = (PyObject*)self;
else
res = Py_None;
Py_INCREF(res);
return res;
}
/** the MxDateTime object **/
/* object member list */
static struct PyMemberDef mxdatetimeObject_members[] = {
{"adapted", T_OBJECT, offsetof(mxdatetimeObject, wrapped), READONLY},
{"type", T_INT, offsetof(mxdatetimeObject, type), READONLY},
{NULL}
};
/* object method table */
static PyMethodDef mxdatetimeObject_methods[] = {
{"getquoted", (PyCFunction)mxdatetime_getquoted, METH_NOARGS,
"getquoted() -> wrapped object value as SQL date/time"},
{"__conform__", (PyCFunction)mxdatetime_conform, METH_VARARGS, NULL},
{NULL} /* Sentinel */
};
/* initialization and finalization methods */
static int
mxdatetime_setup(mxdatetimeObject *self, PyObject *obj, int type)
{
Dprintf("mxdatetime_setup: init mxdatetime object at %p, refcnt = "
FORMAT_CODE_PY_SSIZE_T,
self, Py_REFCNT(self)
);
self->type = type;
Py_INCREF(obj);
self->wrapped = obj;
Dprintf("mxdatetime_setup: good mxdatetime object at %p, refcnt = "
FORMAT_CODE_PY_SSIZE_T,
self, Py_REFCNT(self)
);
return 0;
}
static void
mxdatetime_dealloc(PyObject* obj)
{
mxdatetimeObject *self = (mxdatetimeObject *)obj;
Py_CLEAR(self->wrapped);
Dprintf("mxdatetime_dealloc: deleted mxdatetime object at %p, refcnt = "
FORMAT_CODE_PY_SSIZE_T,
obj, Py_REFCNT(obj)
);
Py_TYPE(obj)->tp_free(obj);
}
static int
mxdatetime_init(PyObject *obj, PyObject *args, PyObject *kwds)
{
PyObject *mx;
int type = -1; /* raise an error if type was not passed! */
if (!PyArg_ParseTuple(args, "O|i", &mx, &type))
return -1;
return mxdatetime_setup((mxdatetimeObject *)obj, mx, type);
}
static PyObject *
mxdatetime_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
return type->tp_alloc(type, 0);
}
/* object type */
#define mxdatetimeType_doc \
"MxDateTime(mx, type) -> new mx.DateTime wrapper object"
PyTypeObject mxdatetimeType = {
PyVarObject_HEAD_INIT(NULL, 0)
"psycopg2._psycopg.MxDateTime",
sizeof(mxdatetimeObject), 0,
mxdatetime_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash */
0, /*tp_call*/
(reprfunc)mxdatetime_str, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/
mxdatetimeType_doc, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
mxdatetimeObject_methods, /*tp_methods*/
mxdatetimeObject_members, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
mxdatetime_init, /*tp_init*/
0, /*tp_alloc*/
mxdatetime_new, /*tp_new*/
};
/** module-level functions **/
PyObject *
psyco_DateFromMx(PyObject *self, PyObject *args)
{
PyObject *mx;
if (!PyArg_ParseTuple(args, "O!", mxDateTime.DateTime_Type, &mx))
return NULL;
return PyObject_CallFunction((PyObject *)&mxdatetimeType, "Oi", mx,
PSYCO_MXDATETIME_DATE);
}
PyObject *
psyco_TimeFromMx(PyObject *self, PyObject *args)
{
PyObject *mx;
if (!PyArg_ParseTuple(args, "O!", mxDateTime.DateTimeDelta_Type, &mx))
return NULL;
return PyObject_CallFunction((PyObject *)&mxdatetimeType, "Oi", mx,
PSYCO_MXDATETIME_TIME);
}
PyObject *
psyco_TimestampFromMx(PyObject *self, PyObject *args)
{
PyObject *mx;
if (!PyArg_ParseTuple(args, "O!", mxDateTime.DateTime_Type, &mx))
return NULL;
return PyObject_CallFunction((PyObject *)&mxdatetimeType, "Oi", mx,
PSYCO_MXDATETIME_TIMESTAMP);
}
PyObject *
psyco_IntervalFromMx(PyObject *self, PyObject *args)
{
PyObject *mx;
if (!PyArg_ParseTuple(args, "O!", mxDateTime.DateTime_Type, &mx))
return NULL;
return PyObject_CallFunction((PyObject *)&mxdatetimeType, "Oi", mx,
PSYCO_MXDATETIME_INTERVAL);
}


@ -0,0 +1,69 @@
/* adapter_mxdatetime.h - definition for the mx date/time types
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
*
* This file is part of psycopg.
*
* psycopg2 is free software: you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In addition, as a special exception, the copyright holders give
* permission to link this program with the OpenSSL library (or with
* modified versions of OpenSSL that use the same license as OpenSSL),
* and distribute linked combinations including the two.
*
* You must obey the GNU Lesser General Public License in all respects for
* all of the code used other than OpenSSL.
*
* psycopg2 is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*/
#ifndef PSYCOPG_MXDATETIME_H
#define PSYCOPG_MXDATETIME_H 1
#ifdef __cplusplus
extern "C" {
#endif
extern HIDDEN PyTypeObject mxdatetimeType;
typedef struct {
PyObject_HEAD
PyObject *wrapped;
int type;
#define PSYCO_MXDATETIME_TIME 0
#define PSYCO_MXDATETIME_DATE 1
#define PSYCO_MXDATETIME_TIMESTAMP 2
#define PSYCO_MXDATETIME_INTERVAL 3
} mxdatetimeObject;
HIDDEN int psyco_adapter_mxdatetime_init(void);
HIDDEN PyObject *psyco_DateFromMx(PyObject *module, PyObject *args);
#define psyco_DateFromMx_doc \
"DateFromMx(mx) -> new date"
HIDDEN PyObject *psyco_TimeFromMx(PyObject *module, PyObject *args);
#define psyco_TimeFromMx_doc \
"TimeFromMx(mx) -> new time"
HIDDEN PyObject *psyco_TimestampFromMx(PyObject *module, PyObject *args);
#define psyco_TimestampFromMx_doc \
"TimestampFromMx(mx) -> new timestamp"
HIDDEN PyObject *psyco_IntervalFromMx(PyObject *module, PyObject *args);
#define psyco_IntervalFromMx_doc \
"IntervalFromMx(mx) -> new interval"
#ifdef __cplusplus
}
#endif
#endif /* !defined(PSYCOPG_MXDATETIME_H) */


@ -1,7 +1,6 @@
/* adapter_pboolean.c - psycopg boolean type wrapper implementation
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_pboolean.h - definition for the psycopg boolean type wrapper
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_pdecimal.c - psycopg Decimal type wrapper implementation
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -81,7 +80,8 @@ pdecimal_getquoted(pdecimalObject *self, PyObject *args)
/* res may be unicode and may suffer for issue #57 */
output:
/* unicode to bytes */
#if PY_3
/* unicode to bytes in Py3 */
{
PyObject *tmp = PyUnicode_AsUTF8String(res);
Py_DECREF(res);
@ -89,6 +89,7 @@ output:
goto end;
}
}
#endif
if ('-' == Bytes_AS_STRING(res)[0]) {
/* Prepend a space in front of negative numbers (ticket #57) */


@ -1,7 +1,6 @@
/* adapter_pdecimal.h - definition for the psycopg Decimal type wrapper
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_float.c - psycopg pfloat type wrapper implementation
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -54,7 +53,8 @@ pfloat_getquoted(pfloatObject *self, PyObject *args)
goto exit;
}
/* unicode to bytes */
#if PY_3
/* unicode to bytes in Py3 */
{
PyObject *tmp = PyUnicode_AsUTF8String(rv);
Py_DECREF(rv);
@ -62,6 +62,7 @@ pfloat_getquoted(pfloatObject *self, PyObject *args)
goto exit;
}
}
#endif
if ('-' == Bytes_AS_STRING(rv)[0]) {
/* Prepend a space in front of negative numbers (ticket #57) */


@ -1,7 +1,6 @@
/* adapter_pfloat.h - definition for the psycopg float type wrapper
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_int.c - psycopg pint type wrapper implementation
*
* Copyright (C) 2011-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -40,7 +39,11 @@ pint_getquoted(pintObject *self, PyObject *args)
/* Convert subclass to int to handle IntEnum and other subclasses
* whose str() is not the number. */
if (PyLong_CheckExact(self->wrapped)) {
if (PyLong_CheckExact(self->wrapped)
#if PY_2
|| PyInt_CheckExact(self->wrapped)
#endif
) {
res = PyObject_Str(self->wrapped);
} else {
PyObject *tmp;
@ -56,7 +59,8 @@ pint_getquoted(pintObject *self, PyObject *args)
goto exit;
}
/* unicode to bytes */
#if PY_3
/* unicode to bytes in Py3 */
{
PyObject *tmp = PyUnicode_AsUTF8String(res);
Py_DECREF(res);
@ -64,6 +68,7 @@ pint_getquoted(pintObject *self, PyObject *args)
goto exit;
}
}
#endif
if ('-' == Bytes_AS_STRING(res)[0]) {
/* Prepend a space in front of negative numbers (ticket #57) */


@ -1,7 +1,6 @@
/* adapter_pint.h - definition for the psycopg int type wrapper
*
* Copyright (C) 2011-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_qstring.c - QuotedString objects
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* adapter_qstring.h - definition for the QuotedString type
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,58 +0,0 @@
/* aix_support.c - emulate functions missing on AIX
*
* Copyright (C) 2017 My Karlsson <mk@acc.umu.se>
* Copyright (c) 2018, Joyent, Inc.
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
* psycopg2 is free software: you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In addition, as a special exception, the copyright holders give
* permission to link this program with the OpenSSL library (or with
* modified versions of OpenSSL that use the same license as OpenSSL),
* and distribute linked combinations including the two.
*
* You must obey the GNU Lesser General Public License in all respects for
* all of the code used other than OpenSSL.
*
* psycopg2 is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*/
#define PSYCOPG_MODULE
#include "psycopg/psycopg.h"
#include "psycopg/aix_support.h"
#if defined(_AIX)
/* timeradd is missing on AIX */
#ifndef timeradd
void
timeradd(struct timeval *a, struct timeval *b, struct timeval *c)
{
c->tv_sec = a->tv_sec + b->tv_sec;
c->tv_usec = a->tv_usec + b->tv_usec;
if (c->tv_usec >= 1000000) {
c->tv_usec -= 1000000;
c->tv_sec += 1;
}
}
/* timersub is missing on AIX */
void
timersub(struct timeval *a, struct timeval *b, struct timeval *c)
{
c->tv_sec = a->tv_sec - b->tv_sec;
c->tv_usec = a->tv_usec - b->tv_usec;
if (c->tv_usec < 0) {
c->tv_usec += 1000000;
c->tv_sec -= 1;
}
}
#endif /* timeradd */
#endif /* defined(_AIX)*/


@ -1,48 +0,0 @@
/* aix_support.h - definitions for aix_support.c
*
* Copyright (C) 2017 My Karlsson <mk@acc.umu.se>
* Copyright (c) 2018-2019, Joyent, Inc.
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
* psycopg2 is free software: you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In addition, as a special exception, the copyright holders give
* permission to link this program with the OpenSSL library (or with
* modified versions of OpenSSL that use the same license as OpenSSL),
* and distribute linked combinations including the two.
*
* You must obey the GNU Lesser General Public License in all respects for
* all of the code used other than OpenSSL.
*
* psycopg2 is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*/
#ifndef PSYCOPG_AIX_SUPPORT_H
#define PSYCOPG_AIX_SUPPORT_H
#include "psycopg/config.h"
#ifdef _AIX
#include <sys/time.h>
#ifndef timeradd
extern HIDDEN void timeradd(struct timeval *a, struct timeval *b, struct timeval *c);
extern HIDDEN void timersub(struct timeval *a, struct timeval *b, struct timeval *c);
#endif
#ifndef timercmp
#define timercmp(a, b, cmp) \
(((a)->tv_sec == (b)->tv_sec) ? \
((a)->tv_usec cmp (b)->tv_usec) : \
((a)->tv_sec cmp (b)->tv_sec))
#endif
#endif
#endif /* !defined(PSYCOPG_AIX_SUPPORT_H) */


@ -1,7 +1,6 @@
/* bytes_format.c - bytes-oriented version of PyString_Format
*
* Copyright (C) 2010-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* column.h - definition for a column in cursor.description type
*
* Copyright (C) 2018-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* column_type.c - python interface to cursor.description objects
*
* Copyright (C) 2018-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -97,36 +96,17 @@ column_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
static int
column_init(columnObject *self, PyObject *args, PyObject *kwargs)
{
PyObject *name = NULL;
PyObject *type_code = NULL;
PyObject *display_size = NULL;
PyObject *internal_size = NULL;
PyObject *precision = NULL;
PyObject *scale = NULL;
PyObject *null_ok = NULL;
PyObject *table_oid = NULL;
PyObject *table_column = NULL;
static char *kwlist[] = {
"name", "type_code", "display_size", "internal_size",
"precision", "scale", "null_ok", "table_oid", "table_column", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOOOOOOO", kwlist,
&name, &type_code, &display_size, &internal_size, &precision,
&scale, &null_ok, &table_oid, &table_column)) {
&self->name, &self->type_code, &self->display_size,
&self->internal_size, &self->precision, &self->scale,
&self->null_ok, &self->table_oid, &self->table_column)) {
return -1;
}
Py_XINCREF(name); self->name = name;
Py_XINCREF(type_code); self->type_code = type_code;
Py_XINCREF(display_size); self->display_size = display_size;
Py_XINCREF(internal_size); self->internal_size = internal_size;
Py_XINCREF(precision); self->precision = precision;
Py_XINCREF(scale); self->scale = scale;
Py_XINCREF(null_ok); self->null_ok = null_ok;
Py_XINCREF(table_oid); self->table_oid = table_oid;
Py_XINCREF(table_column); self->table_column = table_column;
return 0;
}
@ -252,32 +232,6 @@ column_getitem(columnObject *self, Py_ssize_t item)
}
static PyObject*
column_subscript(columnObject* self, PyObject* item)
{
PyObject *t = NULL;
PyObject *rv = NULL;
/* t = tuple(self) */
if (!(t = PyObject_CallFunctionObjArgs(
(PyObject *)&PyTuple_Type, (PyObject *)self, NULL))) {
goto exit;
}
/* rv = t[item] */
rv = PyObject_GetItem(t, item);
exit:
Py_XDECREF(t);
return rv;
}
static PyMappingMethods column_mapping = {
(lenfunc)column_len, /* mp_length */
(binaryfunc)column_subscript, /* mp_subscript */
0 /* mp_ass_subscript */
};
static PySequenceMethods column_sequence = {
(lenfunc)column_len, /* sq_length */
0, /* sq_concat */
@ -391,7 +345,7 @@ PyTypeObject columnType = {
(reprfunc)column_repr, /*tp_repr*/
0, /*tp_as_number*/
&column_sequence, /*tp_as_sequence*/
&column_mapping, /*tp_as_mapping*/
0, /*tp_as_mapping*/
0, /*tp_hash */
0, /*tp_call*/
0, /*tp_str*/
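
In the hunk above, the master side fills tp_as_mapping with a column_mapping table whose mp_subscript works by converting the Column into a tuple and delegating to PyObject_GetItem, which is what lets both integer and slice subscripts work; on 2_8 the slot is left empty. A standalone sketch of that delegation idea (illustrative only, not psycopg code; build flags vary by platform and Python version):

/* subscript_demo.c - illustrative only, not psycopg code.
 * Build (flags vary): cc subscript_demo.c $(python3-config --cflags --ldflags --embed)
 */
#define PY_SSIZE_T_CLEAN
#include <Python.h>

int main(void)
{
    Py_Initialize();

    /* Stand-in for a Column: any object that can be turned into a tuple. */
    PyObject *t = Py_BuildValue("(ssi)", "id", "name", 42);

    /* Integer subscript, t[1] */
    PyObject *idx = PyLong_FromLong(1);
    PyObject *one = PyObject_GetItem(t, idx);

    /* Slice subscript, t[0:2] - this is what an mp_subscript slot enables */
    PyObject *lo = PyLong_FromLong(0), *hi = PyLong_FromLong(2);
    PyObject *slice = PySlice_New(lo, hi, NULL);
    PyObject *two = PyObject_GetItem(t, slice);

    PyObject_Print(one, stdout, 0); printf("\n");   /* 'name' */
    PyObject_Print(two, stdout, 0); printf("\n");   /* ('id', 'name') */

    Py_XDECREF(one); Py_XDECREF(two); Py_XDECREF(slice);
    Py_XDECREF(idx); Py_XDECREF(lo); Py_XDECREF(hi); Py_XDECREF(t);
    Py_Finalize();
    return 0;
}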


@ -1,7 +1,6 @@
/* config.h - general config and Dprintf macro
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* connection.h - definition for the psycopg connection type
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -145,9 +144,6 @@ struct connectionObject {
/* the pid this connection was created into */
pid_t procpid;
/* inside a with block */
int entered;
};
/* map isolation level values into a numeric const */
@ -168,8 +164,7 @@ HIDDEN void conn_notice_process(connectionObject *self);
HIDDEN void conn_notice_clean(connectionObject *self);
HIDDEN void conn_notifies_process(connectionObject *self);
RAISES_NEG HIDDEN int conn_setup(connectionObject *self);
HIDDEN int conn_connect(connectionObject *self, const char *dsn, long int async);
HIDDEN char *conn_obscure_password(const char *dsn);
HIDDEN int conn_connect(connectionObject *self, long int async);
HIDDEN void conn_close(connectionObject *self);
HIDDEN void conn_close_locked(connectionObject *self);
RAISES_NEG HIDDEN int conn_commit(connectionObject *self);


@ -1,7 +1,6 @@
/* connection_int.c - code used by the connection object
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -33,7 +32,6 @@
#include "psycopg/green.h"
#include "psycopg/notify.h"
#include <stdlib.h>
#include <string.h>
/* String indexes match the ISOLATION_LEVEL_* consts */
@ -707,7 +705,7 @@ exit:
/* conn_connect - execute a connection to the database */
static int
_conn_sync_connect(connectionObject *self, const char *dsn)
_conn_sync_connect(connectionObject *self)
{
int green;
@ -716,26 +714,26 @@ _conn_sync_connect(connectionObject *self, const char *dsn)
green = psyco_green();
if (!green) {
Py_BEGIN_ALLOW_THREADS;
self->pgconn = PQconnectdb(dsn);
self->pgconn = PQconnectdb(self->dsn);
Py_END_ALLOW_THREADS;
Dprintf("conn_connect: new PG connection at %p", self->pgconn);
}
else {
Py_BEGIN_ALLOW_THREADS;
self->pgconn = PQconnectStart(dsn);
self->pgconn = PQconnectStart(self->dsn);
Py_END_ALLOW_THREADS;
Dprintf("conn_connect: new green PG connection at %p", self->pgconn);
}
if (!self->pgconn)
{
Dprintf("conn_connect: PQconnectdb(%s) FAILED", dsn);
Dprintf("conn_connect: PQconnectdb(%s) FAILED", self->dsn);
PyErr_SetString(OperationalError, "PQconnectdb() failed");
return -1;
}
else if (PQstatus(self->pgconn) == CONNECTION_BAD)
{
Dprintf("conn_connect: PQconnectdb(%s) returned BAD", dsn);
Dprintf("conn_connect: PQconnectdb(%s) returned BAD", self->dsn);
PyErr_SetString(OperationalError, PQerrorMessage(self->pgconn));
return -1;
}
@ -765,23 +763,23 @@ _conn_sync_connect(connectionObject *self, const char *dsn)
}
static int
_conn_async_connect(connectionObject *self, const char *dsn)
_conn_async_connect(connectionObject *self)
{
PGconn *pgconn;
self->pgconn = pgconn = PQconnectStart(dsn);
self->pgconn = pgconn = PQconnectStart(self->dsn);
Dprintf("conn_connect: new postgresql connection at %p", pgconn);
if (pgconn == NULL)
{
Dprintf("conn_connect: PQconnectStart(%s) FAILED", dsn);
Dprintf("conn_connect: PQconnectStart(%s) FAILED", self->dsn);
PyErr_SetString(OperationalError, "PQconnectStart() failed");
return -1;
}
else if (PQstatus(pgconn) == CONNECTION_BAD)
{
Dprintf("conn_connect: PQconnectdb(%s) returned BAD", dsn);
Dprintf("conn_connect: PQconnectdb(%s) returned BAD", self->dsn);
PyErr_SetString(OperationalError, PQerrorMessage(pgconn));
return -1;
}
@ -802,17 +800,17 @@ _conn_async_connect(connectionObject *self, const char *dsn)
}
int
conn_connect(connectionObject *self, const char *dsn, long int async)
conn_connect(connectionObject *self, long int async)
{
int rv;
if (async == 1) {
Dprintf("con_connect: connecting in ASYNC mode");
rv = _conn_async_connect(self, dsn);
rv = _conn_async_connect(self);
}
else {
Dprintf("con_connect: connecting in SYNC mode");
rv = _conn_sync_connect(self, dsn);
rv = _conn_sync_connect(self);
}
if (rv != 0) {
@ -1047,6 +1045,12 @@ static cursorObject *
_conn_get_async_cursor(connectionObject *self) {
PyObject *py_curs;
if (!(self->async_cursor)) {
PyErr_SetString(PyExc_SystemError,
"unexpectedly, there's no async cursor here");
goto error;
}
if (!(py_curs = PyWeakref_GetObject(self->async_cursor))) {
PyErr_SetString(PyExc_SystemError,
"got null dereferencing cursor weakref");
@ -1104,7 +1108,7 @@ conn_poll(connectionObject *self)
Dprintf("conn_poll: status -> CONN_STATUS_*");
res = _conn_poll_query(self);
if (res == PSYCO_POLL_OK && self->async && self->async_cursor) {
if (res == PSYCO_POLL_OK && self->async) {
cursorObject *curs;
/* An async query has just finished: parse the tuple in the
@ -1159,60 +1163,6 @@ conn_close(connectionObject *self)
Py_END_ALLOW_THREADS;
}
/* Return a copy of the 'dsn' string with the password scrubbed.
*
* The string returned is allocated on the Python heap.
*
* In case of error return NULL and raise an exception.
*/
char *
conn_obscure_password(const char *dsn)
{
PQconninfoOption *options = NULL;
PyObject *d = NULL, *v = NULL, *pydsn = NULL;
char *rv = NULL;
if (!dsn) {
PyErr_SetString(InternalError, "unexpected null string");
goto exit;
}
if (!(options = PQconninfoParse(dsn, NULL))) {
/* unlikely: the dsn was already tested valid */
PyErr_SetString(InternalError, "the connection string is not valid");
goto exit;
}
if (!(d = psyco_dict_from_conninfo_options(
options, /* include_password = */ 1))) {
goto exit;
}
if (NULL == PyDict_GetItemString(d, "password")) {
/* the dsn doesn't have a password */
psyco_strdup(&rv, dsn, -1);
goto exit;
}
/* scrub the password and put back the connection string together */
if (!(v = Text_FromUTF8("xxx"))) { goto exit; }
if (0 > PyDict_SetItemString(d, "password", v)) { goto exit; }
if (!(pydsn = psyco_make_dsn(Py_None, d))) { goto exit; }
if (!(pydsn = psyco_ensure_bytes(pydsn))) { goto exit; }
/* Return the connection string with the password replaced */
psyco_strdup(&rv, Bytes_AS_STRING(pydsn), -1);
exit:
PQconninfoFree(options);
Py_XDECREF(v);
Py_XDECREF(d);
Py_XDECREF(pydsn);
return rv;
}
/* conn_close_locked - shut down the connection with the lock already taken */
void conn_close_locked(connectionObject *self)
@ -1344,11 +1294,6 @@ conn_set_session(connectionObject *self, int autocommit,
}
}
Py_BLOCK_THREADS;
conn_notifies_process(self);
conn_notice_process(self);
Py_UNBLOCK_THREADS;
if (autocommit != SRV_STATE_UNCHANGED) {
self->autocommit = autocommit;
}
@ -1395,10 +1340,7 @@ conn_set_client_encoding(connectionObject *self, const char *pgenc)
/* If the current encoding is equal to the requested one we don't
issue any query to the backend */
if (strcmp(self->encoding, clean_enc) == 0) {
res = 0;
goto exit;
}
if (strcmp(self->encoding, clean_enc) == 0) return 0;
Py_BEGIN_ALLOW_THREADS;
pthread_mutex_lock(&self->lock);
@ -1413,11 +1355,6 @@ conn_set_client_encoding(connectionObject *self, const char *pgenc)
goto endlock;
}
Py_BLOCK_THREADS;
conn_notifies_process(self);
conn_notice_process(self);
Py_UNBLOCK_THREADS;
endlock:
pthread_mutex_unlock(&self->lock);
Py_END_ALLOW_THREADS;
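
The master side of this file factors password hiding into conn_obscure_password(): the DSN is parsed with libpq's PQconninfoParse(), the password entry is replaced with "xxx", and the connection string is rebuilt. A minimal sketch of the same idea using plain libpq calls (illustrative only, not psycopg code; the rebuilding here is simplified and does not re-quote values):

/* scrub_dsn.c - illustrative only, not psycopg code.
 * Build: cc scrub_dsn.c -o scrub_dsn $(pkg-config --cflags --libs libpq)
 */
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int main(void)
{
    const char *dsn = "dbname=test user=alice password=s3cret host=db.example.com";
    char *errmsg = NULL;

    PQconninfoOption *opts = PQconninfoParse(dsn, &errmsg);
    if (!opts) {
        fprintf(stderr, "invalid conninfo: %s\n", errmsg ? errmsg : "out of memory");
        PQfreemem(errmsg);
        return 1;
    }

    /* Re-emit every option that was explicitly set, masking the password. */
    for (PQconninfoOption *o = opts; o->keyword; o++) {
        if (!o->val)
            continue;
        printf("%s=%s ", o->keyword,
               strcmp(o->keyword, "password") == 0 ? "xxx" : o->val);
    }
    printf("\n");

    PQconninfoFree(opts);
    return 0;
}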


@ -1,7 +1,6 @@
/* connection_type.c - python interface to connection objects
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -35,7 +34,6 @@
#include "psycopg/green.h"
#include "psycopg/xid.h"
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
@ -407,22 +405,10 @@ psyco_conn_tpc_recover(connectionObject *self, PyObject *dummy)
static PyObject *
psyco_conn_enter(connectionObject *self, PyObject *dummy)
{
PyObject *rv = NULL;
EXC_IF_CONN_CLOSED(self);
if (self->entered) {
PyErr_SetString(ProgrammingError,
"the connection cannot be re-entered recursively");
goto exit;
}
self->entered = 1;
Py_INCREF(self);
rv = (PyObject *)self;
exit:
return rv;
return (PyObject *)self;
}
@ -440,9 +426,6 @@ psyco_conn_exit(connectionObject *self, PyObject *args)
goto exit;
}
/* even if there will be an error, consider ourselves out */
self->entered = 0;
if (type == Py_None) {
if (!(tmp = PyObject_CallMethod((PyObject *)self, "commit", NULL))) {
goto exit;
@ -985,7 +968,7 @@ psyco_conn_lobject(connectionObject *self, PyObject *args, PyObject *keywds)
Dprintf("psyco_conn_lobject: new lobject for connection at %p", self);
Dprintf("psyco_conn_lobject: parameters: oid = %u, mode = %s",
oid, smode);
Dprintf("psyco_conn_lobject: parameters: new_oid = %u, new_file = %s",
Dprintf("psyco_conn_lobject: parameters: new_oid = %d, new_file = %s",
new_oid, new_file);
if (new_file)
@ -1314,17 +1297,66 @@ static struct PyGetSetDef connectionObject_getsets[] = {
/* initialization and finalization methods */
RAISES_NEG static int
obscure_password(connectionObject *conn)
{
PQconninfoOption *options;
PyObject *d = NULL, *v = NULL, *dsn = NULL;
char *tmp;
int rv = -1;
if (!conn || !conn->dsn) {
return 0;
}
if (!(options = PQconninfoParse(conn->dsn, NULL))) {
/* unlikely: the dsn was already tested valid */
return 0;
}
if (!(d = psyco_dict_from_conninfo_options(
options, /* include_password = */ 1))) {
goto exit;
}
if (NULL == PyDict_GetItemString(d, "password")) {
/* the dsn doesn't have a password */
rv = 0;
goto exit;
}
/* scrub the password and put back the connection string together */
if (!(v = Text_FromUTF8("xxx"))) { goto exit; }
if (0 > PyDict_SetItemString(d, "password", v)) { goto exit; }
if (!(dsn = psyco_make_dsn(Py_None, d))) { goto exit; }
if (!(dsn = psyco_ensure_bytes(dsn))) { goto exit; }
/* Replace the connection string on the connection object */
tmp = conn->dsn;
psyco_strdup(&conn->dsn, Bytes_AS_STRING(dsn), -1);
PyMem_Free(tmp);
rv = 0;
exit:
PQconninfoFree(options);
Py_XDECREF(v);
Py_XDECREF(d);
Py_XDECREF(dsn);
return rv;
}
static int
connection_setup(connectionObject *self, const char *dsn, long int async)
{
int rv = -1;
int res = -1;
Dprintf("connection_setup: init connection object at %p, "
"async %ld, refcnt = " FORMAT_CODE_PY_SSIZE_T,
self, async, Py_REFCNT(self)
);
if (!(self->dsn = conn_obscure_password(dsn))) { goto exit; }
if (0 > psyco_strdup(&self->dsn, dsn, -1)) { goto exit; }
if (!(self->notice_list = PyList_New(0))) { goto exit; }
if (!(self->notifies = PyList_New(0))) { goto exit; }
self->async = async;
@ -1341,24 +1373,29 @@ connection_setup(connectionObject *self, const char *dsn, long int async)
/* other fields have been zeroed by tp_alloc */
if (0 != pthread_mutex_init(&(self->lock), NULL)) {
PyErr_SetString(InternalError, "lock initialization failed");
goto exit;
}
pthread_mutex_init(&(self->lock), NULL);
if (conn_connect(self, dsn, async) != 0) {
if (conn_connect(self, async) != 0) {
Dprintf("connection_init: FAILED");
goto exit;
}
rv = 0;
Dprintf("connection_setup: good connection object at %p, refcnt = "
FORMAT_CODE_PY_SSIZE_T,
self, Py_REFCNT(self));
else {
Dprintf("connection_setup: good connection object at %p, refcnt = "
FORMAT_CODE_PY_SSIZE_T,
self, Py_REFCNT(self)
);
res = 0;
}
exit:
return rv;
/* here we obfuscate the password even if there was a connection error */
{
PyObject *ptype = NULL, *pvalue = NULL, *ptb = NULL;
PyErr_Fetch(&ptype, &pvalue, &ptb);
obscure_password(self);
PyErr_Restore(ptype, pvalue, ptb);
}
return res;
}
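
On the 2_8 side, connection_setup() scrubs the password on the stored dsn even when conn_connect() fails, wrapping the call in PyErr_Fetch()/PyErr_Restore() so that the pending connection error is not clobbered by anything the cleanup does. A small standalone sketch of that save-and-restore pattern (illustrative only, not psycopg code; build flags vary by platform and Python version):

/* keep_error_demo.c - illustrative only, not psycopg code.
 * Build (flags vary): cc keep_error_demo.c $(python3-config --cflags --ldflags --embed)
 */
#define PY_SSIZE_T_CLEAN
#include <Python.h>

/* Cleanup work that may set and clear its own exceptions. */
static void cleanup(void)
{
    PyObject *tmp = PyUnicode_FromString("scrubbed");
    Py_XDECREF(tmp);
}

int main(void)
{
    Py_Initialize();

    /* Simulate a failure that has already been raised. */
    PyErr_SetString(PyExc_RuntimeError, "connection failed");

    /* Save the pending exception, run the cleanup, then put it back. */
    PyObject *ptype, *pvalue, *ptb;
    PyErr_Fetch(&ptype, &pvalue, &ptb);
    cleanup();
    PyErr_Restore(ptype, pvalue, ptb);

    /* The original error is still the one reported to the caller. */
    PyErr_Print();

    Py_Finalize();
    return 0;
}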


@ -1,7 +1,6 @@
/* connection.h - definition for the psycopg ConnectionInfo type
*
* Copyright (C) 2018-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* conninfo_type.c - present information about the libpq connection
*
* Copyright (C) 2018-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -148,7 +147,7 @@ port_get(connInfoObject *self)
static const char options_doc[] =
"The command-line options passed in the connection request.\n"
"The command-line options passed in the the connection request.\n"
"\n"
".. seealso:: libpq docs for `PQoptions()`__ for details.\n"
".. __: https://www.postgresql.org/docs/current/static/libpq-status.html"
@ -363,7 +362,7 @@ socket_get(connInfoObject *self)
static const char backend_pid_doc[] =
"The process ID (PID) of the backend process you connected to.\n"
"The process ID (PID) of the backend process handling this connection.\n"
"\n"
":type: `!int`\n"
"\n"


@ -1,7 +1,6 @@
/* cursor.h - definition for the psycopg cursor type
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* cursor_int.c - code used by the cursor object
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* cursor_type.c - python interface to cursor objects
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -318,7 +317,7 @@ _psyco_curs_merge_query_args(cursorObject *self,
{
PyObject *fquery;
/* if PyString_Format() return NULL an error occurred: if the error is
/* if PyString_Format() return NULL an error occured: if the error is
a TypeError we need to check the exception.args[0] string for the
values:
@ -768,7 +767,7 @@ curs_fetchone(cursorObject *self, PyObject *dummy)
Dprintf("curs_fetchone: rowcount = %ld", self->rowcount);
if (self->row >= self->rowcount) {
/* we exhausted available data: return None */
/* we exausted available data: return None */
Py_RETURN_NONE;
}
@ -1133,7 +1132,7 @@ exit:
}
}
}
PyMem_Free(scpnames);
PyMem_Del(scpnames);
Py_XDECREF(pname);
Py_XDECREF(pnames);
Py_XDECREF(operation);
@ -1303,9 +1302,11 @@ exit:
/* Return a newly allocated buffer containing the list of columns to be
* copied. On error return NULL and set an exception.
*/
static char *_psyco_curs_copy_columns(cursorObject *self, PyObject *columns)
static char *_psyco_curs_copy_columns(PyObject *columns)
{
PyObject *col, *coliter;
Py_ssize_t collen;
char *colname;
char *columnlist = NULL;
Py_ssize_t bufsize = 512;
Py_ssize_t offset = 1;
@ -1331,28 +1332,15 @@ static char *_psyco_curs_copy_columns(cursorObject *self, PyObject *columns)
columnlist[0] = '(';
while ((col = PyIter_Next(coliter)) != NULL) {
Py_ssize_t collen;
char *colname;
char *quoted_colname;
if (!(col = psyco_ensure_bytes(col))) {
Py_DECREF(coliter);
goto error;
}
Bytes_AsStringAndSize(col, &colname, &collen);
if (!(quoted_colname = psyco_escape_identifier(
self->conn, colname, collen))) {
Py_DECREF(col);
Py_DECREF(coliter);
goto error;
}
collen = strlen(quoted_colname);
while (offset + collen > bufsize - 2) {
char *tmp;
bufsize *= 2;
if (NULL == (tmp = PyMem_Realloc(columnlist, bufsize))) {
PQfreemem(quoted_colname);
Py_DECREF(col);
Py_DECREF(coliter);
PyErr_NoMemory();
@ -1360,11 +1348,10 @@ static char *_psyco_curs_copy_columns(cursorObject *self, PyObject *columns)
}
columnlist = tmp;
}
strncpy(&columnlist[offset], quoted_colname, collen);
strncpy(&columnlist[offset], colname, collen);
offset += collen;
columnlist[offset++] = ',';
Py_DECREF(col);
PQfreemem(quoted_colname);
}
Py_DECREF(coliter);
@ -1411,9 +1398,8 @@ curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
char *columnlist = NULL;
char *quoted_delimiter = NULL;
char *quoted_null = NULL;
char *quoted_table_name = NULL;
const char *table_name;
const char *table_name;
Py_ssize_t bufsize = DEFAULT_COPYBUFF;
PyObject *file, *columns = NULL, *res = NULL;
@ -1434,9 +1420,8 @@ curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
EXC_IF_GREEN(copy_from);
EXC_IF_TPC_PREPARED(self->conn, copy_from);
if (!(columnlist = _psyco_curs_copy_columns(self, columns))) {
if (NULL == (columnlist = _psyco_curs_copy_columns(columns)))
goto exit;
}
if (!(quoted_delimiter = psyco_escape_string(
self->conn, sep, -1, NULL, NULL))) {
@ -1448,12 +1433,7 @@ curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
goto exit;
}
if (!(quoted_table_name = psyco_escape_identifier(
self->conn, table_name, -1))) {
goto exit;
}
query_size = strlen(command) + strlen(quoted_table_name) + strlen(columnlist)
query_size = strlen(command) + strlen(table_name) + strlen(columnlist)
+ strlen(quoted_delimiter) + strlen(quoted_null) + 1;
if (!(query = PyMem_New(char, query_size))) {
PyErr_NoMemory();
@ -1461,15 +1441,10 @@ curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
}
PyOS_snprintf(query, query_size, command,
quoted_table_name, columnlist, quoted_delimiter, quoted_null);
table_name, columnlist, quoted_delimiter, quoted_null);
Dprintf("curs_copy_from: query = %s", query);
Py_CLEAR(self->query);
if (!(self->query = Bytes_FromString(query))) {
goto exit;
}
/* This routine stores a borrowed reference. Although it is only held
* for the duration of curs_copy_from, nested invocations of
* Py_BEGIN_ALLOW_THREADS could surrender control to another thread,
@ -1488,9 +1463,6 @@ curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
Py_CLEAR(self->copyfile);
exit:
if (quoted_table_name) {
PQfreemem(quoted_table_name);
}
PyMem_Free(columnlist);
PyMem_Free(quoted_delimiter);
PyMem_Free(quoted_null);
@ -1521,7 +1493,6 @@ curs_copy_to(cursorObject *self, PyObject *args, PyObject *kwargs)
char *quoted_null = NULL;
const char *table_name;
char *quoted_table_name = NULL;
PyObject *file = NULL, *columns = NULL, *res = NULL;
if (!PyArg_ParseTupleAndKeywords(
@ -1541,14 +1512,8 @@ curs_copy_to(cursorObject *self, PyObject *args, PyObject *kwargs)
EXC_IF_GREEN(copy_to);
EXC_IF_TPC_PREPARED(self->conn, copy_to);
if (!(quoted_table_name = psyco_escape_identifier(
self->conn, table_name, -1))) {
if (NULL == (columnlist = _psyco_curs_copy_columns(columns)))
goto exit;
}
if (!(columnlist = _psyco_curs_copy_columns(self, columns))) {
goto exit;
}
if (!(quoted_delimiter = psyco_escape_string(
self->conn, sep, -1, NULL, NULL))) {
@ -1560,7 +1525,7 @@ curs_copy_to(cursorObject *self, PyObject *args, PyObject *kwargs)
goto exit;
}
query_size = strlen(command) + strlen(quoted_table_name) + strlen(columnlist)
query_size = strlen(command) + strlen(table_name) + strlen(columnlist)
+ strlen(quoted_delimiter) + strlen(quoted_null) + 1;
if (!(query = PyMem_New(char, query_size))) {
PyErr_NoMemory();
@ -1568,15 +1533,10 @@ curs_copy_to(cursorObject *self, PyObject *args, PyObject *kwargs)
}
PyOS_snprintf(query, query_size, command,
quoted_table_name, columnlist, quoted_delimiter, quoted_null);
table_name, columnlist, quoted_delimiter, quoted_null);
Dprintf("curs_copy_to: query = %s", query);
Py_CLEAR(self->query);
if (!(self->query = Bytes_FromString(query))) {
goto exit;
}
self->copysize = 0;
Py_INCREF(file);
self->copyfile = file;
@ -1589,9 +1549,6 @@ curs_copy_to(cursorObject *self, PyObject *args, PyObject *kwargs)
Py_CLEAR(self->copyfile);
exit:
if (quoted_table_name) {
PQfreemem(quoted_table_name);
}
PyMem_Free(columnlist);
PyMem_Free(quoted_delimiter);
PyMem_Free(quoted_null);
@ -1657,10 +1614,6 @@ curs_copy_expert(cursorObject *self, PyObject *args, PyObject *kwargs)
Py_INCREF(file);
self->copyfile = file;
Py_CLEAR(self->query);
Py_INCREF(sql);
self->query = sql;
/* At this point, the SQL statement must be str, not unicode */
if (pq_execute(self, Bytes_AS_STRING(sql), 0, 0, 0) >= 0) {
res = Py_None;
@ -1874,7 +1827,7 @@ static struct PyMemberDef cursorObject_members[] = {
"Number of records ``iter(cur)`` must fetch per network roundtrip."},
{"description", T_OBJECT, OFFSETOF(description), READONLY,
"Cursor description as defined in DBAPI-2.0."},
{"lastrowid", T_OID, OFFSETOF(lastoid), READONLY,
{"lastrowid", T_LONG, OFFSETOF(lastoid), READONLY,
"The ``oid`` of the last row inserted by the cursor."},
/* DBAPI-2.0 extensions */
{"rownumber", T_LONG, OFFSETOF(row), READONLY,
@ -1951,11 +1904,10 @@ cursor_setup(cursorObject *self, connectionObject *conn, const char *name)
/* default tzinfo factory */
{
/* The datetime api doesn't seem to have a constructor to make a
* datetime.timezone, so use the Python interface. */
PyObject *m = NULL;
if ((m = PyImport_ImportModule("datetime"))) {
self->tzinfo_factory = PyObject_GetAttrString(m, "timezone");
if ((m = PyImport_ImportModule("psycopg2.tz"))) {
self->tzinfo_factory = PyObject_GetAttrString(
m, "FixedOffsetTimezone");
Py_DECREF(m);
}
if (!self->tzinfo_factory) {
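
In the copy_from/copy_to hunks above, master quotes the table and column names with psyco_escape_identifier() before splicing them into the COPY statement, while 2_8 inserts the raw strings. A minimal sketch of the underlying libpq call, PQescapeIdentifier() (illustrative only, not psycopg code; assumes the PG* environment variables point at a reachable database, since the escaping depends on the connection encoding):

/* quote_ident.c - illustrative only, not psycopg code.
 * Build: cc quote_ident.c -o quote_ident $(pkg-config --cflags --libs libpq)
 */
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int main(void)
{
    /* Connection parameters are taken from the PG* environment variables. */
    PGconn *conn = PQconnectdb("");
    if (PQstatus(conn) != CONNECTION_OK) {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    const char *table = "my \"weird\" table";   /* needs quoting */
    char *quoted = PQescapeIdentifier(conn, table, strlen(table));
    if (!quoted) {
        fprintf(stderr, "escape failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* Prints: COPY "my ""weird"" table" (col1, col2) FROM stdin */
    printf("COPY %s (col1, col2) FROM stdin\n", quoted);

    PQfreemem(quoted);
    PQfinish(conn);
    return 0;
}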


@ -1,7 +1,6 @@
/* diagnostics.c - definition for the psycopg Diagnostics type
*
* Copyright (C) 2013-2019 Matthew Woodcraft <matthew@woodcraft.me.uk>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* diagnostics.c - present information from libpq error responses
*
* Copyright (C) 2013-2019 Matthew Woodcraft <matthew@woodcraft.me.uk>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* error.h - definition for the psycopg base Error type
*
* Copyright (C) 2013-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* error_type.c - python interface to the Error objects
*
* Copyright (C) 2013-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -65,8 +64,6 @@ base_exception_from_sqlstate(const char *sqlstate)
switch (sqlstate[0]) {
case '0':
switch (sqlstate[1]) {
case '8': /* Class 08 - Connection Exception */
return OperationalError;
case 'A': /* Class 0A - Feature Not Supported */
return NotSupportedError;
}
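
base_exception_from_sqlstate() picks the DB-API exception class from the first two characters of the SQLSTATE; the master side adds the Class 08 (connection exception) branch shown above. A standalone sketch of the same dispatch, with strings standing in for the psycopg exception objects (the Class 23 entry and the generic fallback are illustrative additions, not taken from this diff):

/* sqlstate_demo.c - illustrative only, not psycopg code.
 * Build: cc sqlstate_demo.c -o sqlstate_demo
 */
#include <stdio.h>

static const char *exception_for_sqlstate(const char *sqlstate)
{
    switch (sqlstate[0]) {
    case '0':
        switch (sqlstate[1]) {
        case '8':               /* Class 08 - Connection Exception */
            return "OperationalError";
        case 'A':               /* Class 0A - Feature Not Supported */
            return "NotSupportedError";
        }
        break;
    case '2':
        switch (sqlstate[1]) {
        case '3':               /* Class 23 - Integrity Constraint Violation */
            return "IntegrityError";
        }
        break;
    }
    return "DatabaseError";     /* generic fallback */
}

int main(void)
{
    printf("08006 -> %s\n", exception_for_sqlstate("08006"));
    printf("23505 -> %s\n", exception_for_sqlstate("23505"));
    printf("42601 -> %s\n", exception_for_sqlstate("42601"));
    return 0;
}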


@ -1,7 +1,6 @@
/* green.c - cooperation with coroutine libraries.
*
* Copyright (C) 2010-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* green.c - cooperation with coroutine libraries.
*
* Copyright (C) 2010-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -3,7 +3,6 @@
* streaming replication
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -38,7 +37,6 @@
#include "psycopg/win32_support.h"
#else
#include <arpa/inet.h>
#include <sys/time.h>
#endif
/* support routines taken from pg_basebackup/streamutil.c */


@ -1,7 +1,6 @@
/* libpq_support.h - definitions for libpq_support.c
*
* Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* lobject.h - definition for the psycopg lobject type
*
* Copyright (C) 2006-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*


@ -1,7 +1,6 @@
/* lobject_int.c - code used by the lobject object
*
* Copyright (C) 2006-2019 Federico Di Gregorio <fog@debian.org>
* Copyright (C) 2020-2021 The Psycopg Team
*
* This file is part of psycopg.
*
@ -85,7 +84,11 @@ _lobject_parse_mode(const char *mode)
pos += 1;
break;
default:
#if PY_2
rv |= LOBJECT_BINARY;
#else
rv |= LOBJECT_TEXT;
#endif
break;
}
@ -173,7 +176,7 @@ lobject_open(lobjectObject *self, connectionObject *conn,
self->oid = lo_creat(self->conn->pgconn, INV_READ | INV_WRITE);
}
Dprintf("lobject_open: large object created with oid = %u",
Dprintf("lobject_open: large object created with oid = %d",
self->oid);
if (self->oid == InvalidOid) {
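
The first hunk above changes the default large-object mode when no explicit 't'/'b' qualifier is given (binary on Python 2, text on Python 3); the second prints the unsigned Oid with %u on master rather than %d. A simplified standalone sketch of the text/binary qualifier parsing (illustrative only, not psycopg code; the flag values are invented for the demo and the r/w part of the mode string is ignored):

/* lo_mode_demo.c - illustrative only, not psycopg code.
 * Build: cc lo_mode_demo.c -o lo_mode_demo
 */
#include <stdio.h>
#include <string.h>

#define LOBJECT_TEXT    0x01    /* flag values invented for the demo */
#define LOBJECT_BINARY  0x02

static int parse_mode(const char *mode)
{
    size_t n = strlen(mode);
    char qualifier = n ? mode[n - 1] : '\0';

    switch (qualifier) {
    case 'b':
        return LOBJECT_BINARY;
    case 't':
        return LOBJECT_TEXT;
    default:
        /* No explicit qualifier: default to text, as on Python 3. */
        return LOBJECT_TEXT;
    }
}

int main(void)
{
    printf("rb -> %s\n", (parse_mode("rb") & LOBJECT_BINARY) ? "binary" : "text");
    printf("w  -> %s\n", (parse_mode("w")  & LOBJECT_BINARY) ? "binary" : "text");
    return 0;
}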

Some files were not shown because too many files have changed in this diff.